PostgreSQL Source Code  git master
bufmgr.c
1 /*-------------------------------------------------------------------------
2  *
3  * bufmgr.c
4  * buffer manager interface routines
5  *
6  * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/storage/buffer/bufmgr.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 /*
16  * Principal entry points:
17  *
18  * ReadBuffer() -- find or create a buffer holding the requested page,
19  * and pin it so that no one can destroy it while this process
20  * is using it.
21  *
22  * StartReadBuffer() -- as above, with separate wait step
23  * StartReadBuffers() -- multiple block version
24  * WaitReadBuffers() -- second step of above
25  *
26  * ReleaseBuffer() -- unpin a buffer
27  *
28  * MarkBufferDirty() -- mark a pinned buffer's contents as "dirty".
29  * The disk write is delayed until buffer replacement or checkpoint.
30  *
31  * See also these files:
32  * freelist.c -- chooses victim for buffer replacement
33  * buf_table.c -- manages the buffer lookup table
34  */
35 #include "postgres.h"
36 
37 #include <sys/file.h>
38 #include <unistd.h>
39 
40 #include "access/tableam.h"
41 #include "access/xloginsert.h"
42 #include "access/xlogutils.h"
43 #include "catalog/storage.h"
44 #include "catalog/storage_xlog.h"
45 #include "executor/instrument.h"
46 #include "lib/binaryheap.h"
47 #include "miscadmin.h"
48 #include "pg_trace.h"
49 #include "pgstat.h"
50 #include "postmaster/bgwriter.h"
51 #include "storage/buf_internals.h"
52 #include "storage/bufmgr.h"
53 #include "storage/fd.h"
54 #include "storage/ipc.h"
55 #include "storage/lmgr.h"
56 #include "storage/proc.h"
57 #include "storage/smgr.h"
58 #include "storage/standby.h"
59 #include "utils/memdebug.h"
60 #include "utils/ps_status.h"
61 #include "utils/rel.h"
62 #include "utils/resowner.h"
63 #include "utils/timestamp.h"
64 
65 
66 /* Note: these two macros only work on shared buffers, not local ones! */
67 #define BufHdrGetBlock(bufHdr) ((Block) (BufferBlocks + ((Size) (bufHdr)->buf_id) * BLCKSZ))
68 #define BufferGetLSN(bufHdr) (PageGetLSN(BufHdrGetBlock(bufHdr)))
69 
70 /* Note: this macro only works on local buffers, not shared ones! */
71 #define LocalBufHdrGetBlock(bufHdr) \
72  LocalBufferBlockPointers[-((bufHdr)->buf_id + 2)]
73 
74 /* Bits in SyncOneBuffer's return value */
75 #define BUF_WRITTEN 0x01
76 #define BUF_REUSABLE 0x02
77 
78 #define RELS_BSEARCH_THRESHOLD 20
79 
80 /*
81  * This is the size (in the number of blocks) above which we scan the
82  * entire buffer pool to remove the buffers for all the pages of a relation
83  * being dropped. For relations smaller than this threshold, we find
84  * the buffers by doing lookups in the BufMapping table.
85  */
86 #define BUF_DROP_FULL_SCAN_THRESHOLD (uint64) (NBuffers / 32)
87 
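[Editor's note: as a minimal illustration of how this threshold is meant to be used by the drop-relation code paths (a sketch, not part of bufmgr.c; the function and parameter names are hypothetical):]

static inline bool
prefer_full_scan(uint64 nBlocksToDrop)
{
	/* NBuffers / 32 blocks or more: cheaper to sweep the whole pool */
	return nBlocksToDrop >= BUF_DROP_FULL_SCAN_THRESHOLD;
}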
88 typedef struct PrivateRefCountEntry
89 {
90  Buffer buffer;
91  int32 refcount;
92 } PrivateRefCountEntry;
93 
94 /* 64 bytes, about the size of a cache line on common systems */
95 #define REFCOUNT_ARRAY_ENTRIES 8
96 
97 /*
98  * Status of buffers to checkpoint for a particular tablespace, used
99  * internally in BufferSync.
100  */
101 typedef struct CkptTsStatus
102 {
103  /* oid of the tablespace */
104  Oid tsId;
105 
106  /*
107  * Checkpoint progress for this tablespace. To make progress comparable
108  * between tablespaces the progress is, for each tablespace, measured as a
109  * number between 0 and the total number of to-be-checkpointed pages. Each
110  * page checkpointed in this tablespace increments this space's progress
111  * by progress_slice.
112  */
113  float8 progress;
114  float8 progress_slice;
115 
116  /* number of to-be checkpointed pages in this tablespace */
117  int num_to_scan;
118  /* already processed pages in this tablespace */
119  int num_scanned;
120 
121  /* current offset in CkptBufferIds for this tablespace */
122  int index;
123 } CkptTsStatus;
124 
125 /*
126  * Type for array used to sort SMgrRelations
127  *
128  * FlushRelationsAllBuffers shares the same comparator function with
129  * DropRelationsAllBuffers. Pointer to this struct and RelFileLocator must be
130  * compatible.
131  */
132 typedef struct SMgrSortArray
133 {
134  RelFileLocator rlocator; /* This must be the first member */
135  SMgrRelation srel;
136 } SMgrSortArray;
137 
138 /* GUC variables */
139 bool zero_damaged_pages = false;
140 int bgwriter_lru_maxpages = 100;
141 double bgwriter_lru_multiplier = 2.0;
142 bool track_io_timing = false;
143 
144 /*
145  * How many buffers PrefetchBuffer callers should try to stay ahead of their
146  * ReadBuffer calls by. Zero means "never prefetch". This value is only used
147  * for buffers not belonging to tablespaces that have their
148  * effective_io_concurrency parameter set.
149  */
150 int effective_io_concurrency = DEFAULT_EFFECTIVE_IO_CONCURRENCY;
151 
152 /*
153  * Like effective_io_concurrency, but used by maintenance code paths that might
154  * benefit from a higher setting because they work on behalf of many sessions.
155  * Overridden by the tablespace setting of the same name.
156  */
157 int maintenance_io_concurrency = DEFAULT_MAINTENANCE_IO_CONCURRENCY;
158 
159 /*
160  * Limit on how many blocks should be handled in single I/O operations.
161  * StartReadBuffers() callers should respect it, as should other operations
162  * that call smgr APIs directly.
163  */
164 int io_combine_limit = DEFAULT_IO_COMBINE_LIMIT;
165 
166 /*
167  * GUC variables about triggering kernel writeback for buffers written; OS
168  * dependent defaults are set via the GUC mechanism.
169  */
170 int checkpoint_flush_after = DEFAULT_CHECKPOINT_FLUSH_AFTER;
171 int bgwriter_flush_after = DEFAULT_BGWRITER_FLUSH_AFTER;
172 int backend_flush_after = DEFAULT_BACKEND_FLUSH_AFTER;
173 
174 /* local state for LockBufferForCleanup */
175 static BufferDesc *PinCountWaitBuf = NULL;
176 
177 /*
178  * Backend-Private refcount management:
179  *
180  * Each buffer also has a private refcount that keeps track of the number of
181  * times the buffer is pinned in the current process. This is so that the
182  * shared refcount needs to be modified only once if a buffer is pinned more
183  * than once by an individual backend. It's also used to check that no buffers
184  * are still pinned at the end of transactions and when exiting.
185  *
186  *
187  * To avoid - as we used to - requiring an array with NBuffers entries to keep
188  * track of local buffers, we use a small sequentially searched array
189  * (PrivateRefCountArray) and an overflow hash table (PrivateRefCountHash) to
190  * keep track of backend local pins.
191  *
192  * Until no more than REFCOUNT_ARRAY_ENTRIES buffers are pinned at once, all
193  * refcounts are kept track of in the array; after that, new array entries
194  * displace old ones into the hash table. That way a frequently used entry
195  * can't get "stuck" in the hashtable while infrequent ones clog the array.
196  *
197  * Note that in most scenarios the number of pinned buffers will not exceed
198  * REFCOUNT_ARRAY_ENTRIES.
199  *
200  *
201  * To enter a buffer into the refcount tracking mechanism first reserve a free
202  * entry using ReservePrivateRefCountEntry() and then later, if necessary,
203  * fill it with NewPrivateRefCountEntry(). That split lets us avoid doing
204  * memory allocations in NewPrivateRefCountEntry() which can be important
205  * because in some scenarios it's called with a spinlock held...
206  */
207 static struct PrivateRefCountEntry PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES];
208 static HTAB *PrivateRefCountHash = NULL;
209 static int32 PrivateRefCountOverflowed = 0;
210 static uint32 PrivateRefCountClock = 0;
211 static PrivateRefCountEntry *ReservedRefCountEntry = NULL;
212 
213 static void ReservePrivateRefCountEntry(void);
214 static PrivateRefCountEntry *NewPrivateRefCountEntry(Buffer buffer);
215 static PrivateRefCountEntry *GetPrivateRefCountEntry(Buffer buffer, bool do_move);
216 static inline int32 GetPrivateRefCount(Buffer buffer);
217 static void ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref);
218 
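[Editor's note: to make the reserve-then-fill protocol described above concrete, here is a sketch of the calling pattern (not part of bufmgr.c; the real users are PinBuffer() and related routines further down in this file):]

static void
sketch_record_new_pin(Buffer buffer)
{
	PrivateRefCountEntry *ref;

	/*
	 * Step 1: reserve an entry while no spinlock is held; this may push an
	 * old array entry into the overflow hash table and allocate memory.
	 */
	ReservePrivateRefCountEntry();

	/*
	 * Step 2: ... take the buffer header spinlock, adjust the shared
	 * refcount, release the spinlock ...
	 */

	/* Step 3: fill the reserved entry; this never allocates memory. */
	ref = NewPrivateRefCountEntry(buffer);
	ref->refcount++;
}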
219 /* ResourceOwner callbacks to hold in-progress I/Os and buffer pins */
220 static void ResOwnerReleaseBufferIO(Datum res);
221 static char *ResOwnerPrintBufferIO(Datum res);
222 static void ResOwnerReleaseBufferPin(Datum res);
223 static char *ResOwnerPrintBufferPin(Datum res);
224 
225 static const ResourceOwnerDesc buffer_io_resowner_desc =
226 {
227  .name = "buffer io",
228  .release_phase = RESOURCE_RELEASE_BEFORE_LOCKS,
229  .release_priority = RELEASE_PRIO_BUFFER_IOS,
230  .ReleaseResource = ResOwnerReleaseBufferIO,
231  .DebugPrint = ResOwnerPrintBufferIO
232 };
233 
234 static const ResourceOwnerDesc buffer_pin_resowner_desc =
235 {
236  .name = "buffer pin",
237  .release_phase = RESOURCE_RELEASE_BEFORE_LOCKS,
238  .release_priority = RELEASE_PRIO_BUFFER_PINS,
239  .ReleaseResource = ResOwnerReleaseBufferPin,
240  .DebugPrint = ResOwnerPrintBufferPin
241 };
242 
243 /*
244  * Ensure that the PrivateRefCountArray has sufficient space to store one more
245  * entry. This has to be called before using NewPrivateRefCountEntry() to fill
246  * a new entry - but it's perfectly fine to not use a reserved entry.
247  */
248 static void
249 ReservePrivateRefCountEntry(void)
250 {
251  /* Already reserved (or freed), nothing to do */
252  if (ReservedRefCountEntry != NULL)
253  return;
254 
255  /*
256  * First search for a free entry in the array; that'll be sufficient in the
257  * majority of cases.
258  */
259  {
260  int i;
261 
262  for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
263  {
264  PrivateRefCountEntry *res;
265 
266  res = &PrivateRefCountArray[i];
267 
268  if (res->buffer == InvalidBuffer)
269  {
270  ReservedRefCountEntry = res;
271  return;
272  }
273  }
274  }
275 
276  /*
277  * No luck. All array entries are full. Move one array entry into the hash
278  * table.
279  */
280  {
281  /*
282  * Move entry from the current clock position in the array into the
283  * hashtable. Use that slot.
284  */
285  PrivateRefCountEntry *hashent;
286  bool found;
287 
288  /* select victim slot */
289  ReservedRefCountEntry =
290  &PrivateRefCountArray[PrivateRefCountClock++ % REFCOUNT_ARRAY_ENTRIES];
291 
292  /* Better be used, otherwise we shouldn't get here. */
293  Assert(ReservedRefCountEntry->buffer != InvalidBuffer);
294 
295  /* enter victim array entry into hashtable */
296  hashent = hash_search(PrivateRefCountHash,
297  &(ReservedRefCountEntry->buffer),
298  HASH_ENTER,
299  &found);
300  Assert(!found);
301  hashent->refcount = ReservedRefCountEntry->refcount;
302 
303  /* clear the now free array slot */
304  ReservedRefCountEntry->buffer = InvalidBuffer;
305  ReservedRefCountEntry->refcount = 0;
306 
307  PrivateRefCountOverflowed++;
308  }
309 }
310 
311 /*
312  * Fill a previously reserved refcount entry.
313  */
314 static PrivateRefCountEntry *
315 NewPrivateRefCountEntry(Buffer buffer)
316 {
317  PrivateRefCountEntry *res;
318 
319  /* only allowed to be called when a reservation has been made */
320  Assert(ReservedRefCountEntry != NULL);
321 
322  /* use up the reserved entry */
323  res = ReservedRefCountEntry;
324  ReservedRefCountEntry = NULL;
325 
326  /* and fill it */
327  res->buffer = buffer;
328  res->refcount = 0;
329 
330  return res;
331 }
332 
333 /*
334  * Return the PrivateRefCount entry for the passed buffer.
335  *
336  * Returns NULL if a buffer doesn't have a refcount entry. Otherwise, if
337  * do_move is true, and the entry resides in the hashtable the entry is
338  * optimized for frequent access by moving it to the array.
339  */
340 static PrivateRefCountEntry *
341 GetPrivateRefCountEntry(Buffer buffer, bool do_move)
342 {
343  PrivateRefCountEntry *res;
344  int i;
345 
346  Assert(BufferIsValid(buffer));
347  Assert(!BufferIsLocal(buffer));
348 
349  /*
350  * First search for references in the array, that'll be sufficient in the
351  * majority of cases.
352  */
353  for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
354  {
355  res = &PrivateRefCountArray[i];
356 
357  if (res->buffer == buffer)
358  return res;
359  }
360 
361  /*
362  * By here we know that the buffer, if already pinned, isn't residing in
363  * the array.
364  *
365  * Only look up the buffer in the hashtable if we've previously overflowed
366  * into it.
367  */
368  if (PrivateRefCountOverflowed == 0)
369  return NULL;
370 
371  res = hash_search(PrivateRefCountHash, &buffer, HASH_FIND, NULL);
372 
373  if (res == NULL)
374  return NULL;
375  else if (!do_move)
376  {
377  /* caller doesn't want us to move the hash entry into the array */
378  return res;
379  }
380  else
381  {
382  /* move buffer from hashtable into the free array slot */
383  bool found;
384  PrivateRefCountEntry *free;
385 
386  /* Ensure there's a free array slot */
387  ReservePrivateRefCountEntry();
388 
389  /* Use up the reserved slot */
390  Assert(ReservedRefCountEntry != NULL);
391  free = ReservedRefCountEntry;
392  ReservedRefCountEntry = NULL;
393  Assert(free->buffer == InvalidBuffer);
394 
395  /* and fill it */
396  free->buffer = buffer;
397  free->refcount = res->refcount;
398 
399  /* delete from hashtable */
400  hash_search(PrivateRefCountHash, &buffer, HASH_REMOVE, &found);
401  Assert(found);
402  Assert(PrivateRefCountOverflowed > 0);
403  PrivateRefCountOverflowed--;
404 
405  return free;
406  }
407 }
408 
409 /*
410  * Returns how many times the passed buffer is pinned by this backend.
411  *
412  * Only works for shared memory buffers!
413  */
414 static inline int32
415 GetPrivateRefCount(Buffer buffer)
416 {
417  PrivateRefCountEntry *ref;
418 
419  Assert(BufferIsValid(buffer));
420  Assert(!BufferIsLocal(buffer));
421 
422  /*
423  * Not moving the entry - that's ok for the current users, but we might
424  * want to change this one day.
425  */
426  ref = GetPrivateRefCountEntry(buffer, false);
427 
428  if (ref == NULL)
429  return 0;
430  return ref->refcount;
431 }
432 
433 /*
434  * Release resources used to track the reference count of a buffer which we no
435  * longer have pinned and don't want to pin again immediately.
436  */
437 static void
438 ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref)
439 {
440  Assert(ref->refcount == 0);
441 
442  if (ref >= &PrivateRefCountArray[0] &&
443  ref < &PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES])
444  {
445  ref->buffer = InvalidBuffer;
446 
447  /*
448  * Mark the just used entry as reserved - in many scenarios that
449  * allows us to avoid ever having to search the array/hash for free
450  * entries.
451  */
452  ReservedRefCountEntry = ref;
453  }
454  else
455  {
456  bool found;
457  Buffer buffer = ref->buffer;
458 
459  hash_search(PrivateRefCountHash, &buffer, HASH_REMOVE, &found);
460  Assert(found);
461  Assert(PrivateRefCountOverflowed > 0);
462  PrivateRefCountOverflowed--;
463  }
464 }
465 
466 /*
467  * BufferIsPinned
468  * True iff the buffer is pinned (also checks for valid buffer number).
469  *
470  * NOTE: what we check here is that *this* backend holds a pin on
471  * the buffer. We do not care whether some other backend does.
472  */
473 #define BufferIsPinned(bufnum) \
474 ( \
475  !BufferIsValid(bufnum) ? \
476  false \
477  : \
478  BufferIsLocal(bufnum) ? \
479  (LocalRefCount[-(bufnum) - 1] > 0) \
480  : \
481  (GetPrivateRefCount(bufnum) > 0) \
482 )
483 
484 
485 static Buffer ReadBuffer_common(Relation rel,
486  SMgrRelation smgr, char smgr_persistence,
487  ForkNumber forkNum, BlockNumber blockNum,
488  ReadBufferMode mode, BufferAccessStrategy strategy);
489 static BlockNumber ExtendBufferedRelCommon(BufferManagerRelation bmr,
490  ForkNumber fork,
491  BufferAccessStrategy strategy,
492  uint32 flags,
493  uint32 extend_by,
494  BlockNumber extend_upto,
495  Buffer *buffers,
496  uint32 *extended_by);
497 static BlockNumber ExtendBufferedRelShared(BufferManagerRelation bmr,
498  ForkNumber fork,
499  BufferAccessStrategy strategy,
500  uint32 flags,
501  uint32 extend_by,
502  BlockNumber extend_upto,
503  Buffer *buffers,
504  uint32 *extended_by);
505 static bool PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy);
506 static void PinBuffer_Locked(BufferDesc *buf);
507 static void UnpinBuffer(BufferDesc *buf);
508 static void UnpinBufferNoOwner(BufferDesc *buf);
509 static void BufferSync(int flags);
510 static uint32 WaitBufHdrUnlocked(BufferDesc *buf);
511 static int SyncOneBuffer(int buf_id, bool skip_recently_used,
512  WritebackContext *wb_context);
513 static void WaitIO(BufferDesc *buf);
514 static bool StartBufferIO(BufferDesc *buf, bool forInput, bool nowait);
515 static void TerminateBufferIO(BufferDesc *buf, bool clear_dirty,
516  uint32 set_flag_bits, bool forget_owner);
517 static void AbortBufferIO(Buffer buffer);
518 static void shared_buffer_write_error_callback(void *arg);
519 static void local_buffer_write_error_callback(void *arg);
520 static inline BufferDesc *BufferAlloc(SMgrRelation smgr,
521  char relpersistence,
522  ForkNumber forkNum,
523  BlockNumber blockNum,
524  BufferAccessStrategy strategy,
525  bool *foundPtr, IOContext io_context);
526 static Buffer GetVictimBuffer(BufferAccessStrategy strategy, IOContext io_context);
527 static void FlushBuffer(BufferDesc *buf, SMgrRelation reln,
528  IOObject io_object, IOContext io_context);
529 static void FindAndDropRelationBuffers(RelFileLocator rlocator,
530  ForkNumber forkNum,
531  BlockNumber nForkBlock,
532  BlockNumber firstDelBlock);
533 static void RelationCopyStorageUsingBuffer(RelFileLocator srclocator,
534  RelFileLocator dstlocator,
535  ForkNumber forkNum, bool permanent);
536 static void AtProcExit_Buffers(int code, Datum arg);
537 static void CheckForBufferLeaks(void);
538 static int rlocator_comparator(const void *p1, const void *p2);
539 static inline int buffertag_comparator(const BufferTag *ba, const BufferTag *bb);
540 static inline int ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b);
541 static int ts_ckpt_progress_comparator(Datum a, Datum b, void *arg);
542 
543 
544 /*
545  * Implementation of PrefetchBuffer() for shared buffers.
546  */
547 PrefetchBufferResult
548 PrefetchSharedBuffer(SMgrRelation smgr_reln,
549  ForkNumber forkNum,
550  BlockNumber blockNum)
551 {
552  PrefetchBufferResult result = {InvalidBuffer, false};
553  BufferTag newTag; /* identity of requested block */
554  uint32 newHash; /* hash value for newTag */
555  LWLock *newPartitionLock; /* buffer partition lock for it */
556  int buf_id;
557 
558  Assert(BlockNumberIsValid(blockNum));
559 
560  /* create a tag so we can lookup the buffer */
561  InitBufferTag(&newTag, &smgr_reln->smgr_rlocator.locator,
562  forkNum, blockNum);
563 
564  /* determine its hash code and partition lock ID */
565  newHash = BufTableHashCode(&newTag);
566  newPartitionLock = BufMappingPartitionLock(newHash);
567 
568  /* see if the block is in the buffer pool already */
569  LWLockAcquire(newPartitionLock, LW_SHARED);
570  buf_id = BufTableLookup(&newTag, newHash);
571  LWLockRelease(newPartitionLock);
572 
573  /* If not in buffers, initiate prefetch */
574  if (buf_id < 0)
575  {
576 #ifdef USE_PREFETCH
577  /*
578  * Try to initiate an asynchronous read. This returns false in
579  * recovery if the relation file doesn't exist.
580  */
581  if ((io_direct_flags & IO_DIRECT_DATA) == 0 &&
582  smgrprefetch(smgr_reln, forkNum, blockNum, 1))
583  {
584  result.initiated_io = true;
585  }
586 #endif /* USE_PREFETCH */
587  }
588  else
589  {
590  /*
591  * Report the buffer it was in at that time. The caller may be able
592  * to avoid a buffer table lookup, but it's not pinned and it must be
593  * rechecked!
594  */
595  result.recent_buffer = buf_id + 1;
596  }
597 
598  /*
599  * If the block *is* in buffers, we do nothing. This is not really ideal:
600  * the block might be just about to be evicted, which would be stupid
601  * since we know we are going to need it soon. But the only easy answer
602  * is to bump the usage_count, which does not seem like a great solution:
603  * when the caller does ultimately touch the block, usage_count would get
604  * bumped again, resulting in too much favoritism for blocks that are
605  * involved in a prefetch sequence. A real fix would involve some
606  * additional per-buffer state, and it's not clear that there's enough of
607  * a problem to justify that.
608  */
609 
610  return result;
611 }
612 
613 /*
614  * PrefetchBuffer -- initiate asynchronous read of a block of a relation
615  *
616  * This is named by analogy to ReadBuffer but doesn't actually allocate a
617  * buffer. Instead it tries to ensure that a future ReadBuffer for the given
618  * block will not be delayed by the I/O. Prefetching is optional.
619  *
620  * There are three possible outcomes:
621  *
622  * 1. If the block is already cached, the result includes a valid buffer that
623  * could be used by the caller to avoid the need for a later buffer lookup, but
624  * it's not pinned, so the caller must recheck it.
625  *
626  * 2. If the kernel has been asked to initiate I/O, the initiated_io member is
627  * true. Currently there is no way to know if the data was already cached by
628  * the kernel and therefore didn't really initiate I/O, and no way to know when
629  * the I/O completes other than using synchronous ReadBuffer().
630  *
631  * 3. Otherwise, the buffer wasn't already cached by PostgreSQL, and
632  * USE_PREFETCH is not defined (this build doesn't support prefetching due to
633  * lack of a kernel facility), direct I/O is enabled, or the underlying
634  * relation file wasn't found and we are in recovery. (If the relation file
635  * wasn't found and we are not in recovery, an error is raised).
636  */
637 PrefetchBufferResult
638 PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
639 {
640  Assert(RelationIsValid(reln));
641  Assert(BlockNumberIsValid(blockNum));
642 
643  if (RelationUsesLocalBuffers(reln))
644  {
645  /* see comments in ReadBufferExtended */
646  if (RELATION_IS_OTHER_TEMP(reln))
647  ereport(ERROR,
648  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
649  errmsg("cannot access temporary tables of other sessions")));
650 
651  /* pass it off to localbuf.c */
652  return PrefetchLocalBuffer(RelationGetSmgr(reln), forkNum, blockNum);
653  }
654  else
655  {
656  /* pass it to the shared buffer version */
657  return PrefetchSharedBuffer(RelationGetSmgr(reln), forkNum, blockNum);
658  }
659 }
660 
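[Editor's note: a caller-side sketch of how PrefetchBuffer() is typically combined with ReadBuffer() while walking a relation sequentially (not part of bufmgr.c; the prefetch distance of 8 blocks is arbitrary):]

static void
sketch_scan_with_prefetch(Relation rel, BlockNumber nblocks)
{
	for (BlockNumber blkno = 0; blkno < nblocks; blkno++)
	{
		/* stay a fixed distance ahead of the block we actually read */
		if (blkno + 8 < nblocks)
			(void) PrefetchBuffer(rel, MAIN_FORKNUM, blkno + 8);

		Buffer		buf = ReadBuffer(rel, blkno);

		/* ... inspect the page ... */
		ReleaseBuffer(buf);
	}
}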
661 /*
662  * ReadRecentBuffer -- try to pin a block in a recently observed buffer
663  *
664  * Compared to ReadBuffer(), this avoids a buffer mapping lookup when it's
665  * successful. Return true if the buffer is valid and still has the expected
666  * tag. In that case, the buffer is pinned and the usage count is bumped.
667  */
668 bool
669 ReadRecentBuffer(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockNum,
670  Buffer recent_buffer)
671 {
672  BufferDesc *bufHdr;
673  BufferTag tag;
674  uint32 buf_state;
675  bool have_private_ref;
676 
677  Assert(BufferIsValid(recent_buffer));
678 
679  ResourceOwnerEnlarge(CurrentResourceOwner);
680  ReservePrivateRefCountEntry();
681  InitBufferTag(&tag, &rlocator, forkNum, blockNum);
682 
683  if (BufferIsLocal(recent_buffer))
684  {
685  int b = -recent_buffer - 1;
686 
687  bufHdr = GetLocalBufferDescriptor(b);
688  buf_state = pg_atomic_read_u32(&bufHdr->state);
689 
690  /* Is it still valid and holding the right tag? */
691  if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
692  {
693  PinLocalBuffer(bufHdr, true);
694 
694 
695  pgBufferUsage.local_blks_hit++;
696 
697  return true;
698  }
699  }
700  else
701  {
702  bufHdr = GetBufferDescriptor(recent_buffer - 1);
703  have_private_ref = GetPrivateRefCount(recent_buffer) > 0;
704 
705  /*
706  * Do we already have this buffer pinned with a private reference? If
707  * so, it must be valid and it is safe to check the tag without
708  * locking. If not, we have to lock the header first and then check.
709  */
710  if (have_private_ref)
711  buf_state = pg_atomic_read_u32(&bufHdr->state);
712  else
713  buf_state = LockBufHdr(bufHdr);
714 
715  if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
716  {
717  /*
718  * It's now safe to pin the buffer. We can't pin first and ask
719  * questions later, because it might confuse code paths like
720  * InvalidateBuffer() if we pinned a random non-matching buffer.
721  */
722  if (have_private_ref)
723  PinBuffer(bufHdr, NULL); /* bump pin count */
724  else
725  PinBuffer_Locked(bufHdr); /* pin for first time */
726 
727  pgBufferUsage.shared_blks_hit++;
728 
729  return true;
730  }
731 
732  /* If we locked the header above, now unlock. */
733  if (!have_private_ref)
734  UnlockBufHdr(bufHdr, buf_state);
735  }
736 
737  return false;
738 }
739 
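[Editor's note: a sketch (not part of bufmgr.c) of how the recent_buffer hint returned by PrefetchBuffer() can be combined with ReadRecentBuffer(), falling back to an ordinary read when the hint has gone stale:]

static Buffer
sketch_read_with_hint(Relation rel, BlockNumber blkno)
{
	PrefetchBufferResult p = PrefetchBuffer(rel, MAIN_FORKNUM, blkno);

	if (BufferIsValid(p.recent_buffer) &&
		ReadRecentBuffer(rel->rd_locator, MAIN_FORKNUM, blkno,
						 p.recent_buffer))
		return p.recent_buffer; /* pinned; tag was rechecked */

	/* hint missing or stale: do the normal mapping-table lookup */
	return ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, NULL);
}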
740 /*
741  * ReadBuffer -- a shorthand for ReadBufferExtended, for reading from main
742  * fork with RBM_NORMAL mode and default strategy.
743  */
744 Buffer
745 ReadBuffer(Relation reln, BlockNumber blockNum)
746 {
747  return ReadBufferExtended(reln, MAIN_FORKNUM, blockNum, RBM_NORMAL, NULL);
748 }
749 
750 /*
751  * ReadBufferExtended -- returns a buffer containing the requested
752  * block of the requested relation. If the blknum
753  * requested is P_NEW, extend the relation file and
754  * allocate a new block. (Caller is responsible for
755  * ensuring that only one backend tries to extend a
756  * relation at the same time!)
757  *
758  * Returns: the buffer number for the buffer containing
759  * the block read. The returned buffer has been pinned.
760  * Does not return on error --- elog's instead.
761  *
762  * Assume when this function is called, that reln has been opened already.
763  *
764  * In RBM_NORMAL mode, the page is read from disk, and the page header is
765  * validated. An error is thrown if the page header is not valid. (But
766  * note that an all-zero page is considered "valid"; see
767  * PageIsVerifiedExtended().)
768  *
769  * RBM_ZERO_ON_ERROR is like the normal mode, but if the page header is not
770  * valid, the page is zeroed instead of throwing an error. This is intended
771  * for non-critical data, where the caller is prepared to repair errors.
772  *
773  * In RBM_ZERO_AND_LOCK mode, if the page isn't in buffer cache already, it's
774  * filled with zeros instead of reading it from disk. Useful when the caller
775  * is going to fill the page from scratch, since this saves I/O and avoids
776  * unnecessary failure if the page-on-disk has corrupt page headers.
777  * The page is returned locked to ensure that the caller has a chance to
778  * initialize the page before it's made visible to others.
779  * Caution: do not use this mode to read a page that is beyond the relation's
780  * current physical EOF; that is likely to cause problems in md.c when
781  * the page is modified and written out. P_NEW is OK, though.
782  *
783  * RBM_ZERO_AND_CLEANUP_LOCK is the same as RBM_ZERO_AND_LOCK, but acquires
784  * a cleanup-strength lock on the page.
785  *
786  * RBM_NORMAL_NO_LOG mode is treated the same as RBM_NORMAL here.
787  *
788  * If strategy is not NULL, a nondefault buffer access strategy is used.
789  * See buffer/README for details.
790  */
791 inline Buffer
792 ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum,
793  ReadBufferMode mode, BufferAccessStrategy strategy)
794 {
795  Buffer buf;
796 
797  /*
798  * Reject attempts to read non-local temporary relations; we would be
799  * likely to get wrong data since we have no visibility into the owning
800  * session's local buffers.
801  */
802  if (RELATION_IS_OTHER_TEMP(reln))
803  ereport(ERROR,
804  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
805  errmsg("cannot access temporary tables of other sessions")));
806 
807  /*
808  * Read the buffer, and update pgstat counters to reflect a cache hit or
809  * miss.
810  */
811  buf = ReadBuffer_common(reln, RelationGetSmgr(reln), 0,
812  forkNum, blockNum, mode, strategy);
813 
814  return buf;
815 }
816 
817 
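[Editor's note: a sketch of the RBM_ZERO_AND_LOCK mode documented above, as used by callers that initialize a page from scratch (not part of bufmgr.c):]

static Buffer
sketch_init_new_page(Relation rel, BlockNumber blkno)
{
	Buffer		buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno,
										 RBM_ZERO_AND_LOCK, NULL);

	/* buffer comes back pinned and exclusively locked; safe to initialize */
	PageInit(BufferGetPage(buf), BufferGetPageSize(buf), 0);
	MarkBufferDirty(buf);

	return buf;					/* caller unlocks and releases later */
}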
818 /*
819  * ReadBufferWithoutRelcache -- like ReadBufferExtended, but doesn't require
820  * a relcache entry for the relation.
821  *
822  * Pass permanent = true for a RELPERSISTENCE_PERMANENT relation, and
823  * permanent = false for a RELPERSISTENCE_UNLOGGED relation. This function
824  * cannot be used for temporary relations (and making that work might be
825  * difficult, unless we only want to read temporary relations for our own
826  * ProcNumber).
827  */
828 Buffer
829 ReadBufferWithoutRelcache(RelFileLocator rlocator, ForkNumber forkNum,
830  BlockNumber blockNum, ReadBufferMode mode,
831  BufferAccessStrategy strategy, bool permanent)
832 {
833  SMgrRelation smgr = smgropen(rlocator, INVALID_PROC_NUMBER);
834 
835  return ReadBuffer_common(NULL, smgr,
836  permanent ? RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED,
837  forkNum, blockNum,
838  mode, strategy);
839 }
840 
841 /*
842  * Convenience wrapper around ExtendBufferedRelBy() extending by one block.
843  */
844 Buffer
845 ExtendBufferedRel(BufferManagerRelation bmr,
846  ForkNumber forkNum,
847  BufferAccessStrategy strategy,
848  uint32 flags)
849 {
850  Buffer buf;
851  uint32 extend_by = 1;
852 
853  ExtendBufferedRelBy(bmr, forkNum, strategy, flags, extend_by,
854  &buf, &extend_by);
855 
856  return buf;
857 }
858 
859 /*
860  * Extend relation by multiple blocks.
861  *
862  * Tries to extend the relation by extend_by blocks. Depending on the
863  * availability of resources the relation may end up being extended by a
864  * smaller number of pages (unless an error is thrown, always by at least one
865  * page). *extended_by is updated to the number of pages the relation has been
866  * extended by.
867  *
868  * buffers needs to be an array that is at least extend_by long. Upon
869  * completion, the first extend_by array elements will point to a pinned
870  * buffer.
871  *
872  * If EB_LOCK_FIRST is part of flags, the first returned buffer is
873  * locked. This is useful for callers that want a buffer that is guaranteed to
874  * be empty.
875  */
876 BlockNumber
877 ExtendBufferedRelBy(BufferManagerRelation bmr,
878  ForkNumber fork,
879  BufferAccessStrategy strategy,
880  uint32 flags,
881  uint32 extend_by,
882  Buffer *buffers,
883  uint32 *extended_by)
884 {
885  Assert((bmr.rel != NULL) != (bmr.smgr != NULL));
886  Assert(bmr.smgr == NULL || bmr.relpersistence != 0);
887  Assert(extend_by > 0);
888 
889  if (bmr.smgr == NULL)
890  {
891  bmr.smgr = RelationGetSmgr(bmr.rel);
892  bmr.relpersistence = bmr.rel->rd_rel->relpersistence;
893  }
894 
895  return ExtendBufferedRelCommon(bmr, fork, strategy, flags,
896  extend_by, InvalidBlockNumber,
897  buffers, extended_by);
898 }
899 
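[Editor's note: a caller-side sketch of a bulk extension with ExtendBufferedRelBy() (not part of bufmgr.c): request up to 16 new blocks, keep the first, locked one, and drop the other pins:]

static Buffer
sketch_bulk_extend(Relation rel)
{
	Buffer		buffers[16];
	uint32		extended_by = 0;

	(void) ExtendBufferedRelBy(BMR_REL(rel), MAIN_FORKNUM, NULL,
							   EB_LOCK_FIRST, lengthof(buffers),
							   buffers, &extended_by);

	/* all returned buffers are pinned; keep only the first (locked) one */
	for (uint32 i = 1; i < extended_by; i++)
		ReleaseBuffer(buffers[i]);

	return buffers[0];
}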
900 /*
901  * Extend the relation so it is at least extend_to blocks large, return buffer
902  * (extend_to - 1).
903  *
904  * This is useful for callers that want to write a specific page, regardless
905  * of the current size of the relation (e.g. useful for visibilitymap and for
906  * crash recovery).
907  */
908 Buffer
909 ExtendBufferedRelTo(BufferManagerRelation bmr,
910  ForkNumber fork,
911  BufferAccessStrategy strategy,
912  uint32 flags,
913  BlockNumber extend_to,
914  ReadBufferMode mode)
915 {
916  BlockNumber current_size;
917  uint32 extended_by = 0;
918  Buffer buffer = InvalidBuffer;
919  Buffer buffers[64];
920 
921  Assert((bmr.rel != NULL) != (bmr.smgr != NULL));
922  Assert(bmr.smgr == NULL || bmr.relpersistence != 0);
923  Assert(extend_to != InvalidBlockNumber && extend_to > 0);
924 
925  if (bmr.smgr == NULL)
926  {
927  bmr.smgr = RelationGetSmgr(bmr.rel);
928  bmr.relpersistence = bmr.rel->rd_rel->relpersistence;
929  }
930 
931  /*
932  * If desired, create the file if it doesn't exist. If
933  * smgr_cached_nblocks[fork] is positive then it must exist, no need for
934  * an smgrexists call.
935  */
936  if ((flags & EB_CREATE_FORK_IF_NEEDED) &&
937  (bmr.smgr->smgr_cached_nblocks[fork] == 0 ||
939  !smgrexists(bmr.smgr, fork))
940  {
941  LockRelationForExtension(bmr.rel, ExclusiveLock);
942 
943  /* recheck, fork might have been created concurrently */
944  if (!smgrexists(bmr.smgr, fork))
945  smgrcreate(bmr.smgr, fork, flags & EB_PERFORMING_RECOVERY);
946 
947  UnlockRelationForExtension(bmr.rel, ExclusiveLock);
948  }
949 
950  /*
951  * If requested, invalidate size cache, so that smgrnblocks asks the
952  * kernel.
953  */
954  if (flags & EB_CLEAR_SIZE_CACHE)
955  bmr.smgr->smgr_cached_nblocks[fork] = InvalidBlockNumber;
956 
957  /*
958  * Estimate how many pages we'll need to extend by. This avoids acquiring
959  * unnecessarily many victim buffers.
960  */
961  current_size = smgrnblocks(bmr.smgr, fork);
962 
963  /*
964  * Since no-one else can be looking at the page contents yet, there is no
965  * difference between an exclusive lock and a cleanup-strength lock. Note
966  * that we pass the original mode to ReadBuffer_common() below, when
967  * falling back to reading the buffer due to a concurrent relation extension.
968  */
969  if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
970  flags |= EB_LOCK_TARGET;
971 
972  while (current_size < extend_to)
973  {
974  uint32 num_pages = lengthof(buffers);
975  BlockNumber first_block;
976 
977  if ((uint64) current_size + num_pages > extend_to)
978  num_pages = extend_to - current_size;
979 
980  first_block = ExtendBufferedRelCommon(bmr, fork, strategy, flags,
981  num_pages, extend_to,
982  buffers, &extended_by);
983 
984  current_size = first_block + extended_by;
985  Assert(num_pages != 0 || current_size >= extend_to);
986 
987  for (uint32 i = 0; i < extended_by; i++)
988  {
989  if (first_block + i != extend_to - 1)
990  ReleaseBuffer(buffers[i]);
991  else
992  buffer = buffers[i];
993  }
994  }
995 
996  /*
997  * It's possible that another backend concurrently extended the relation.
998  * In that case read the buffer.
999  *
1000  * XXX: Should we control this via a flag?
1001  */
1002  if (buffer == InvalidBuffer)
1003  {
1004  Assert(extended_by == 0);
1005  buffer = ReadBuffer_common(bmr.rel, bmr.smgr, 0,
1006  fork, extend_to - 1, mode, strategy);
1007  }
1008 
1009  return buffer;
1010 }
1011 
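[Editor's note: a sketch of ExtendBufferedRelTo() usage modeled on its visibility-map and recovery callers (not part of bufmgr.c): make sure the block "target" exists, creating the fork if needed, and return its buffer:]

static Buffer
sketch_grow_to_block(Relation rel, ForkNumber forknum, BlockNumber target)
{
	/* extend_to is one past the target block, so block "target" exists */
	return ExtendBufferedRelTo(BMR_REL(rel), forknum, NULL,
							   EB_CREATE_FORK_IF_NEEDED | EB_CLEAR_SIZE_CACHE,
							   target + 1, RBM_ZERO_ON_ERROR);
}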
1012 /*
1013  * Lock and optionally zero a buffer, as part of the implementation of
1014  * RBM_ZERO_AND_LOCK or RBM_ZERO_AND_CLEANUP_LOCK. The buffer must be already
1015  * pinned. If the buffer is not already valid, it is zeroed and made valid.
1016  */
1017 static void
1018 ZeroAndLockBuffer(Buffer buffer, ReadBufferMode mode, bool already_valid)
1019 {
1020  BufferDesc *bufHdr;
1021  bool need_to_zero;
1022  bool isLocalBuf = BufferIsLocal(buffer);
1023 
1024  Assert(mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK);
1025 
1026  if (already_valid)
1027  {
1028  /*
1029  * If the caller already knew the buffer was valid, we can skip some
1030  * header interaction. The caller just wants to lock the buffer.
1031  */
1032  need_to_zero = false;
1033  }
1034  else if (isLocalBuf)
1035  {
1036  /* Simple case for non-shared buffers. */
1037  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
1038  need_to_zero = (pg_atomic_read_u32(&bufHdr->state) & BM_VALID) == 0;
1039  }
1040  else
1041  {
1042  /*
1043  * Take BM_IO_IN_PROGRESS, or discover that BM_VALID has been set
1044  * concurrently. Even though we aren't doing I/O, that ensures that
1045  * we don't zero a page that someone else has pinned. An exclusive
1046  * content lock wouldn't be enough, because readers are allowed to
1047  * drop the content lock after determining that a tuple is visible
1048  * (see buffer access rules in README).
1049  */
1050  bufHdr = GetBufferDescriptor(buffer - 1);
1051  need_to_zero = StartBufferIO(bufHdr, true, false);
1052  }
1053 
1054  if (need_to_zero)
1055  {
1056  memset(BufferGetPage(buffer), 0, BLCKSZ);
1057 
1058  /*
1059  * Grab the buffer content lock before marking the page as valid, to
1060  * make sure that no other backend sees the zeroed page before the
1061  * caller has had a chance to initialize it.
1062  *
1063  * Since no-one else can be looking at the page contents yet, there is
1064  * no difference between an exclusive lock and a cleanup-strength
1065  * lock. (Note that we cannot use LockBuffer() or
1066  * LockBufferForCleanup() here, because they assert that the buffer is
1067  * already valid.)
1068  */
1069  if (!isLocalBuf)
1070  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_EXCLUSIVE);
1071 
1072  if (isLocalBuf)
1073  {
1074  /* Only need to adjust flags */
1075  uint32 buf_state = pg_atomic_read_u32(&bufHdr->state);
1076 
1077  buf_state |= BM_VALID;
1078  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
1079  }
1080  else
1081  {
1082  /* Set BM_VALID, terminate IO, and wake up any waiters */
1083  TerminateBufferIO(bufHdr, false, BM_VALID, true);
1084  }
1085  }
1086  else if (!isLocalBuf)
1087  {
1088  /*
1089  * The buffer is valid, so we can't zero it. The caller still expects
1090  * the page to be locked on return.
1091  */
1092  if (mode == RBM_ZERO_AND_LOCK)
1093  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
1094  else
1095  LockBufferForCleanup(buffer);
1096  }
1097 }
1098 
1099 /*
1100  * Pin a buffer for a given block. *foundPtr is set to true if the block was
1101  * already present, or false if more work is required to either read it in or
1102  * zero it.
1103  */
1104 static pg_attribute_always_inline Buffer
1105 PinBufferForBlock(Relation rel,
1106  SMgrRelation smgr,
1107  char smgr_persistence,
1108  ForkNumber forkNum,
1109  BlockNumber blockNum,
1110  BufferAccessStrategy strategy,
1111  bool *foundPtr)
1112 {
1113  BufferDesc *bufHdr;
1114  IOContext io_context;
1115  IOObject io_object;
1116  char persistence;
1117 
1118  Assert(blockNum != P_NEW);
1119 
1120  /*
1121  * If there is no Relation it usually implies recovery and thus permanent,
1122  * but we take an argument because CreateAndCopyRelationData can reach us
1123  * with only an SMgrRelation for an unlogged relation that we don't want
1124  * to flag with BM_PERMANENT.
1125  */
1126  if (rel)
1127  persistence = rel->rd_rel->relpersistence;
1128  else if (smgr_persistence == 0)
1129  persistence = RELPERSISTENCE_PERMANENT;
1130  else
1131  persistence = smgr_persistence;
1132 
1133  if (persistence == RELPERSISTENCE_TEMP)
1134  {
1135  io_context = IOCONTEXT_NORMAL;
1136  io_object = IOOBJECT_TEMP_RELATION;
1137  }
1138  else
1139  {
1140  io_context = IOContextForStrategy(strategy);
1141  io_object = IOOBJECT_RELATION;
1142  }
1143 
1144  TRACE_POSTGRESQL_BUFFER_READ_START(forkNum, blockNum,
1145  smgr->smgr_rlocator.locator.spcOid,
1146  smgr->smgr_rlocator.locator.dbOid,
1147  smgr->smgr_rlocator.locator.relNumber,
1148  smgr->smgr_rlocator.backend);
1149 
1150  if (persistence == RELPERSISTENCE_TEMP)
1151  {
1152  bufHdr = LocalBufferAlloc(smgr, forkNum, blockNum, foundPtr);
1153  if (*foundPtr)
1154  pgBufferUsage.local_blks_hit++;
1155  }
1156  else
1157  {
1158  bufHdr = BufferAlloc(smgr, persistence, forkNum, blockNum,
1159  strategy, foundPtr, io_context);
1160  if (*foundPtr)
1161  pgBufferUsage.shared_blks_hit++;
1162  }
1163  if (rel)
1164  {
1165  /*
1166  * While pgBufferUsage's "read" counter isn't bumped unless we reach
1167  * WaitReadBuffers() (so, not for hits, and not for buffers that are
1168  * zeroed instead), the per-relation stats always count them.
1169  */
1170  pgstat_count_buffer_read(rel);
1171  if (*foundPtr)
1172  pgstat_count_buffer_hit(rel);
1173  }
1174  if (*foundPtr)
1175  {
1176  VacuumPageHit++;
1177  pgstat_count_io_op(io_object, io_context, IOOP_HIT);
1178  if (VacuumCostActive)
1179  VacuumCostBalance += VacuumCostPageHit;
1180 
1181  TRACE_POSTGRESQL_BUFFER_READ_DONE(forkNum, blockNum,
1182  smgr->smgr_rlocator.locator.spcOid,
1183  smgr->smgr_rlocator.locator.dbOid,
1184  smgr->smgr_rlocator.locator.relNumber,
1185  smgr->smgr_rlocator.backend,
1186  true);
1187  }
1188 
1189  return BufferDescriptorGetBuffer(bufHdr);
1190 }
1191 
1192 /*
1193  * ReadBuffer_common -- common logic for all ReadBuffer variants
1194  *
1195  * smgr is required, rel is optional unless using P_NEW.
1196  */
1197 static Buffer
1198 ReadBuffer_common(Relation rel, SMgrRelation smgr, char smgr_persistence,
1199  ForkNumber forkNum,
1200  BlockNumber blockNum, ReadBufferMode mode,
1201  BufferAccessStrategy strategy)
1202 {
1203  ReadBuffersOperation operation;
1204  Buffer buffer;
1205  int flags;
1206 
1207  /*
1208  * Backward compatibility path, most code should use ExtendBufferedRel()
1209  * instead, as acquiring the extension lock inside ExtendBufferedRel()
1210  * scales a lot better.
1211  */
1212  if (unlikely(blockNum == P_NEW))
1213  {
1214  uint32 flags = EB_SKIP_EXTENSION_LOCK;
1215 
1216  /*
1217  * Since no-one else can be looking at the page contents yet, there is
1218  * no difference between an exclusive lock and a cleanup-strength
1219  * lock.
1220  */
1221  if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
1222  flags |= EB_LOCK_FIRST;
1223 
1224  return ExtendBufferedRel(BMR_REL(rel), forkNum, strategy, flags);
1225  }
1226 
1227  if (unlikely(mode == RBM_ZERO_AND_CLEANUP_LOCK ||
1228  mode == RBM_ZERO_AND_LOCK))
1229  {
1230  bool found;
1231 
1232  buffer = PinBufferForBlock(rel, smgr, smgr_persistence,
1233  forkNum, blockNum, strategy, &found);
1234  ZeroAndLockBuffer(buffer, mode, found);
1235  return buffer;
1236  }
1237 
1238  if (mode == RBM_ZERO_ON_ERROR)
1239  flags = READ_BUFFERS_ZERO_ON_ERROR;
1240  else
1241  flags = 0;
1242  operation.smgr = smgr;
1243  operation.rel = rel;
1244  operation.smgr_persistence = smgr_persistence;
1245  operation.forknum = forkNum;
1246  operation.strategy = strategy;
1247  if (StartReadBuffer(&operation,
1248  &buffer,
1249  blockNum,
1250  flags))
1251  WaitReadBuffers(&operation);
1252 
1253  return buffer;
1254 }
1255 
1256 static pg_attribute_always_inline bool
1257 StartReadBuffersImpl(ReadBuffersOperation *operation,
1258  Buffer *buffers,
1259  BlockNumber blockNum,
1260  int *nblocks,
1261  int flags)
1262 {
1263  int actual_nblocks = *nblocks;
1264  int io_buffers_len = 0;
1265 
1266  Assert(*nblocks > 0);
1267  Assert(*nblocks <= MAX_IO_COMBINE_LIMIT);
1268 
1269  for (int i = 0; i < actual_nblocks; ++i)
1270  {
1271  bool found;
1272 
1273  buffers[i] = PinBufferForBlock(operation->rel,
1274  operation->smgr,
1275  operation->smgr_persistence,
1276  operation->forknum,
1277  blockNum + i,
1278  operation->strategy,
1279  &found);
1280 
1281  if (found)
1282  {
1283  /*
1284  * Terminate the read as soon as we get a hit. It could be a
1285  * single buffer hit, or it could be a hit that follows a readable
1286  * range. We don't want to create more than one readable range,
1287  * so we stop here.
1288  */
1289  actual_nblocks = i + 1;
1290  break;
1291  }
1292  else
1293  {
1294  /* Extend the readable range to cover this block. */
1295  io_buffers_len++;
1296  }
1297  }
1298  *nblocks = actual_nblocks;
1299 
1300  if (likely(io_buffers_len == 0))
1301  return false;
1302 
1303  /* Populate information needed for I/O. */
1304  operation->buffers = buffers;
1305  operation->blocknum = blockNum;
1306  operation->flags = flags;
1307  operation->nblocks = actual_nblocks;
1308  operation->io_buffers_len = io_buffers_len;
1309 
1310  if (flags & READ_BUFFERS_ISSUE_ADVICE)
1311  {
1312  /*
1313  * In theory we should only do this if PinBufferForBlock() had to
1314  * allocate new buffers above. That way, if two calls to
1315  * StartReadBuffers() were made for the same blocks before
1316  * WaitReadBuffers(), only the first would issue the advice. That'd be
1317  * a better simulation of true asynchronous I/O, which would only
1318  * start the I/O once, but isn't done here for simplicity. Note also
1319  * that the following call might actually issue two advice calls if we
1320  * cross a segment boundary; in a true asynchronous version we might
1321  * choose to process only one real I/O at a time in that case.
1322  */
1323  smgrprefetch(operation->smgr,
1324  operation->forknum,
1325  blockNum,
1326  operation->io_buffers_len);
1327  }
1328 
1329  /* Indicate that WaitReadBuffers() should be called. */
1330  return true;
1331 }
1332 
1333 /*
1334  * Begin reading a range of blocks beginning at blockNum and extending for
1335  * *nblocks. On return, up to *nblocks pinned buffers holding those blocks
1336  * are written into the buffers array, and *nblocks is updated to contain the
1337  * actual number, which may be fewer than requested. Caller sets some of the
1338  * members of operation; see struct definition.
1339  *
1340  * If false is returned, no I/O is necessary. If true is returned, one I/O
1341  * has been started, and WaitReadBuffers() must be called with the same
1342  * operation object before the buffers are accessed. Along with the operation
1343  * object, the caller-supplied array of buffers must remain valid until
1344  * WaitReadBuffers() is called.
1345  *
1346  * Currently the I/O is only started with optional operating system advice if
1347  * requested by the caller with READ_BUFFERS_ISSUE_ADVICE, and the real I/O
1348  * happens synchronously in WaitReadBuffers(). In future work, true I/O could
1349  * be initiated here.
1350  */
1351 bool
1352 StartReadBuffers(ReadBuffersOperation *operation,
1353  Buffer *buffers,
1354  BlockNumber blockNum,
1355  int *nblocks,
1356  int flags)
1357 {
1358  return StartReadBuffersImpl(operation, buffers, blockNum, nblocks, flags);
1359 }
1360 
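[Editor's note: a caller-side sketch of the two-step read API documented above (not part of bufmgr.c): pin up to 8 consecutive blocks and, if an I/O was started, finish it before touching the pages. The buffers[] array and the operation object must stay valid until WaitReadBuffers() returns.]

static void
sketch_read_range(Relation rel, BlockNumber start)
{
	ReadBuffersOperation op;
	Buffer		buffers[8];
	int			nblocks = lengthof(buffers);

	/* members the caller is expected to set; see struct definition */
	op.rel = rel;
	op.smgr = RelationGetSmgr(rel);
	op.smgr_persistence = 0;	/* derive persistence from rel */
	op.forknum = MAIN_FORKNUM;
	op.strategy = NULL;

	if (StartReadBuffers(&op, buffers, start, &nblocks,
						 READ_BUFFERS_ISSUE_ADVICE))
		WaitReadBuffers(&op);

	/* *nblocks was updated to the number of buffers actually pinned */
	for (int i = 0; i < nblocks; i++)
		ReleaseBuffer(buffers[i]);
}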
1361 /*
1362  * Single block version of the StartReadBuffers(). This might save a few
1363  * instructions when called from another translation unit, because it is
1364  * specialized for nblocks == 1.
1365  */
1366 bool
1367 StartReadBuffer(ReadBuffersOperation *operation,
1368  Buffer *buffer,
1369  BlockNumber blocknum,
1370  int flags)
1371 {
1372  int nblocks = 1;
1373  bool result;
1374 
1375  result = StartReadBuffersImpl(operation, buffer, blocknum, &nblocks, flags);
1376  Assert(nblocks == 1); /* single block can't be short */
1377 
1378  return result;
1379 }
1380 
1381 static inline bool
1382 WaitReadBuffersCanStartIO(Buffer buffer, bool nowait)
1383 {
1384  if (BufferIsLocal(buffer))
1385  {
1386  BufferDesc *bufHdr = GetLocalBufferDescriptor(-buffer - 1);
1387 
1388  return (pg_atomic_read_u32(&bufHdr->state) & BM_VALID) == 0;
1389  }
1390  else
1391  return StartBufferIO(GetBufferDescriptor(buffer - 1), true, nowait);
1392 }
1393 
1394 void
1395 WaitReadBuffers(ReadBuffersOperation *operation)
1396 {
1397  Buffer *buffers;
1398  int nblocks;
1399  BlockNumber blocknum;
1400  ForkNumber forknum;
1401  IOContext io_context;
1402  IOObject io_object;
1403  char persistence;
1404 
1405  /*
1406  * Currently operations are only allowed to include a read of some range,
1407  * with an optional extra buffer that is already pinned at the end. So
1408  * nblocks can be at most one more than io_buffers_len.
1409  */
1410  Assert((operation->nblocks == operation->io_buffers_len) ||
1411  (operation->nblocks == operation->io_buffers_len + 1));
1412 
1413  /* Find the range of the physical read we need to perform. */
1414  nblocks = operation->io_buffers_len;
1415  if (nblocks == 0)
1416  return; /* nothing to do */
1417 
1418  buffers = &operation->buffers[0];
1419  blocknum = operation->blocknum;
1420  forknum = operation->forknum;
1421 
1422  persistence = operation->rel
1423  ? operation->rel->rd_rel->relpersistence
1424  : RELPERSISTENCE_PERMANENT;
1425  if (persistence == RELPERSISTENCE_TEMP)
1426  {
1427  io_context = IOCONTEXT_NORMAL;
1428  io_object = IOOBJECT_TEMP_RELATION;
1429  }
1430  else
1431  {
1432  io_context = IOContextForStrategy(operation->strategy);
1433  io_object = IOOBJECT_RELATION;
1434  }
1435 
1436  /*
1437  * We count all these blocks as read by this backend. This is traditional
1438  * behavior, but might turn out to be not true if we find that someone
1439  * else has beaten us and completed the read of some of these blocks. In
1440  * that case the system globally double-counts, but we traditionally don't
1441  * count this as a "hit", and we don't have a separate counter for "miss,
1442  * but another backend completed the read".
1443  */
1444  if (persistence == RELPERSISTENCE_TEMP)
1445  pgBufferUsage.local_blks_read += nblocks;
1446  else
1447  pgBufferUsage.shared_blks_read += nblocks;
1448 
1449  for (int i = 0; i < nblocks; ++i)
1450  {
1451  int io_buffers_len;
1452  Buffer io_buffers[MAX_IO_COMBINE_LIMIT];
1453  void *io_pages[MAX_IO_COMBINE_LIMIT];
1454  instr_time io_start;
1455  BlockNumber io_first_block;
1456 
1457  /*
1458  * Skip this block if someone else has already completed it. If an
1459  * I/O is already in progress in another backend, this will wait for
1460  * the outcome: either done, or something went wrong and we will
1461  * retry.
1462  */
1463  if (!WaitReadBuffersCanStartIO(buffers[i], false))
1464  {
1465  /*
1466  * Report this as a 'hit' for this backend, even though it must
1467  * have started out as a miss in PinBufferForBlock().
1468  */
1469  TRACE_POSTGRESQL_BUFFER_READ_DONE(forknum, blocknum + i,
1470  operation->smgr->smgr_rlocator.locator.spcOid,
1471  operation->smgr->smgr_rlocator.locator.dbOid,
1472  operation->smgr->smgr_rlocator.locator.relNumber,
1473  operation->smgr->smgr_rlocator.backend,
1474  true);
1475  continue;
1476  }
1477 
1478  /* We found a buffer that we need to read in. */
1479  io_buffers[0] = buffers[i];
1480  io_pages[0] = BufferGetBlock(buffers[i]);
1481  io_first_block = blocknum + i;
1482  io_buffers_len = 1;
1483 
1484  /*
1485  * How many neighboring-on-disk blocks can we scatter-read into
1486  * other buffers at the same time? In this case we don't wait if we
1487  * see an I/O already in progress. We already hold BM_IO_IN_PROGRESS
1488  * for the head block, so we should get on with that I/O as soon as
1489  * possible. We'll come back to this block again, above.
1490  */
1491  while ((i + 1) < nblocks &&
1492  WaitReadBuffersCanStartIO(buffers[i + 1], true))
1493  {
1494  /* Must be consecutive block numbers. */
1495  Assert(BufferGetBlockNumber(buffers[i + 1]) ==
1496  BufferGetBlockNumber(buffers[i]) + 1);
1497 
1498  io_buffers[io_buffers_len] = buffers[++i];
1499  io_pages[io_buffers_len++] = BufferGetBlock(buffers[i]);
1500  }
1501 
1502  io_start = pgstat_prepare_io_time(track_io_timing);
1503  smgrreadv(operation->smgr, forknum, io_first_block, io_pages, io_buffers_len);
1504  pgstat_count_io_op_time(io_object, io_context, IOOP_READ, io_start,
1505  io_buffers_len);
1506 
1507  /* Verify each block we read, and terminate the I/O. */
1508  for (int j = 0; j < io_buffers_len; ++j)
1509  {
1510  BufferDesc *bufHdr;
1511  Block bufBlock;
1512 
1513  if (persistence == RELPERSISTENCE_TEMP)
1514  {
1515  bufHdr = GetLocalBufferDescriptor(-io_buffers[j] - 1);
1516  bufBlock = LocalBufHdrGetBlock(bufHdr);
1517  }
1518  else
1519  {
1520  bufHdr = GetBufferDescriptor(io_buffers[j] - 1);
1521  bufBlock = BufHdrGetBlock(bufHdr);
1522  }
1523 
1524  /* check for garbage data */
1525  if (!PageIsVerifiedExtended((Page) bufBlock, io_first_block + j,
1526  PIV_LOG_WARNING | PIV_REPORT_STAT))
1527  {
1528  if ((operation->flags & READ_BUFFERS_ZERO_ON_ERROR) || zero_damaged_pages)
1529  {
1530  ereport(WARNING,
1531  (errcode(ERRCODE_DATA_CORRUPTED),
1532  errmsg("invalid page in block %u of relation %s; zeroing out page",
1533  io_first_block + j,
1534  relpath(operation->smgr->smgr_rlocator, forknum))));
1535  memset(bufBlock, 0, BLCKSZ);
1536  }
1537  else
1538  ereport(ERROR,
1539  (errcode(ERRCODE_DATA_CORRUPTED),
1540  errmsg("invalid page in block %u of relation %s",
1541  io_first_block + j,
1542  relpath(operation->smgr->smgr_rlocator, forknum))));
1543  }
1544 
1545  /* Terminate I/O and set BM_VALID. */
1546  if (persistence == RELPERSISTENCE_TEMP)
1547  {
1548  uint32 buf_state = pg_atomic_read_u32(&bufHdr->state);
1549 
1550  buf_state |= BM_VALID;
1551  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
1552  }
1553  else
1554  {
1555  /* Set BM_VALID, terminate IO, and wake up any waiters */
1556  TerminateBufferIO(bufHdr, false, BM_VALID, true);
1557  }
1558 
1559  /* Report I/Os as completing individually. */
1560  TRACE_POSTGRESQL_BUFFER_READ_DONE(forknum, io_first_block + j,
1561  operation->smgr->smgr_rlocator.locator.spcOid,
1562  operation->smgr->smgr_rlocator.locator.dbOid,
1563  operation->smgr->smgr_rlocator.locator.relNumber,
1564  operation->smgr->smgr_rlocator.backend,
1565  false);
1566  }
1567 
1568  VacuumPageMiss += io_buffers_len;
1569  if (VacuumCostActive)
1570  VacuumCostBalance += VacuumCostPageMiss * io_buffers_len;
1571  }
1572 }
1573 
1574 /*
1575  * BufferAlloc -- subroutine for PinBufferForBlock. Handles lookup of a shared
1576  * buffer. If no buffer exists already, selects a replacement victim and
1577  * evicts the old page, but does NOT read in new page.
1578  *
1579  * "strategy" can be a buffer replacement strategy object, or NULL for
1580  * the default strategy. The selected buffer's usage_count is advanced when
1581  * using the default strategy, but otherwise possibly not (see PinBuffer).
1582  *
1583  * The returned buffer is pinned and is already marked as holding the
1584  * desired page. If it already did have the desired page, *foundPtr is
1585  * set true. Otherwise, *foundPtr is set false.
1586  *
1587  * io_context is passed as an output parameter to avoid calling
1588  * IOContextForStrategy() when there is a shared buffers hit and no IO
1589  * statistics need be captured.
1590  *
1591  * No locks are held either at entry or exit.
1592  */
1593 static pg_attribute_always_inline BufferDesc *
1594 BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
1595  BlockNumber blockNum,
1596  BufferAccessStrategy strategy,
1597  bool *foundPtr, IOContext io_context)
1598 {
1599  BufferTag newTag; /* identity of requested block */
1600  uint32 newHash; /* hash value for newTag */
1601  LWLock *newPartitionLock; /* buffer partition lock for it */
1602  int existing_buf_id;
1603  Buffer victim_buffer;
1604  BufferDesc *victim_buf_hdr;
1605  uint32 victim_buf_state;
1606 
1607  /* Make sure we will have room to remember the buffer pin */
1608  ResourceOwnerEnlarge(CurrentResourceOwner);
1609  ReservePrivateRefCountEntry();
1610 
1611  /* create a tag so we can lookup the buffer */
1612  InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);
1613 
1614  /* determine its hash code and partition lock ID */
1615  newHash = BufTableHashCode(&newTag);
1616  newPartitionLock = BufMappingPartitionLock(newHash);
1617 
1618  /* see if the block is in the buffer pool already */
1619  LWLockAcquire(newPartitionLock, LW_SHARED);
1620  existing_buf_id = BufTableLookup(&newTag, newHash);
1621  if (existing_buf_id >= 0)
1622  {
1623  BufferDesc *buf;
1624  bool valid;
1625 
1626  /*
1627  * Found it. Now, pin the buffer so no one can steal it from the
1628  * buffer pool, and check to see if the correct data has been loaded
1629  * into the buffer.
1630  */
1631  buf = GetBufferDescriptor(existing_buf_id);
1632 
1633  valid = PinBuffer(buf, strategy);
1634 
1635  /* Can release the mapping lock as soon as we've pinned it */
1636  LWLockRelease(newPartitionLock);
1637 
1638  *foundPtr = true;
1639 
1640  if (!valid)
1641  {
1642  /*
1643  * We can only get here if (a) someone else is still reading in
1644  * the page, (b) a previous read attempt failed, or (c) someone
1645  * called StartReadBuffers() but not yet WaitReadBuffers().
1646  */
1647  *foundPtr = false;
1648  }
1649 
1650  return buf;
1651  }
1652 
1653  /*
1654  * Didn't find it in the buffer pool. We'll have to initialize a new
1655  * buffer. Remember to unlock the mapping lock while doing the work.
1656  */
1657  LWLockRelease(newPartitionLock);
1658 
1659  /*
1660  * Acquire a victim buffer. Somebody else might try to do the same, we
1661  * don't hold any conflicting locks. If so we'll have to undo our work
1662  * later.
1663  */
1664  victim_buffer = GetVictimBuffer(strategy, io_context);
1665  victim_buf_hdr = GetBufferDescriptor(victim_buffer - 1);
1666 
1667  /*
1668  * Try to make a hashtable entry for the buffer under its new tag. If
1669  * somebody else inserted another buffer for the tag, we'll release the
1670  * victim buffer we acquired and use the already inserted one.
1671  */
1672  LWLockAcquire(newPartitionLock, LW_EXCLUSIVE);
1673  existing_buf_id = BufTableInsert(&newTag, newHash, victim_buf_hdr->buf_id);
1674  if (existing_buf_id >= 0)
1675  {
1676  BufferDesc *existing_buf_hdr;
1677  bool valid;
1678 
1679  /*
1680  * Got a collision. Someone has already done what we were about to do.
1681  * We'll just handle this as if it were found in the buffer pool in
1682  * the first place. First, give up the buffer we were planning to
1683  * use.
1684  *
1685  * We could do this after releasing the partition lock, but then we'd
1686  * have to call ResourceOwnerEnlarge() & ReservePrivateRefCountEntry()
1687  * before acquiring the lock, for the rare case of such a collision.
1688  */
1689  UnpinBuffer(victim_buf_hdr);
1690 
1691  /*
1692  * The victim buffer we acquired previously is clean and unused, let
1693  * it be found again quickly
1694  */
1695  StrategyFreeBuffer(victim_buf_hdr);
1696 
1697  /* remaining code should match code at top of routine */
1698 
1699  existing_buf_hdr = GetBufferDescriptor(existing_buf_id);
1700 
1701  valid = PinBuffer(existing_buf_hdr, strategy);
1702 
1703  /* Can release the mapping lock as soon as we've pinned it */
1704  LWLockRelease(newPartitionLock);
1705 
1706  *foundPtr = true;
1707 
1708  if (!valid)
1709  {
1710  /*
1711  * We can only get here if (a) someone else is still reading in
1712  * the page, (b) a previous read attempt failed, or (c) someone
1713  * called StartReadBuffers() but not yet WaitReadBuffers().
1714  */
1715  *foundPtr = false;
1716  }
1717 
1718  return existing_buf_hdr;
1719  }
1720 
1721  /*
1722  * Need to lock the buffer header too in order to change its tag.
1723  */
1724  victim_buf_state = LockBufHdr(victim_buf_hdr);
1725 
1726  /* some sanity checks while we hold the buffer header lock */
1727  Assert(BUF_STATE_GET_REFCOUNT(victim_buf_state) == 1);
1728  Assert(!(victim_buf_state & (BM_TAG_VALID | BM_VALID | BM_DIRTY | BM_IO_IN_PROGRESS)));
1729 
1730  victim_buf_hdr->tag = newTag;
1731 
1732  /*
1733  * Make sure BM_PERMANENT is set for buffers that must be written at every
1734  * checkpoint. Unlogged buffers only need to be written at shutdown
1735  * checkpoints, except for their "init" forks, which need to be treated
1736  * just like permanent relations.
1737  */
1738  victim_buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
1739  if (relpersistence == RELPERSISTENCE_PERMANENT || forkNum == INIT_FORKNUM)
1740  victim_buf_state |= BM_PERMANENT;
1741 
1742  UnlockBufHdr(victim_buf_hdr, victim_buf_state);
1743 
1744  LWLockRelease(newPartitionLock);
1745 
1746  /*
1747  * Buffer contents are currently invalid.
1748  */
1749  *foundPtr = false;
1750 
1751  return victim_buf_hdr;
1752 }
1753 
1754 /*
1755  * InvalidateBuffer -- mark a shared buffer invalid and return it to the
1756  * freelist.
1757  *
1758  * The buffer header spinlock must be held at entry. We drop it before
1759  * returning. (This is sane because the caller must have locked the
1760  * buffer in order to be sure it should be dropped.)
1761  *
1762  * This is used only in contexts such as dropping a relation. We assume
1763  * that no other backend could possibly be interested in using the page,
1764  * so the only reason the buffer might be pinned is if someone else is
1765  * trying to write it out. We have to let them finish before we can
1766  * reclaim the buffer.
1767  *
1768  * The buffer could get reclaimed by someone else while we are waiting
1769  * to acquire the necessary locks; if so, don't mess it up.
1770  */
1771 static void
1772 InvalidateBuffer(BufferDesc *buf)
1773 {
1774  BufferTag oldTag;
1775  uint32 oldHash; /* hash value for oldTag */
1776  LWLock *oldPartitionLock; /* buffer partition lock for it */
1777  uint32 oldFlags;
1778  uint32 buf_state;
1779 
1780  /* Save the original buffer tag before dropping the spinlock */
1781  oldTag = buf->tag;
1782 
1783  buf_state = pg_atomic_read_u32(&buf->state);
1784  Assert(buf_state & BM_LOCKED);
1785  UnlockBufHdr(buf, buf_state);
1786 
1787  /*
1788  * Need to compute the old tag's hashcode and partition lock ID. XXX is it
1789  * worth storing the hashcode in BufferDesc so we need not recompute it
1790  * here? Probably not.
1791  */
1792  oldHash = BufTableHashCode(&oldTag);
1793  oldPartitionLock = BufMappingPartitionLock(oldHash);
1794 
1795 retry:
1796 
1797  /*
1798  * Acquire exclusive mapping lock in preparation for changing the buffer's
1799  * association.
1800  */
1801  LWLockAcquire(oldPartitionLock, LW_EXCLUSIVE);
1802 
1803  /* Re-lock the buffer header */
1804  buf_state = LockBufHdr(buf);
1805 
1806  /* If it's changed while we were waiting for lock, do nothing */
1807  if (!BufferTagsEqual(&buf->tag, &oldTag))
1808  {
1809  UnlockBufHdr(buf, buf_state);
1810  LWLockRelease(oldPartitionLock);
1811  return;
1812  }
1813 
1814  /*
1815  * We assume the only reason for it to be pinned is that someone else is
1816  * flushing the page out. Wait for them to finish. (This could be an
1817  * infinite loop if the refcount is messed up... it would be nice to time
1818  * out after awhile, but there seems no way to be sure how many loops may
1819  * be needed. Note that if the other guy has pinned the buffer but not
1820  * yet done StartBufferIO, WaitIO will fall through and we'll effectively
1821  * be busy-looping here.)
1822  */
1823  if (BUF_STATE_GET_REFCOUNT(buf_state) != 0)
1824  {
1825  UnlockBufHdr(buf, buf_state);
1826  LWLockRelease(oldPartitionLock);
1827  /* safety check: should definitely not be our *own* pin */
1828  if (GetPrivateRefCount(BufferDescriptorGetBuffer(buf)) > 0)
1829  elog(ERROR, "buffer is pinned in InvalidateBuffer");
1830  WaitIO(buf);
1831  goto retry;
1832  }
1833 
1834  /*
1835  * Clear out the buffer's tag and flags. We must do this to ensure that
1836  * linear scans of the buffer array don't think the buffer is valid.
1837  */
1838  oldFlags = buf_state & BUF_FLAG_MASK;
1839  ClearBufferTag(&buf->tag);
1840  buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
1841  UnlockBufHdr(buf, buf_state);
1842 
1843  /*
1844  * Remove the buffer from the lookup hashtable, if it was in there.
1845  */
1846  if (oldFlags & BM_TAG_VALID)
1847  BufTableDelete(&oldTag, oldHash);
1848 
1849  /*
1850  * Done with mapping lock.
1851  */
1852  LWLockRelease(oldPartitionLock);
1853 
1854  /*
1855  * Insert the buffer at the head of the list of free buffers.
1856  */
1857  StrategyFreeBuffer(buf);
1858 }
1859 
1860 /*
1861  * Helper routine for GetVictimBuffer()
1862  *
1863  * Needs to be called on a buffer with a valid tag, pinned, but without the
1864  * buffer header spinlock held.
1865  *
1866  * Returns true if the buffer can be reused, in which case the buffer is only
1867  * pinned by this backend and marked as invalid, false otherwise.
1868  */
1869 static bool
1870 InvalidateVictimBuffer(BufferDesc *buf_hdr)
1871 {
1872  uint32 buf_state;
1873  uint32 hash;
1874  LWLock *partition_lock;
1875  BufferTag tag;
1876 
1877  Assert(GetPrivateRefCount(BufferDescriptorGetBuffer(buf_hdr)) == 1);
1878 
1879  /* have buffer pinned, so it's safe to read tag without lock */
1880  tag = buf_hdr->tag;
1881 
1882  hash = BufTableHashCode(&tag);
1883  partition_lock = BufMappingPartitionLock(hash);
1884 
1885  LWLockAcquire(partition_lock, LW_EXCLUSIVE);
1886 
1887  /* lock the buffer header */
1888  buf_state = LockBufHdr(buf_hdr);
1889 
1890  /*
1891  * We have the buffer pinned, so nobody else should have been able to
1892  * unset this concurrently.
1893  */
1894  Assert(buf_state & BM_TAG_VALID);
1895  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
1896  Assert(BufferTagsEqual(&buf_hdr->tag, &tag));
1897 
1898  /*
1899  * If somebody else pinned the buffer since, or even worse, dirtied it,
1900  * give up on this buffer: It's clearly in use.
1901  */
1902  if (BUF_STATE_GET_REFCOUNT(buf_state) != 1 || (buf_state & BM_DIRTY))
1903  {
1904  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
1905 
1906  UnlockBufHdr(buf_hdr, buf_state);
1907  LWLockRelease(partition_lock);
1908 
1909  return false;
1910  }
1911 
1912  /*
1913  * Clear out the buffer's tag and flags and usagecount. This is not
1914  * strictly required, as BM_TAG_VALID/BM_VALID needs to be checked before
1915  * doing anything with the buffer. But currently it's beneficial, as
1916  * several linear scans of shared buffers use the tag as a cheap
1917  * pre-check (see e.g. FlushDatabaseBuffers()).
1918  */
1919  ClearBufferTag(&buf_hdr->tag);
1920  buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
1921  UnlockBufHdr(buf_hdr, buf_state);
1922 
1923  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
1924 
1925  /* finally delete buffer from the buffer mapping table */
1926  BufTableDelete(&tag, hash);
1927 
1928  LWLockRelease(partition_lock);
1929 
1930  Assert(!(buf_state & (BM_DIRTY | BM_VALID | BM_TAG_VALID)));
1931  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
1933 
1934  return true;
1935 }
1936 
1937 static Buffer
1938 GetVictimBuffer(BufferAccessStrategy strategy, IOContext io_context)
1939 {
1940  BufferDesc *buf_hdr;
1941  Buffer buf;
1942  uint32 buf_state;
1943  bool from_ring;
1944 
1945  /*
1946  * Ensure, while the spinlock's not yet held, that there's a free refcount
1947  * entry, and a resource owner slot for the pin.
1948  */
1949  ReservePrivateRefCountEntry();
1950  ResourceOwnerEnlarge(CurrentResourceOwner);
1951 
1952  /* we return here if a prospective victim buffer gets used concurrently */
1953 again:
1954 
1955  /*
1956  * Select a victim buffer. The buffer is returned with its header
1957  * spinlock still held!
1958  */
1959  buf_hdr = StrategyGetBuffer(strategy, &buf_state, &from_ring);
1960  buf = BufferDescriptorGetBuffer(buf_hdr);
1961 
1962  Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 0);
1963 
1964  /* Pin the buffer and then release the buffer spinlock */
1965  PinBuffer_Locked(buf_hdr);
1966 
1967  /*
1968  * We shouldn't have any other pins for this buffer.
1969  */
1971 
1972  /*
1973  * If the buffer was dirty, try to write it out. There is a race
1974  * condition here, in that someone might dirty it after we released the
1975  * buffer header lock above, or even while we are writing it out (since
1976  * our share-lock won't prevent hint-bit updates). We will recheck the
1977  * dirty bit after re-locking the buffer header.
1978  */
1979  if (buf_state & BM_DIRTY)
1980  {
1981  LWLock *content_lock;
1982 
1983  Assert(buf_state & BM_TAG_VALID);
1984  Assert(buf_state & BM_VALID);
1985 
1986  /*
1987  * We need a share-lock on the buffer contents to write it out (else
1988  * we might write invalid data, eg because someone else is compacting
1989  * the page contents while we write). We must use a conditional lock
1990  * acquisition here to avoid deadlock. Even though the buffer was not
1991  * pinned (and therefore surely not locked) when StrategyGetBuffer
1992  * returned it, someone else could have pinned and exclusive-locked it
1993  * by the time we get here. If we try to get the lock unconditionally,
1994  * we'd block waiting for them; if they later block waiting for us,
1995  * deadlock ensues. (This has been observed to happen when two
1996  * backends are both trying to split btree index pages, and the second
1997  * one just happens to be trying to split the page the first one got
1998  * from StrategyGetBuffer.)
1999  */
2000  content_lock = BufferDescriptorGetContentLock(buf_hdr);
2001  if (!LWLockConditionalAcquire(content_lock, LW_SHARED))
2002  {
2003  /*
2004  * Someone else has locked the buffer, so give it up and loop back
2005  * to get another one.
2006  */
2007  UnpinBuffer(buf_hdr);
2008  goto again;
2009  }
2010 
2011  /*
2012  * If using a nondefault strategy, and writing the buffer would
2013  * require a WAL flush, let the strategy decide whether to go ahead
2014  * and write/reuse the buffer or to choose another victim. We need a
2015  * lock to inspect the page LSN, so this can't be done inside
2016  * StrategyGetBuffer.
2017  */
2018  if (strategy != NULL)
2019  {
2020  XLogRecPtr lsn;
2021 
2022  /* Read the LSN while holding buffer header lock */
2023  buf_state = LockBufHdr(buf_hdr);
2024  lsn = BufferGetLSN(buf_hdr);
2025  UnlockBufHdr(buf_hdr, buf_state);
2026 
2027  if (XLogNeedsFlush(lsn)
2028  && StrategyRejectBuffer(strategy, buf_hdr, from_ring))
2029  {
2030  LWLockRelease(content_lock);
2031  UnpinBuffer(buf_hdr);
2032  goto again;
2033  }
2034  }
2035 
2036  /* OK, do the I/O */
2037  FlushBuffer(buf_hdr, NULL, IOOBJECT_RELATION, io_context);
2038  LWLockRelease(content_lock);
2039 
2040  ScheduleBufferTagForWriteback(&BackendWritebackContext, io_context,
2041  &buf_hdr->tag);
2042  }
2043 
2044 
2045  if (buf_state & BM_VALID)
2046  {
2047  /*
2048  * When a BufferAccessStrategy is in use, blocks evicted from shared
2049  * buffers are counted as IOOP_EVICT in the corresponding context
2050  * (e.g. IOCONTEXT_BULKWRITE). Shared buffers are evicted by a
2051  * strategy in two cases: 1) while initially claiming buffers for the
2052  * strategy ring 2) to replace an existing strategy ring buffer
2053  * because it is pinned or in use and cannot be reused.
2054  *
2055  * Blocks evicted from buffers already in the strategy ring are
2056  * counted as IOOP_REUSE in the corresponding strategy context.
2057  *
2058  * At this point, we can accurately count evictions and reuses,
2059  * because we have successfully claimed the valid buffer. Previously,
2060  * we may have been forced to release the buffer due to concurrent
2061  * pinners or erroring out.
2062  */
2063  pgstat_count_io_op(IOOBJECT_RELATION, io_context,
2064  from_ring ? IOOP_REUSE : IOOP_EVICT);
2065  }
2066 
2067  /*
2068  * If the buffer has an entry in the buffer mapping table, delete it. This
2069  * can fail because another backend could have pinned or dirtied the
2070  * buffer.
2071  */
2072  if ((buf_state & BM_TAG_VALID) && !InvalidateVictimBuffer(buf_hdr))
2073  {
2074  UnpinBuffer(buf_hdr);
2075  goto again;
2076  }
2077 
2078  /* a final set of sanity checks */
2079 #ifdef USE_ASSERT_CHECKING
2080  buf_state = pg_atomic_read_u32(&buf_hdr->state);
2081 
2082  Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 1);
2083  Assert(!(buf_state & (BM_TAG_VALID | BM_VALID | BM_DIRTY)));
2084 
2085  CheckBufferIsPinnedOnce(buf);
2086 #endif
2087 
2088  return buf;
2089 }
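/*
 * [Illustrative sketch added for this documentation page; not part of
 * bufmgr.c. It restates the deadlock-avoidance rule used by the victim
 * selection above: while evicting, never *wait* for a content lock, only
 * take it opportunistically and otherwise move on to another victim.
 * "lock" here is an arbitrary LWLock supplied by the caller.]
 */
static bool
example_try_share_lock_for_eviction(LWLock *lock)
{
	if (!LWLockConditionalAcquire(lock, LW_SHARED))
		return false;			/* somebody holds it; pick another victim */

	/* ... the buffer would be written out here under the share lock ... */

	LWLockRelease(lock);
	return true;
}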
2090 
2091 /*
2092  * Limit the number of pins a batch operation may additionally acquire, to
2093  * avoid running out of pinnable buffers.
2094  *
2095  * One additional pin is always allowed, as otherwise the operation likely
2096  * cannot be performed at all.
2097  *
2098  * The number of allowed pins for a backend is computed based on
2099  * shared_buffers and the maximum number of connections possible. That's very
2100  * pessimistic, but outside of toy-sized shared_buffers it should allow
2101  * sufficient pins.
2102  */
2103 void
2104 LimitAdditionalPins(uint32 *additional_pins)
2105 {
2106  uint32 max_backends;
2107  int max_proportional_pins;
2108 
2109  if (*additional_pins <= 1)
2110  return;
2111 
2112  max_backends = MaxBackends + NUM_AUXILIARY_PROCS;
2113  max_proportional_pins = NBuffers / max_backends;
2114 
2115  /*
2116  * Subtract the approximate number of buffers already pinned by this
2117  * backend. We get the number of "overflowed" pins for free, but don't
2118  * know the number of pins in PrivateRefCountArray. The cost of
2119  * calculating that exactly doesn't seem worth it, so just assume the max.
2120  */
2121  max_proportional_pins -= PrivateRefCountOverflowed + REFCOUNT_ARRAY_ENTRIES;
2122 
2123  if (max_proportional_pins <= 0)
2124  max_proportional_pins = 1;
2125 
2126  if (*additional_pins > max_proportional_pins)
2127  *additional_pins = max_proportional_pins;
2128 }
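/*
 * [Illustrative arithmetic added for this documentation page; not part of
 * bufmgr.c. Hypothetical numbers: NBuffers = 16384 and 110 backends
 * including auxiliary processes, so each backend's proportional share is
 * 16384 / 110 = 148 pins; after assuming the worst case of 8 pins already
 * tracked in PrivateRefCountArray (and none overflowed), a request for 256
 * additional pins would be clamped to 140.]
 */
static void
example_limit_additional_pins(void)
{
	uint32		additional_pins = 256;	/* what the caller would like */
	int			max_proportional_pins = 16384 / 110;	/* 148 */

	max_proportional_pins -= 0 + 8; /* overflowed pins + REFCOUNT_ARRAY_ENTRIES */
	if (max_proportional_pins <= 0)
		max_proportional_pins = 1;
	if (additional_pins > (uint32) max_proportional_pins)
		additional_pins = max_proportional_pins;	/* now 140 */

	(void) additional_pins;
}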
2129 
2130 /*
2131  * Logic shared between ExtendBufferedRelBy(), ExtendBufferedRelTo(). Just to
2132  * avoid duplicating the tracing and relpersistence related logic.
2133  */
2134 static BlockNumber
2135 ExtendBufferedRelCommon(BufferManagerRelation bmr,
2136  ForkNumber fork,
2137  BufferAccessStrategy strategy,
2138  uint32 flags,
2139  uint32 extend_by,
2140  BlockNumber extend_upto,
2141  Buffer *buffers,
2142  uint32 *extended_by)
2143 {
2144  BlockNumber first_block;
2145 
2146  TRACE_POSTGRESQL_BUFFER_EXTEND_START(fork,
2147  bmr.smgr->smgr_rlocator.locator.spcOid,
2148  bmr.smgr->smgr_rlocator.locator.dbOid,
2149  bmr.smgr->smgr_rlocator.locator.relNumber,
2150  bmr.smgr->smgr_rlocator.backend,
2151  extend_by);
2152 
2153  if (bmr.relpersistence == RELPERSISTENCE_TEMP)
2154  first_block = ExtendBufferedRelLocal(bmr, fork, flags,
2155  extend_by, extend_upto,
2156  buffers, &extend_by);
2157  else
2158  first_block = ExtendBufferedRelShared(bmr, fork, strategy, flags,
2159  extend_by, extend_upto,
2160  buffers, &extend_by);
2161  *extended_by = extend_by;
2162 
2163  TRACE_POSTGRESQL_BUFFER_EXTEND_DONE(fork,
2164  bmr.smgr->smgr_rlocator.locator.spcOid,
2165  bmr.smgr->smgr_rlocator.locator.dbOid,
2166  bmr.smgr->smgr_rlocator.locator.relNumber,
2167  bmr.smgr->smgr_rlocator.backend,
2168  *extended_by,
2169  first_block);
2170 
2171  return first_block;
2172 }
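/*
 * [Illustrative sketch added for this documentation page; not part of
 * bufmgr.c. It shows roughly how a caller might reach the common routine
 * above through the public wrapper ExtendBufferedRelBy(); the flag choice,
 * locking context and error handling are the caller's business and are
 * omitted here.]
 */
static void
example_extend_relation_by_two(Relation rel)
{
	Buffer		new_buffers[2];
	uint32		extended_by = 0;

	(void) ExtendBufferedRelBy(BMR_REL(rel), MAIN_FORKNUM,
							   NULL,	/* no strategy */
							   0,		/* no EB_* flags */
							   2,		/* extend_by */
							   new_buffers, &extended_by);

	/* the new blocks come back pinned; release them when done */
	for (uint32 i = 0; i < extended_by; i++)
		ReleaseBuffer(new_buffers[i]);
}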
2173 
2174 /*
2175  * Implementation of ExtendBufferedRelBy() and ExtendBufferedRelTo() for
2176  * shared buffers.
2177  */
2178 static BlockNumber
2179 ExtendBufferedRelShared(BufferManagerRelation bmr,
2180  ForkNumber fork,
2181  BufferAccessStrategy strategy,
2182  uint32 flags,
2183  uint32 extend_by,
2184  BlockNumber extend_upto,
2185  Buffer *buffers,
2186  uint32 *extended_by)
2187 {
2188  BlockNumber first_block;
2189  IOContext io_context = IOContextForStrategy(strategy);
2190  instr_time io_start;
2191 
2192  LimitAdditionalPins(&extend_by);
2193 
2194  /*
2195  * Acquire victim buffers for extension without holding extension lock.
2196  * Writing out victim buffers is the most expensive part of extending the
2197  * relation, particularly when doing so requires WAL flushes. Zeroing out
2198  * the buffers is also quite expensive, so do that before holding the
2199  * extension lock as well.
2200  *
2201  * These pages are pinned by us and not valid. While we hold the pin they
2202  * can't be acquired as victim buffers by another backend.
2203  */
2204  for (uint32 i = 0; i < extend_by; i++)
2205  {
2206  Block buf_block;
2207 
2208  buffers[i] = GetVictimBuffer(strategy, io_context);
2209  buf_block = BufHdrGetBlock(GetBufferDescriptor(buffers[i] - 1));
2210 
2211  /* new buffers are zero-filled */
2212  MemSet((char *) buf_block, 0, BLCKSZ);
2213  }
2214 
2215  /*
2216  * Lock relation against concurrent extensions, unless requested not to.
2217  *
2218  * We use the same extension lock for all forks. That's unnecessarily
2219  * restrictive, but currently extensions for forks don't happen often
2220  * enough to make it worth locking more granularly.
2221  *
2222  * Note that another backend might have extended the relation by the time
2223  * we get the lock.
2224  */
2225  if (!(flags & EB_SKIP_EXTENSION_LOCK))
2226  LockRelationForExtension(bmr.rel, ExclusiveLock);
2227 
2228  /*
2229  * If requested, invalidate size cache, so that smgrnblocks asks the
2230  * kernel.
2231  */
2232  if (flags & EB_CLEAR_SIZE_CACHE)
2233  bmr.smgr->smgr_cached_nblocks[fork] = InvalidBlockNumber;
2234 
2235  first_block = smgrnblocks(bmr.smgr, fork);
2236 
2237  /*
2238  * Now that we have the accurate relation size, check if the caller wants
2239  * us to extend to only up to a specific size. If there were concurrent
2240  * extensions, we might have acquired too many buffers and need to release
2241  * them.
2242  */
2243  if (extend_upto != InvalidBlockNumber)
2244  {
2245  uint32 orig_extend_by = extend_by;
2246 
2247  if (first_block > extend_upto)
2248  extend_by = 0;
2249  else if ((uint64) first_block + extend_by > extend_upto)
2250  extend_by = extend_upto - first_block;
2251 
2252  for (uint32 i = extend_by; i < orig_extend_by; i++)
2253  {
2254  BufferDesc *buf_hdr = GetBufferDescriptor(buffers[i] - 1);
2255 
2256  /*
2257  * The victim buffer we acquired previously is clean and unused,
2258  * let it be found again quickly
2259  */
2260  StrategyFreeBuffer(buf_hdr);
2261  UnpinBuffer(buf_hdr);
2262  }
2263 
2264  if (extend_by == 0)
2265  {
2266  if (!(flags & EB_SKIP_EXTENSION_LOCK))
2267  UnlockRelationForExtension(bmr.rel, ExclusiveLock);
2268  *extended_by = extend_by;
2269  return first_block;
2270  }
2271  }
2272 
2273  /* Fail if relation is already at maximum possible length */
2274  if ((uint64) first_block + extend_by >= MaxBlockNumber)
2275  ereport(ERROR,
2276  (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
2277  errmsg("cannot extend relation %s beyond %u blocks",
2278  relpath(bmr.smgr->smgr_rlocator, fork),
2279  MaxBlockNumber)));
2280 
2281  /*
2282  * Insert buffers into buffer table, mark as IO_IN_PROGRESS.
2283  *
2284  * This needs to happen before we extend the relation, because as soon as
2285  * we do, other backends can start to read in those pages.
2286  */
2287  for (uint32 i = 0; i < extend_by; i++)
2288  {
2289  Buffer victim_buf = buffers[i];
2290  BufferDesc *victim_buf_hdr = GetBufferDescriptor(victim_buf - 1);
2291  BufferTag tag;
2292  uint32 hash;
2293  LWLock *partition_lock;
2294  int existing_id;
2295 
2296  /* in case we need to pin an existing buffer below */
2297  ResourceOwnerEnlarge(CurrentResourceOwner);
2298  ReservePrivateRefCountEntry();
2299 
2300  InitBufferTag(&tag, &bmr.smgr->smgr_rlocator.locator, fork, first_block + i);
2301  hash = BufTableHashCode(&tag);
2302  partition_lock = BufMappingPartitionLock(hash);
2303 
2304  LWLockAcquire(partition_lock, LW_EXCLUSIVE);
2305 
2306  existing_id = BufTableInsert(&tag, hash, victim_buf_hdr->buf_id);
2307 
2308  /*
2309  * We get here only in the corner case where we are trying to extend
2310  * the relation but we found a pre-existing buffer. This can happen
2311  * because a prior attempt at extending the relation failed, and
2312  * because mdread doesn't complain about reads beyond EOF (when
2313  * zero_damaged_pages is ON) and so a previous attempt to read a block
2314  * beyond EOF could have left a "valid" zero-filled buffer.
2315  * Unfortunately, we have also seen this case occurring because of
2316  * buggy Linux kernels that sometimes return an lseek(SEEK_END) result
2317  * that doesn't account for a recent write. In that situation, the
2318  * pre-existing buffer would contain valid data that we don't want to
2319  * overwrite. Since the legitimate cases should always have left a
2320  * zero-filled buffer, complain if not PageIsNew.
2321  */
2322  if (existing_id >= 0)
2323  {
2324  BufferDesc *existing_hdr = GetBufferDescriptor(existing_id);
2325  Block buf_block;
2326  bool valid;
2327 
2328  /*
2329  * Pin the existing buffer before releasing the partition lock,
2330  * preventing it from being evicted.
2331  */
2332  valid = PinBuffer(existing_hdr, strategy);
2333 
2334  LWLockRelease(partition_lock);
2335 
2336  /*
2337  * The victim buffer we acquired previously is clean and unused,
2338  * let it be found again quickly
2339  */
2340  StrategyFreeBuffer(victim_buf_hdr);
2341  UnpinBuffer(victim_buf_hdr);
2342 
2343  buffers[i] = BufferDescriptorGetBuffer(existing_hdr);
2344  buf_block = BufHdrGetBlock(existing_hdr);
2345 
2346  if (valid && !PageIsNew((Page) buf_block))
2347  ereport(ERROR,
2348  (errmsg("unexpected data beyond EOF in block %u of relation %s",
2349  existing_hdr->tag.blockNum, relpath(bmr.smgr->smgr_rlocator, fork)),
2350  errhint("This has been seen to occur with buggy kernels; consider updating your system.")));
2351 
2352  /*
2353  * We *must* do smgr[zero]extend before succeeding, else the page
2354  * will not be reserved by the kernel, and the next P_NEW call
2355  * will decide to return the same page. Clear the BM_VALID bit,
2356  * do StartBufferIO() and proceed.
2357  *
2358  * Loop to handle the very small possibility that someone re-sets
2359  * BM_VALID between our clearing it and StartBufferIO inspecting
2360  * it.
2361  */
2362  do
2363  {
2364  uint32 buf_state = LockBufHdr(existing_hdr);
2365 
2366  buf_state &= ~BM_VALID;
2367  UnlockBufHdr(existing_hdr, buf_state);
2368  } while (!StartBufferIO(existing_hdr, true, false));
2369  }
2370  else
2371  {
2372  uint32 buf_state;
2373 
2374  buf_state = LockBufHdr(victim_buf_hdr);
2375 
2376  /* some sanity checks while we hold the buffer header lock */
2377  Assert(!(buf_state & (BM_VALID | BM_TAG_VALID | BM_DIRTY | BM_JUST_DIRTIED)));
2378  Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 1);
2379 
2380  victim_buf_hdr->tag = tag;
2381 
2382  buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
2383  if (bmr.relpersistence == RELPERSISTENCE_PERMANENT || fork == INIT_FORKNUM)
2384  buf_state |= BM_PERMANENT;
2385 
2386  UnlockBufHdr(victim_buf_hdr, buf_state);
2387 
2388  LWLockRelease(partition_lock);
2389 
2390  /* XXX: could combine the locked operations in it with the above */
2391  StartBufferIO(victim_buf_hdr, true, false);
2392  }
2393  }
2394 
2396 
2397  /*
2398  * Note: if smgrzeroextend fails, we will end up with buffers that are
2399  * allocated but not marked BM_VALID. The next relation extension will
2400  * still select the same block number (because the relation didn't get any
2401  * longer on disk) and so future attempts to extend the relation will find
2402  * the same buffers (if they have not been recycled) but come right back
2403  * here to try smgrzeroextend again.
2404  *
2405  * We don't need to set checksum for all-zero pages.
2406  */
2407  smgrzeroextend(bmr.smgr, fork, first_block, extend_by, false);
2408 
2409  /*
2410  * Release the file-extension lock; it's now OK for someone else to extend
2411  * the relation some more.
2412  *
2413  * We remove IO_IN_PROGRESS after this, as waking up waiting backends can
2414  * take noticeable time.
2415  */
2416  if (!(flags & EB_SKIP_EXTENSION_LOCK))
2417  UnlockRelationForExtension(bmr.rel, ExclusiveLock);
2418 
2419  pgstat_count_io_op_time(IOOBJECT_RELATION, io_context, IOOP_EXTEND,
2420  io_start, extend_by);
2421 
2422  /* Set BM_VALID, terminate IO, and wake up any waiters */
2423  for (uint32 i = 0; i < extend_by; i++)
2424  {
2425  Buffer buf = buffers[i];
2426  BufferDesc *buf_hdr = GetBufferDescriptor(buf - 1);
2427  bool lock = false;
2428 
2429  if (flags & EB_LOCK_FIRST && i == 0)
2430  lock = true;
2431  else if (flags & EB_LOCK_TARGET)
2432  {
2433  Assert(extend_upto != InvalidBlockNumber);
2434  if (first_block + i + 1 == extend_upto)
2435  lock = true;
2436  }
2437 
2438  if (lock)
2439  LWLockAcquire(BufferDescriptorGetContentLock(buf_hdr), LW_EXCLUSIVE);
2440 
2441  TerminateBufferIO(buf_hdr, false, BM_VALID, true);
2442  }
2443 
2444  pgBufferUsage.shared_blks_written += extend_by;
2445 
2446  *extended_by = extend_by;
2447 
2448  return first_block;
2449 }
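/*
 * [Illustrative arithmetic added for this documentation page; not part of
 * bufmgr.c. It restates the extend_upto clamping above with hypothetical
 * numbers: the caller wanted 8 new blocks up to block 100, but a concurrent
 * extension already grew the fork to 96 blocks, so only 4 of the acquired
 * victim buffers are used and the rest are handed back.]
 */
static uint32
example_clamp_extend_by(void)
{
	BlockNumber first_block = 96;	/* size found under the extension lock */
	BlockNumber extend_upto = 100;	/* caller's target length */
	uint32		extend_by = 8;		/* victim buffers acquired optimistically */

	if (first_block > extend_upto)
		extend_by = 0;
	else if ((uint64) first_block + extend_by > extend_upto)
		extend_by = extend_upto - first_block;	/* 4 */

	return extend_by;
}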
2450 
2451 /*
2452  * BufferIsExclusiveLocked
2453  *
2454  * Checks if buffer is exclusive-locked.
2455  *
2456  * Buffer must be pinned.
2457  */
2458 bool
2459 BufferIsExclusiveLocked(Buffer buffer)
2460 {
2461  BufferDesc *bufHdr;
2462 
2463  if (BufferIsLocal(buffer))
2464  {
2465  int bufid = -buffer - 1;
2466 
2467  bufHdr = GetLocalBufferDescriptor(bufid);
2468  }
2469  else
2470  {
2471  bufHdr = GetBufferDescriptor(buffer - 1);
2472  }
2473 
2474  Assert(BufferIsPinned(buffer));
2475  return LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
2476  LW_EXCLUSIVE);
2477 }
2478 
2479 /*
2480  * BufferIsDirty
2481  *
2482  * Checks if buffer is already dirty.
2483  *
2484  * Buffer must be pinned and exclusive-locked. (Without an exclusive lock,
2485  * the result may be stale before it's returned.)
2486  */
2487 bool
2488 BufferIsDirty(Buffer buffer)
2489 {
2490  BufferDesc *bufHdr;
2491 
2492  if (BufferIsLocal(buffer))
2493  {
2494  int bufid = -buffer - 1;
2495 
2496  bufHdr = GetLocalBufferDescriptor(bufid);
2497  }
2498  else
2499  {
2500  bufHdr = GetBufferDescriptor(buffer - 1);
2501  }
2502 
2503  Assert(BufferIsPinned(buffer));
2504  Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
2505  LW_EXCLUSIVE));
2506 
2507  return pg_atomic_read_u32(&bufHdr->state) & BM_DIRTY;
2508 }
2509 
2510 /*
2511  * MarkBufferDirty
2512  *
2513  * Marks buffer contents as dirty (actual write happens later).
2514  *
2515  * Buffer must be pinned and exclusive-locked. (If caller does not hold
2516  * exclusive lock, then somebody could be in process of writing the buffer,
2517  * leading to risk of bad data written to disk.)
2518  */
2519 void
2520 MarkBufferDirty(Buffer buffer)
2521 {
2522  BufferDesc *bufHdr;
2523  uint32 buf_state;
2524  uint32 old_buf_state;
2525 
2526  if (!BufferIsValid(buffer))
2527  elog(ERROR, "bad buffer ID: %d", buffer);
2528 
2529  if (BufferIsLocal(buffer))
2530  {
2531  MarkLocalBufferDirty(buffer);
2532  return;
2533  }
2534 
2535  bufHdr = GetBufferDescriptor(buffer - 1);
2536 
2537  Assert(BufferIsPinned(buffer));
2538  Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
2539  LW_EXCLUSIVE));
2540 
2541  old_buf_state = pg_atomic_read_u32(&bufHdr->state);
2542  for (;;)
2543  {
2544  if (old_buf_state & BM_LOCKED)
2545  old_buf_state = WaitBufHdrUnlocked(bufHdr);
2546 
2547  buf_state = old_buf_state;
2548 
2549  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
2550  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
2551 
2552  if (pg_atomic_compare_exchange_u32(&bufHdr->state, &old_buf_state,
2553  buf_state))
2554  break;
2555  }
2556 
2557  /*
2558  * If the buffer was not dirty already, do vacuum accounting.
2559  */
2560  if (!(old_buf_state & BM_DIRTY))
2561  {
2562  VacuumPageDirty++;
2563  pgBufferUsage.shared_blks_dirtied++;
2564  if (VacuumCostActive)
2565  VacuumCostBalance += VacuumCostPageDirty;
2566  }
2567 }
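/*
 * [Illustrative sketch added for this documentation page; not part of
 * bufmgr.c. It shows the caller protocol the comment above requires: pin
 * the buffer, take the exclusive content lock, modify the page, mark it
 * dirty, then unlock and unpin. WAL logging and the critical section that a
 * real caller needs are omitted.]
 */
static void
example_dirty_one_page(Relation rel, BlockNumber blkno)
{
	Buffer		buf = ReadBuffer(rel, blkno);	/* pins the buffer */
	Page		page;

	LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
	page = BufferGetPage(buf);

	/* ... modify the page contents here ... */
	(void) page;

	MarkBufferDirty(buf);		/* actual write happens later */
	UnlockReleaseBuffer(buf);	/* drop content lock and pin */
}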
2568 
2569 /*
2570  * ReleaseAndReadBuffer -- combine ReleaseBuffer() and ReadBuffer()
2571  *
2572  * Formerly, this saved one cycle of acquiring/releasing the BufMgrLock
2573  * compared to calling the two routines separately. Now it's mainly just
2574  * a convenience function. However, if the passed buffer is valid and
2575  * already contains the desired block, we just return it as-is; and that
2576  * does save considerable work compared to a full release and reacquire.
2577  *
2578  * Note: it is OK to pass buffer == InvalidBuffer, indicating that no old
2579  * buffer actually needs to be released. This case is the same as ReadBuffer,
2580  * but can save some tests in the caller.
2581  */
2582 Buffer
2583 ReleaseAndReadBuffer(Buffer buffer,
2584  Relation relation,
2585  BlockNumber blockNum)
2586 {
2587  ForkNumber forkNum = MAIN_FORKNUM;
2588  BufferDesc *bufHdr;
2589 
2590  if (BufferIsValid(buffer))
2591  {
2593  if (BufferIsLocal(buffer))
2594  {
2595  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
2596  if (bufHdr->tag.blockNum == blockNum &&
2597  BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
2598  BufTagGetForkNum(&bufHdr->tag) == forkNum)
2599  return buffer;
2600  UnpinLocalBuffer(buffer);
2601  }
2602  else
2603  {
2604  bufHdr = GetBufferDescriptor(buffer - 1);
2605  /* we have pin, so it's ok to examine tag without spinlock */
2606  if (bufHdr->tag.blockNum == blockNum &&
2607  BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
2608  BufTagGetForkNum(&bufHdr->tag) == forkNum)
2609  return buffer;
2610  UnpinBuffer(bufHdr);
2611  }
2612  }
2613 
2614  return ReadBuffer(relation, blockNum);
2615 }
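/*
 * [Illustrative sketch added for this documentation page; not part of
 * bufmgr.c. ReleaseAndReadBuffer() is handy when walking a relation block
 * by block while holding at most one pin: passing the previous buffer (or
 * InvalidBuffer on the first iteration) saves an explicit ReleaseBuffer()
 * and is nearly free when the desired block is already pinned.]
 */
static void
example_walk_main_fork(Relation rel, BlockNumber nblocks)
{
	Buffer		buf = InvalidBuffer;

	for (BlockNumber blkno = 0; blkno < nblocks; blkno++)
	{
		buf = ReleaseAndReadBuffer(buf, rel, blkno);
		/* ... examine BufferGetPage(buf) under a content lock ... */
	}

	if (BufferIsValid(buf))
		ReleaseBuffer(buf);
}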
2616 
2617 /*
2618  * PinBuffer -- make buffer unavailable for replacement.
2619  *
2620  * For the default access strategy, the buffer's usage_count is incremented
2621  * when we first pin it; for other strategies we just make sure the usage_count
2622  * isn't zero. (The idea of the latter is that we don't want synchronized
2623  * heap scans to inflate the count, but we need it to not be zero to discourage
2624  * other backends from stealing buffers from our ring. As long as we cycle
2625  * through the ring faster than the global clock-sweep cycles, buffers in
2626  * our ring won't be chosen as victims for replacement by other backends.)
2627  *
2628  * This should be applied only to shared buffers, never local ones.
2629  *
2630  * Since buffers are pinned/unpinned very frequently, pin buffers without
2631  * taking the buffer header lock; instead update the state variable in a
2632  * loop of CAS operations. Hopefully it's just a single CAS.
2633  *
2634  * Note that ResourceOwnerEnlarge() and ReservePrivateRefCountEntry()
2635  * must have been done already.
2636  *
2637  * Returns true if buffer is BM_VALID, else false. This provision allows
2638  * some callers to avoid an extra spinlock cycle.
2639  */
2640 static bool
2641 PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy)
2642 {
2643  Buffer b = BufferDescriptorGetBuffer(buf);
2644  bool result;
2645  PrivateRefCountEntry *ref;
2646 
2647  Assert(!BufferIsLocal(b));
2648  Assert(ReservedRefCountEntry != NULL);
2649 
2650  ref = GetPrivateRefCountEntry(b, true);
2651 
2652  if (ref == NULL)
2653  {
2654  uint32 buf_state;
2655  uint32 old_buf_state;
2656 
2657  ref = NewPrivateRefCountEntry(b);
2658 
2659  old_buf_state = pg_atomic_read_u32(&buf->state);
2660  for (;;)
2661  {
2662  if (old_buf_state & BM_LOCKED)
2663  old_buf_state = WaitBufHdrUnlocked(buf);
2664 
2665  buf_state = old_buf_state;
2666 
2667  /* increase refcount */
2668  buf_state += BUF_REFCOUNT_ONE;
2669 
2670  if (strategy == NULL)
2671  {
2672  /* Default case: increase usagecount unless already max. */
2673  if (BUF_STATE_GET_USAGECOUNT(buf_state) < BM_MAX_USAGE_COUNT)
2674  buf_state += BUF_USAGECOUNT_ONE;
2675  }
2676  else
2677  {
2678  /*
2679  * Ring buffers shouldn't evict others from the pool. Thus we
2680  * don't make usagecount more than 1.
2681  */
2682  if (BUF_STATE_GET_USAGECOUNT(buf_state) == 0)
2683  buf_state += BUF_USAGECOUNT_ONE;
2684  }
2685 
2686  if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
2687  buf_state))
2688  {
2689  result = (buf_state & BM_VALID) != 0;
2690 
2691  /*
2692  * Assume that we acquired a buffer pin for the purposes of
2693  * Valgrind buffer client checks (even in !result case) to
2694  * keep things simple. Buffers that are unsafe to access are
2695  * not generally guaranteed to be marked undefined or
2696  * non-accessible in any case.
2697  */
2698  VALGRIND_MAKE_MEM_DEFINED(BufHdrGetBlock(buf), BLCKSZ);
2699  break;
2700  }
2701  }
2702  }
2703  else
2704  {
2705  /*
2706  * If we previously pinned the buffer, it is likely to be valid, but
2707  * it may not be if StartReadBuffers() was called and
2708  * WaitReadBuffers() hasn't been called yet. We'll check by loading
2709  * the flags without locking. This is racy, but it's OK to return
2710  * false spuriously: when WaitReadBuffers() calls StartBufferIO(),
2711  * it'll see that it's now valid.
2712  *
2713  * Note: We deliberately avoid a Valgrind client request here.
2714  * Individual access methods can optionally superimpose buffer page
2715  * client requests on top of our client requests to enforce that
2716  * buffers are only accessed while locked (and pinned). It's possible
2717  * that the buffer page is legitimately non-accessible here. We
2718  * cannot meddle with that.
2719  */
2720  result = (pg_atomic_read_u32(&buf->state) & BM_VALID) != 0;
2721  }
2722 
2723  ref->refcount++;
2724  Assert(ref->refcount > 0);
2725  ResourceOwnerRememberBuffer(CurrentResourceOwner, b);
2726  return result;
2727 }
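/*
 * [Illustrative sketch added for this documentation page; not part of
 * bufmgr.c. The lock-free pin above boils down to this compare-and-swap
 * retry idiom on the packed state word: read the current value, compute the
 * desired value, and retry if another backend changed the word in between.]
 */
static void
example_cas_increment(pg_atomic_uint32 *state)
{
	uint32		old_val = pg_atomic_read_u32(state);

	for (;;)
	{
		uint32		new_val = old_val + 1; /* e.g. BUF_REFCOUNT_ONE */

		/* on failure, old_val is refreshed with the currently stored value */
		if (pg_atomic_compare_exchange_u32(state, &old_val, new_val))
			break;
	}
}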
2728 
2729 /*
2730  * PinBuffer_Locked -- as above, but caller already locked the buffer header.
2731  * The spinlock is released before return.
2732  *
2733  * As this function is called with the spinlock held, the caller has to
2734  * previously call ReservePrivateRefCountEntry() and
2735  * ResourceOwnerEnlarge(CurrentResourceOwner);
2736  *
2737  * Currently, no callers of this function want to modify the buffer's
2738  * usage_count at all, so there's no need for a strategy parameter.
2739  * Also we don't bother with a BM_VALID test (the caller could check that for
2740  * itself).
2741  *
2742  * Also all callers only ever use this function when it's known that the
2743  * buffer can't have a preexisting pin by this backend. That allows us to skip
2744  * searching the private refcount array & hash, which is a boon, because the
2745  * spinlock is still held.
2746  *
2747  * Note: use of this routine is frequently mandatory, not just an optimization
2748  * to save a spin lock/unlock cycle, because we need to pin a buffer before
2749  * its state can change under us.
2750  */
2751 static void
2752 PinBuffer_Locked(BufferDesc *buf)
2753 {
2754  Buffer b;
2755  PrivateRefCountEntry *ref;
2756  uint32 buf_state;
2757 
2758  /*
2759  * As explained, we don't expect any preexisting pins. That allows us to
2760  * manipulate the PrivateRefCount after releasing the spinlock.
2761  */
2763 
2764  /*
2765  * Buffer can't have a preexisting pin, so mark its page as defined to
2766  * Valgrind (this is similar to the PinBuffer() case where the backend
2767  * doesn't already have a buffer pin)
2768  */
2770 
2771  /*
2772  * Since we hold the buffer spinlock, we can update the buffer state and
2773  * release the lock in one operation.
2774  */
2775  buf_state = pg_atomic_read_u32(&buf->state);
2776  Assert(buf_state & BM_LOCKED);
2777  buf_state += BUF_REFCOUNT_ONE;
2778  UnlockBufHdr(buf, buf_state);
2779 
2780  b = BufferDescriptorGetBuffer(buf);
2781 
2782  ref = NewPrivateRefCountEntry(b);
2783  ref->refcount++;
2784 
2785  ResourceOwnerRememberBuffer(CurrentResourceOwner, b);
2786 }
2787 
2788 /*
2789  * UnpinBuffer -- make buffer available for replacement.
2790  *
2791  * This should be applied only to shared buffers, never local ones. This
2792  * always adjusts CurrentResourceOwner.
2793  */
2794 static void
2795 UnpinBuffer(BufferDesc *buf)
2796 {
2797  Buffer b = BufferDescriptorGetBuffer(buf);
2798 
2799  ResourceOwnerForgetBuffer(CurrentResourceOwner, b);
2800  UnpinBufferNoOwner(buf);
2801 }
2802 
2803 static void
2804 UnpinBufferNoOwner(BufferDesc *buf)
2805 {
2806  PrivateRefCountEntry *ref;
2807  Buffer b = BufferDescriptorGetBuffer(buf);
2808 
2809  Assert(!BufferIsLocal(b));
2810 
2811  /* not moving as we're likely deleting it soon anyway */
2812  ref = GetPrivateRefCountEntry(b, false);
2813  Assert(ref != NULL);
2814  Assert(ref->refcount > 0);
2815  ref->refcount--;
2816  if (ref->refcount == 0)
2817  {
2818  uint32 buf_state;
2819  uint32 old_buf_state;
2820 
2821  /*
2822  * Mark buffer non-accessible to Valgrind.
2823  *
2824  * Note that the buffer may have already been marked non-accessible
2825  * within access method code that enforces that buffers are only
2826  * accessed while a buffer lock is held.
2827  */
2829 
2830  /* I'd better not still hold the buffer content lock */
2831  Assert(!LWLockHeldByMe(BufferDescriptorGetContentLock(buf)));
2832 
2833  /*
2834  * Decrement the shared reference count.
2835  *
2836  * Since buffer spinlock holder can update status using just write,
2837  * it's not safe to use atomic decrement here; thus use a CAS loop.
2838  */
2839  old_buf_state = pg_atomic_read_u32(&buf->state);
2840  for (;;)
2841  {
2842  if (old_buf_state & BM_LOCKED)
2843  old_buf_state = WaitBufHdrUnlocked(buf);
2844 
2845  buf_state = old_buf_state;
2846 
2847  buf_state -= BUF_REFCOUNT_ONE;
2848 
2849  if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
2850  buf_state))
2851  break;
2852  }
2853 
2854  /* Support LockBufferForCleanup() */
2855  if (buf_state & BM_PIN_COUNT_WAITER)
2856  {
2857  /*
2858  * Acquire the buffer header lock, re-check that there's a waiter.
2859  * Another backend could have unpinned this buffer, and already
2860  * woken up the waiter. There's no danger of the buffer being
2861  * replaced after we unpinned it above, as it's pinned by the
2862  * waiter.
2863  */
2864  buf_state = LockBufHdr(buf);
2865 
2866  if ((buf_state & BM_PIN_COUNT_WAITER) &&
2867  BUF_STATE_GET_REFCOUNT(buf_state) == 1)
2868  {
2869  /* we just released the last pin other than the waiter's */
2870  int wait_backend_pgprocno = buf->wait_backend_pgprocno;
2871 
2872  buf_state &= ~BM_PIN_COUNT_WAITER;
2873  UnlockBufHdr(buf, buf_state);
2874  ProcSendSignal(wait_backend_pgprocno);
2875  }
2876  else
2877  UnlockBufHdr(buf, buf_state);
2878  }
2879  ForgetPrivateRefCountEntry(ref);
2880  }
2881 }
2882 
2883 #define ST_SORT sort_checkpoint_bufferids
2884 #define ST_ELEMENT_TYPE CkptSortItem
2885 #define ST_COMPARE(a, b) ckpt_buforder_comparator(a, b)
2886 #define ST_SCOPE static
2887 #define ST_DEFINE
2888 #include <lib/sort_template.h>
2889 
2890 /*
2891  * BufferSync -- Write out all dirty buffers in the pool.
2892  *
2893  * This is called at checkpoint time to write out all dirty shared buffers.
2894  * The checkpoint request flags should be passed in. If CHECKPOINT_IMMEDIATE
2895  * is set, we disable delays between writes; if CHECKPOINT_IS_SHUTDOWN,
2896  * CHECKPOINT_END_OF_RECOVERY or CHECKPOINT_FLUSH_ALL is set, we write even
2897  * unlogged buffers, which are otherwise skipped. The remaining flags
2898  * currently have no effect here.
2899  */
2900 static void
2901 BufferSync(int flags)
2902 {
2903  uint32 buf_state;
2904  int buf_id;
2905  int num_to_scan;
2906  int num_spaces;
2907  int num_processed;
2908  int num_written;
2909  CkptTsStatus *per_ts_stat = NULL;
2910  Oid last_tsid;
2911  binaryheap *ts_heap;
2912  int i;
2913  int mask = BM_DIRTY;
2914  WritebackContext wb_context;
2915 
2916  /*
2917  * Unless this is a shutdown checkpoint or we have been explicitly told,
2918  * we write only permanent, dirty buffers. But at shutdown or end of
2919  * recovery, we write all dirty buffers.
2920  */
2921  if (!((flags & (CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_END_OF_RECOVERY |
2922  CHECKPOINT_FLUSH_ALL))))
2923  mask |= BM_PERMANENT;
2924 
2925  /*
2926  * Loop over all buffers, and mark the ones that need to be written with
2927  * BM_CHECKPOINT_NEEDED. Count them as we go (num_to_scan), so that we
2928  * can estimate how much work needs to be done.
2929  *
2930  * This allows us to write only those pages that were dirty when the
2931  * checkpoint began, and not those that get dirtied while it proceeds.
2932  * Whenever a page with BM_CHECKPOINT_NEEDED is written out, either by us
2933  * later in this function, or by normal backends or the bgwriter cleaning
2934  * scan, the flag is cleared. Any buffer dirtied after this point won't
2935  * have the flag set.
2936  *
2937  * Note that if we fail to write some buffer, we may leave buffers with
2938  * BM_CHECKPOINT_NEEDED still set. This is OK since any such buffer would
2939  * certainly need to be written for the next checkpoint attempt, too.
2940  */
2941  num_to_scan = 0;
2942  for (buf_id = 0; buf_id < NBuffers; buf_id++)
2943  {
2944  BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
2945 
2946  /*
2947  * Header spinlock is enough to examine BM_DIRTY, see comment in
2948  * SyncOneBuffer.
2949  */
2950  buf_state = LockBufHdr(bufHdr);
2951 
2952  if ((buf_state & mask) == mask)
2953  {
2954  CkptSortItem *item;
2955 
2956  buf_state |= BM_CHECKPOINT_NEEDED;
2957 
2958  item = &CkptBufferIds[num_to_scan++];
2959  item->buf_id = buf_id;
2960  item->tsId = bufHdr->tag.spcOid;
2961  item->relNumber = BufTagGetRelNumber(&bufHdr->tag);
2962  item->forkNum = BufTagGetForkNum(&bufHdr->tag);
2963  item->blockNum = bufHdr->tag.blockNum;
2964  }
2965 
2966  UnlockBufHdr(bufHdr, buf_state);
2967 
2968  /* Check for barrier events in case NBuffers is large. */
2969  if (ProcSignalBarrierPending)
2970  ProcessProcSignalBarrier();
2971  }
2972 
2973  if (num_to_scan == 0)
2974  return; /* nothing to do */
2975 
2976  WritebackContextInit(&wb_context, &checkpoint_flush_after);
2977 
2978  TRACE_POSTGRESQL_BUFFER_SYNC_START(NBuffers, num_to_scan);
2979 
2980  /*
2981  * Sort buffers that need to be written to reduce the likelihood of random
2982  * IO. The sorting is also important for the implementation of balancing
2983  * writes between tablespaces. Without balancing writes we'd potentially
2984  * end up writing to the tablespaces one-by-one; possibly overloading the
2985  * underlying system.
2986  */
2987  sort_checkpoint_bufferids(CkptBufferIds, num_to_scan);
2988 
2989  num_spaces = 0;
2990 
2991  /*
2992  * Allocate progress status for each tablespace with buffers that need to
2993  * be flushed. This requires the to-be-flushed array to be sorted.
2994  */
2995  last_tsid = InvalidOid;
2996  for (i = 0; i < num_to_scan; i++)
2997  {
2998  CkptTsStatus *s;
2999  Oid cur_tsid;
3000 
3001  cur_tsid = CkptBufferIds[i].tsId;
3002 
3003  /*
3004  * Grow array of per-tablespace status structs, every time a new
3005  * tablespace is found.
3006  */
3007  if (last_tsid == InvalidOid || last_tsid != cur_tsid)
3008  {
3009  Size sz;
3010 
3011  num_spaces++;
3012 
3013  /*
3014  * Not worth adding grow-by-power-of-2 logic here - even with a
3015  * few hundred tablespaces this should be fine.
3016  */
3017  sz = sizeof(CkptTsStatus) * num_spaces;
3018 
3019  if (per_ts_stat == NULL)
3020  per_ts_stat = (CkptTsStatus *) palloc(sz);
3021  else
3022  per_ts_stat = (CkptTsStatus *) repalloc(per_ts_stat, sz);
3023 
3024  s = &per_ts_stat[num_spaces - 1];
3025  memset(s, 0, sizeof(*s));
3026  s->tsId = cur_tsid;
3027 
3028  /*
3029  * The first buffer in this tablespace. As CkptBufferIds is sorted
3030  * by tablespace all (s->num_to_scan) buffers in this tablespace
3031  * will follow afterwards.
3032  */
3033  s->index = i;
3034 
3035  /*
3036  * progress_slice will be determined once we know how many buffers
3037  * are in each tablespace, i.e. after this loop.
3038  */
3039 
3040  last_tsid = cur_tsid;
3041  }
3042  else
3043  {
3044  s = &per_ts_stat[num_spaces - 1];
3045  }
3046 
3047  s->num_to_scan++;
3048 
3049  /* Check for barrier events. */
3050  if (ProcSignalBarrierPending)
3051  ProcessProcSignalBarrier();
3052  }
3053 
3054  Assert(num_spaces > 0);
3055 
3056  /*
3057  * Build a min-heap over the write-progress in the individual tablespaces,
3058  * and compute how large a portion of the total progress a single
3059  * processed buffer is.
3060  */
3061  ts_heap = binaryheap_allocate(num_spaces,
3062  ts_ckpt_progress_comparator,
3063  NULL);
3064 
3065  for (i = 0; i < num_spaces; i++)
3066  {
3067  CkptTsStatus *ts_stat = &per_ts_stat[i];
3068 
3069  ts_stat->progress_slice = (float8) num_to_scan / ts_stat->num_to_scan;
3070 
3071  binaryheap_add_unordered(ts_heap, PointerGetDatum(ts_stat));
3072  }
3073 
3074  binaryheap_build(ts_heap);
3075 
3076  /*
3077  * Iterate through to-be-checkpointed buffers and write the ones (still)
3078  * marked with BM_CHECKPOINT_NEEDED. The writes are balanced between
3079  * tablespaces; otherwise the sorting would lead to only one tablespace
3080  * receiving writes at a time, making inefficient use of the hardware.
3081  */
3082  num_processed = 0;
3083  num_written = 0;
3084  while (!binaryheap_empty(ts_heap))
3085  {
3086  BufferDesc *bufHdr = NULL;
3087  CkptTsStatus *ts_stat = (CkptTsStatus *)
3088  DatumGetPointer(binaryheap_first(ts_heap));
3089 
3090  buf_id = CkptBufferIds[ts_stat->index].buf_id;
3091  Assert(buf_id != -1);
3092 
3093  bufHdr = GetBufferDescriptor(buf_id);
3094 
3095  num_processed++;
3096 
3097  /*
3098  * We don't need to acquire the lock here, because we're only looking
3099  * at a single bit. It's possible that someone else writes the buffer
3100  * and clears the flag right after we check, but that doesn't matter
3101  * since SyncOneBuffer will then do nothing. However, there is a
3102  * further race condition: it's conceivable that between the time we
3103  * examine the bit here and the time SyncOneBuffer acquires the lock,
3104  * someone else not only wrote the buffer but replaced it with another
3105  * page and dirtied it. In that improbable case, SyncOneBuffer will
3106  * write the buffer though we didn't need to. It doesn't seem worth
3107  * guarding against this, though.
3108  */
3109  if (pg_atomic_read_u32(&bufHdr->state) & BM_CHECKPOINT_NEEDED)
3110  {
3111  if (SyncOneBuffer(buf_id, false, &wb_context) & BUF_WRITTEN)
3112  {
3113  TRACE_POSTGRESQL_BUFFER_SYNC_WRITTEN(buf_id);
3114  PendingCheckpointerStats.buffers_written++;
3115  num_written++;
3116  }
3117  }
3118 
3119  /*
3120  * Measure progress independently of actually having to flush the buffer
3121  * - otherwise writes become unbalanced.
3122  */
3123  ts_stat->progress += ts_stat->progress_slice;
3124  ts_stat->num_scanned++;
3125  ts_stat->index++;
3126 
3127  /* Have all the buffers from the tablespace been processed? */
3128  if (ts_stat->num_scanned == ts_stat->num_to_scan)
3129  {
3130  binaryheap_remove_first(ts_heap);
3131  }
3132  else
3133  {
3134  /* update heap with the new progress */
3135  binaryheap_replace_first(ts_heap, PointerGetDatum(ts_stat));
3136  }
3137 
3138  /*
3139  * Sleep to throttle our I/O rate.
3140  *
3141  * (This will check for barrier events even if it doesn't sleep.)
3142  */
3143  CheckpointWriteDelay(flags, (double) num_processed / num_to_scan);
3144  }
3145 
3146  /*
3147  * Issue all pending flushes. Only checkpointer calls BufferSync(), so
3148  * IOContext will always be IOCONTEXT_NORMAL.
3149  */
3150  IssuePendingWritebacks(&wb_context, IOCONTEXT_NORMAL);
3151 
3152  pfree(per_ts_stat);
3153  per_ts_stat = NULL;
3154  binaryheap_free(ts_heap);
3155 
3156  /*
3157  * Update checkpoint statistics. As noted above, this doesn't include
3158  * buffers written by other backends or bgwriter scan.
3159  */
3160  CheckpointStats.ckpt_bufs_written += num_written;
3161 
3162  TRACE_POSTGRESQL_BUFFER_SYNC_DONE(NBuffers, num_written, num_to_scan);
3163 }
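/*
 * [Illustrative arithmetic added for this documentation page; not part of
 * bufmgr.c. Hypothetical numbers for the balancing above: 1000 dirty
 * buffers in total, 800 in tablespace A and 200 in tablespace B. Each
 * processed buffer advances its tablespace by num_to_scan divided by that
 * tablespace's own num_to_scan, so both tablespaces reach the same total
 * progress when they finish, and the min-heap interleaves their writes
 * roughly 4:1.]
 */
static void
example_progress_slice(void)
{
	float8		slice_a = 1000.0 / 800.0;	/* 1.25 per buffer written in A */
	float8		slice_b = 1000.0 / 200.0;	/* 5.00 per buffer written in B */

	Assert(800 * slice_a == 200 * slice_b); /* both end at 1000 */
}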
3164 
3165 /*
3166  * BgBufferSync -- Write out some dirty buffers in the pool.
3167  *
3168  * This is called periodically by the background writer process.
3169  *
3170  * Returns true if it's appropriate for the bgwriter process to go into
3171  * low-power hibernation mode. (This happens if the strategy clock sweep
3172  * has been "lapped" and no buffer allocations have occurred recently,
3173  * or if the bgwriter has been effectively disabled by setting
3174  * bgwriter_lru_maxpages to 0.)
3175  */
3176 bool
3177 BgBufferSync(WritebackContext *wb_context)
3178 {
3179  /* info obtained from freelist.c */
3180  int strategy_buf_id;
3181  uint32 strategy_passes;
3182  uint32 recent_alloc;
3183 
3184  /*
3185  * Information saved between calls so we can determine the strategy
3186  * point's advance rate and avoid scanning already-cleaned buffers.
3187  */
3188  static bool saved_info_valid = false;
3189  static int prev_strategy_buf_id;
3190  static uint32 prev_strategy_passes;
3191  static int next_to_clean;
3192  static uint32 next_passes;
3193 
3194  /* Moving averages of allocation rate and clean-buffer density */
3195  static float smoothed_alloc = 0;
3196  static float smoothed_density = 10.0;
3197 
3198  /* Potentially these could be tunables, but for now, not */
3199  float smoothing_samples = 16;
3200  float scan_whole_pool_milliseconds = 120000.0;
3201 
3202  /* Used to compute how far we scan ahead */
3203  long strategy_delta;
3204  int bufs_to_lap;
3205  int bufs_ahead;
3206  float scans_per_alloc;
3207  int reusable_buffers_est;
3208  int upcoming_alloc_est;
3209  int min_scan_buffers;
3210 
3211  /* Variables for the scanning loop proper */
3212  int num_to_scan;
3213  int num_written;
3214  int reusable_buffers;
3215 
3216  /* Variables for final smoothed_density update */
3217  long new_strategy_delta;
3218  uint32 new_recent_alloc;
3219 
3220  /*
3221  * Find out where the freelist clock sweep currently is, and how many
3222  * buffer allocations have happened since our last call.
3223  */
3224  strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc);
3225 
3226  /* Report buffer alloc counts to pgstat */
3227  PendingBgWriterStats.buf_alloc += recent_alloc;
3228 
3229  /*
3230  * If we're not running the LRU scan, just stop after doing the stats
3231  * stuff. We mark the saved state invalid so that we can recover sanely
3232  * if LRU scan is turned back on later.
3233  */
3234  if (bgwriter_lru_maxpages <= 0)
3235  {
3236  saved_info_valid = false;
3237  return true;
3238  }
3239 
3240  /*
3241  * Compute strategy_delta = how many buffers have been scanned by the
3242  * clock sweep since last time. If first time through, assume none. Then
3243  * see if we are still ahead of the clock sweep, and if so, how many
3244  * buffers we could scan before we'd catch up with it and "lap" it. Note:
3245  * weird-looking coding of xxx_passes comparisons are to avoid bogus
3246  * behavior when the passes counts wrap around.
3247  */
3248  if (saved_info_valid)
3249  {
3250  int32 passes_delta = strategy_passes - prev_strategy_passes;
3251 
3252  strategy_delta = strategy_buf_id - prev_strategy_buf_id;
3253  strategy_delta += (long) passes_delta * NBuffers;
3254 
3255  Assert(strategy_delta >= 0);
3256 
3257  if ((int32) (next_passes - strategy_passes) > 0)
3258  {
3259  /* we're one pass ahead of the strategy point */
3260  bufs_to_lap = strategy_buf_id - next_to_clean;
3261 #ifdef BGW_DEBUG
3262  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
3263  next_passes, next_to_clean,
3264  strategy_passes, strategy_buf_id,
3265  strategy_delta, bufs_to_lap);
3266 #endif
3267  }
3268  else if (next_passes == strategy_passes &&
3269  next_to_clean >= strategy_buf_id)
3270  {
3271  /* on same pass, but ahead or at least not behind */
3272  bufs_to_lap = NBuffers - (next_to_clean - strategy_buf_id);
3273 #ifdef BGW_DEBUG
3274  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
3275  next_passes, next_to_clean,
3276  strategy_passes, strategy_buf_id,
3277  strategy_delta, bufs_to_lap);
3278 #endif
3279  }
3280  else
3281  {
3282  /*
3283  * We're behind, so skip forward to the strategy point and start
3284  * cleaning from there.
3285  */
3286 #ifdef BGW_DEBUG
3287  elog(DEBUG2, "bgwriter behind: bgw %u-%u strategy %u-%u delta=%ld",
3288  next_passes, next_to_clean,
3289  strategy_passes, strategy_buf_id,
3290  strategy_delta);
3291 #endif
3292  next_to_clean = strategy_buf_id;
3293  next_passes = strategy_passes;
3294  bufs_to_lap = NBuffers;
3295  }
3296  }
3297  else
3298  {
3299  /*
3300  * Initializing at startup or after LRU scanning had been off. Always
3301  * start at the strategy point.
3302  */
3303 #ifdef BGW_DEBUG
3304  elog(DEBUG2, "bgwriter initializing: strategy %u-%u",
3305  strategy_passes, strategy_buf_id);
3306 #endif
3307  strategy_delta = 0;
3308  next_to_clean = strategy_buf_id;
3309  next_passes = strategy_passes;
3310  bufs_to_lap = NBuffers;
3311  }
3312 
3313  /* Update saved info for next time */
3314  prev_strategy_buf_id = strategy_buf_id;
3315  prev_strategy_passes = strategy_passes;
3316  saved_info_valid = true;
3317 
3318  /*
3319  * Compute how many buffers had to be scanned for each new allocation, ie,
3320  * 1/density of reusable buffers, and track a moving average of that.
3321  *
3322  * If the strategy point didn't move, we don't update the density estimate
3323  */
3324  if (strategy_delta > 0 && recent_alloc > 0)
3325  {
3326  scans_per_alloc = (float) strategy_delta / (float) recent_alloc;
3327  smoothed_density += (scans_per_alloc - smoothed_density) /
3328  smoothing_samples;
3329  }
3330 
3331  /*
3332  * Estimate how many reusable buffers there are between the current
3333  * strategy point and where we've scanned ahead to, based on the smoothed
3334  * density estimate.
3335  */
3336  bufs_ahead = NBuffers - bufs_to_lap;
3337  reusable_buffers_est = (float) bufs_ahead / smoothed_density;
3338 
3339  /*
3340  * Track a moving average of recent buffer allocations. Here, rather than
3341  * a true average we want a fast-attack, slow-decline behavior: we
3342  * immediately follow any increase.
3343  */
3344  if (smoothed_alloc <= (float) recent_alloc)
3345  smoothed_alloc = recent_alloc;
3346  else
3347  smoothed_alloc += ((float) recent_alloc - smoothed_alloc) /
3348  smoothing_samples;
3349 
3350  /* Scale the estimate by a GUC to allow more aggressive tuning. */
3351  upcoming_alloc_est = (int) (smoothed_alloc * bgwriter_lru_multiplier);
3352 
3353  /*
3354  * If recent_alloc remains at zero for many cycles, smoothed_alloc will
3355  * eventually underflow to zero, and the underflows produce annoying
3356  * kernel warnings on some platforms. Once upcoming_alloc_est has gone to
3357  * zero, there's no point in tracking smaller and smaller values of
3358  * smoothed_alloc, so just reset it to exactly zero to avoid this
3359  * syndrome. It will pop back up as soon as recent_alloc increases.
3360  */
3361  if (upcoming_alloc_est == 0)
3362  smoothed_alloc = 0;
3363 
3364  /*
3365  * Even in cases where there's been little or no buffer allocation
3366  * activity, we want to make a small amount of progress through the buffer
3367  * cache so that as many reusable buffers as possible are clean after an
3368  * idle period.
3369  *
3370  * (scan_whole_pool_milliseconds / BgWriterDelay) computes how many times
3371  * the BGW will be called during the scan_whole_pool time; slice the
3372  * buffer pool into that many sections.
3373  */
3374  min_scan_buffers = (int) (NBuffers / (scan_whole_pool_milliseconds / BgWriterDelay));
3375 
3376  if (upcoming_alloc_est < (min_scan_buffers + reusable_buffers_est))
3377  {
3378 #ifdef BGW_DEBUG
3379  elog(DEBUG2, "bgwriter: alloc_est=%d too small, using min=%d + reusable_est=%d",
3380  upcoming_alloc_est, min_scan_buffers, reusable_buffers_est);
3381 #endif
3382  upcoming_alloc_est = min_scan_buffers + reusable_buffers_est;
3383  }
3384 
3385  /*
3386  * Now write out dirty reusable buffers, working forward from the
3387  * next_to_clean point, until we have lapped the strategy scan, or cleaned
3388  * enough buffers to match our estimate of the next cycle's allocation
3389  * requirements, or hit the bgwriter_lru_maxpages limit.
3390  */
3391 
3392  num_to_scan = bufs_to_lap;
3393  num_written = 0;
3394  reusable_buffers = reusable_buffers_est;
3395 
3396  /* Execute the LRU scan */
3397  while (num_to_scan > 0 && reusable_buffers < upcoming_alloc_est)
3398  {
3399  int sync_state = SyncOneBuffer(next_to_clean, true,
3400  wb_context);
3401 
3402  if (++next_to_clean >= NBuffers)
3403  {
3404  next_to_clean = 0;
3405  next_passes++;
3406  }
3407  num_to_scan--;
3408 
3409  if (sync_state & BUF_WRITTEN)
3410  {
3411  reusable_buffers++;
3412  if (++num_written >= bgwriter_lru_maxpages)
3413  {
3414  PendingBgWriterStats.maxwritten_clean++;
3415  break;
3416  }
3417  }
3418  else if (sync_state & BUF_REUSABLE)
3419  reusable_buffers++;
3420  }
3421 
3422  PendingBgWriterStats.buf_written_clean += num_written;
3423 
3424 #ifdef BGW_DEBUG
3425  elog(DEBUG1, "bgwriter: recent_alloc=%u smoothed=%.2f delta=%ld ahead=%d density=%.2f reusable_est=%d upcoming_est=%d scanned=%d wrote=%d reusable=%d",
3426  recent_alloc, smoothed_alloc, strategy_delta, bufs_ahead,
3427  smoothed_density, reusable_buffers_est, upcoming_alloc_est,
3428  bufs_to_lap - num_to_scan,
3429  num_written,
3430  reusable_buffers - reusable_buffers_est);
3431 #endif
3432 
3433  /*
3434  * Consider the above scan as being like a new allocation scan.
3435  * Characterize its density and update the smoothed one based on it. This
3436  * effectively halves the moving average period in cases where both the
3437  * strategy and the background writer are doing some useful scanning,
3438  * which is helpful because a long memory isn't as desirable on the
3439  * density estimates.
3440  */
3441  new_strategy_delta = bufs_to_lap - num_to_scan;
3442  new_recent_alloc = reusable_buffers - reusable_buffers_est;
3443  if (new_strategy_delta > 0 && new_recent_alloc > 0)
3444  {
3445  scans_per_alloc = (float) new_strategy_delta / (float) new_recent_alloc;
3446  smoothed_density += (scans_per_alloc - smoothed_density) /
3447  smoothing_samples;
3448 
3449 #ifdef BGW_DEBUG
3450  elog(DEBUG2, "bgwriter: cleaner density alloc=%u scan=%ld density=%.2f new smoothed=%.2f",
3451  new_recent_alloc, new_strategy_delta,
3452  scans_per_alloc, smoothed_density);
3453 #endif
3454  }
3455 
3456  /* Return true if OK to hibernate */
3457  return (bufs_to_lap == 0 && recent_alloc == 0);
3458 }
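/*
 * [Illustrative arithmetic added for this documentation page; not part of
 * bufmgr.c. Hypothetical numbers for the estimates above: with a smoothed
 * density of 3 buffers scanned per allocation and 900 buffers already
 * scanned ahead of the clock sweep, about 300 of them should be reusable;
 * with a smoothed allocation rate of 100 and bgwriter_lru_multiplier set to
 * 2.0, the goal becomes roughly 200 clean buffers, so the LRU scan can stop
 * as soon as it believes enough reusable buffers are available.]
 */
static void
example_bgwriter_estimates(void)
{
	float		smoothed_density = 3.0;
	float		smoothed_alloc = 100.0;
	double		lru_multiplier = 2.0;	/* stands in for bgwriter_lru_multiplier */
	int			reusable_buffers_est = (int) (900 / smoothed_density);	/* 300 */
	int			upcoming_alloc_est = (int) (smoothed_alloc * lru_multiplier);	/* 200 */

	(void) reusable_buffers_est;
	(void) upcoming_alloc_est;
}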
3459 
3460 /*
3461  * SyncOneBuffer -- process a single buffer during syncing.
3462  *
3463  * If skip_recently_used is true, we don't write currently-pinned buffers, nor
3464  * buffers marked recently used, as these are not replacement candidates.
3465  *
3466  * Returns a bitmask containing the following flag bits:
3467  * BUF_WRITTEN: we wrote the buffer.
3468  * BUF_REUSABLE: buffer is available for replacement, ie, it has
3469  * pin count 0 and usage count 0.
3470  *
3471  * (BUF_WRITTEN could be set in error if FlushBuffer finds the buffer clean
3472  * after locking it, but we don't care all that much.)
3473  */
3474 static int
3475 SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
3476 {
3477  BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
3478  int result = 0;
3479  uint32 buf_state;
3480  BufferTag tag;
3481 
3482  /* Make sure we can handle the pin */
3483  ReservePrivateRefCountEntry();
3484  ResourceOwnerEnlarge(CurrentResourceOwner);
3485 
3486  /*
3487  * Check whether buffer needs writing.
3488  *
3489  * We can make this check without taking the buffer content lock so long
3490  * as we mark pages dirty in access methods *before* logging changes with
3491  * XLogInsert(): if someone marks the buffer dirty just after our check we
3492  * don't worry, because our checkpoint.redo points before the log record for
3493  * the upcoming changes, so we are not required to write such a dirty buffer.
3494  */
3495  buf_state = LockBufHdr(bufHdr);
3496 
3497  if (BUF_STATE_GET_REFCOUNT(buf_state) == 0 &&
3498  BUF_STATE_GET_USAGECOUNT(buf_state) == 0)
3499  {
3500  result |= BUF_REUSABLE;
3501  }
3502  else if (skip_recently_used)
3503  {
3504  /* Caller told us not to write recently-used buffers */
3505  UnlockBufHdr(bufHdr, buf_state);
3506  return result;
3507  }
3508 
3509  if (!(buf_state & BM_VALID) || !(buf_state & BM_DIRTY))
3510  {
3511  /* It's clean, so nothing to do */
3512  UnlockBufHdr(bufHdr, buf_state);
3513  return result;
3514  }
3515 
3516  /*
3517  * Pin it, share-lock it, write it. (FlushBuffer will do nothing if the
3518  * buffer is clean by the time we've locked it.)
3519  */
3520  PinBuffer_Locked(bufHdr);
3521  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
3522 
3523  FlushBuffer(bufHdr, NULL, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
3524 
3525  LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
3526 
3527  tag = bufHdr->tag;
3528 
3529  UnpinBuffer(bufHdr);
3530 
3531  /*
3532  * SyncOneBuffer() is only called by checkpointer and bgwriter, so
3533  * IOContext will always be IOCONTEXT_NORMAL.
3534  */
3535  ScheduleBufferTagForWriteback(wb_context, IOCONTEXT_NORMAL, &tag);
3536 
3537  return result | BUF_WRITTEN;
3538 }
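/*
 * [Illustrative sketch added for this documentation page; not part of
 * bufmgr.c. It mirrors how BgBufferSync() above consumes SyncOneBuffer()'s
 * return value: BUF_WRITTEN means a write was issued (and, during the LRU
 * scan, the buffer also counts as reusable), while BUF_REUSABLE alone means
 * the buffer was already clean and unpinned with usage count zero.]
 */
static void
example_interpret_sync_state(int sync_state, int *num_written, int *reusable_buffers)
{
	if (sync_state & BUF_WRITTEN)
	{
		(*num_written)++;
		(*reusable_buffers)++;
	}
	else if (sync_state & BUF_REUSABLE)
		(*reusable_buffers)++;
}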
3539 
3540 /*
3541  * AtEOXact_Buffers - clean up at end of transaction.
3542  *
3543  * As of PostgreSQL 8.0, buffer pins should get released by the
3544  * ResourceOwner mechanism. This routine is just a debugging
3545  * cross-check that no pins remain.
3546  */
3547 void
3548 AtEOXact_Buffers(bool isCommit)
3549 {
3550  CheckForBufferLeaks();
3551 
3552  AtEOXact_LocalBuffers(isCommit);
3553 
3554  Assert(PrivateRefCountOverflowed == 0);
3555 }
3556 
3557 /*
3558  * Initialize access to shared buffer pool
3559  *
3560  * This is called during backend startup (whether standalone or under the
3561  * postmaster). It sets up for this backend's access to the already-existing
3562  * buffer pool.
3563  */
3564 void
3566 {
3567  HASHCTL hash_ctl;
3568 
3569  memset(&PrivateRefCountArray, 0, sizeof(PrivateRefCountArray));
3570 
3571  hash_ctl.keysize = sizeof(int32);
3572  hash_ctl.entrysize = sizeof(PrivateRefCountEntry);
3573 
3574  PrivateRefCountHash = hash_create("PrivateRefCount", 100, &hash_ctl,
3575  HASH_ELEM | HASH_BLOBS);
3576 
3577  /*
3578  * AtProcExit_Buffers needs LWLock access, and thereby has to be called at
3579  * the corresponding phase of backend shutdown.
3580  */
3581  Assert(MyProc != NULL);
3582  on_shmem_exit(AtProcExit_Buffers, 0);
3583 }
3584 
3585 /*
3586  * During backend exit, ensure that we released all shared-buffer locks and
3587  * assert that we have no remaining pins.
3588  */
3589 static void
3590 AtProcExit_Buffers(int code, Datum arg)
3591 {
3592  UnlockBuffers();
3593 
3594  CheckForBufferLeaks();
3595 
3596  /* localbuf.c needs a chance too */
3597  AtProcExit_LocalBuffers();
3598 }
3599 
3600 /*
3601  * CheckForBufferLeaks - ensure this backend holds no buffer pins
3602  *
3603  * As of PostgreSQL 8.0, buffer pins should get released by the
3604  * ResourceOwner mechanism. This routine is just a debugging
3605  * cross-check that no pins remain.
3606  */
3607 static void
3608 CheckForBufferLeaks(void)
3609 {
3610 #ifdef USE_ASSERT_CHECKING
3611  int RefCountErrors = 0;
3612  PrivateRefCountEntry *res;
3613  int i;
3614  char *s;
3615 
3616  /* check the array */
3617  for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
3618  {
3619  res = &PrivateRefCountArray[i];
3620 
3621  if (res->buffer != InvalidBuffer)
3622  {
3623  s = DebugPrintBufferRefcount(res->buffer);
3624  elog(WARNING, "buffer refcount leak: %s", s);
3625  pfree(s);
3626 
3627  RefCountErrors++;
3628  }
3629  }
3630 
3631  /* if necessary search the hash */
3632  if (PrivateRefCountOverflowed)
3633  {
3634  HASH_SEQ_STATUS hstat;
3635 
3636  hash_seq_init(&hstat, PrivateRefCountHash);
3637  while ((res = (PrivateRefCountEntry *) hash_seq_search(&hstat)) != NULL)
3638  {
3639  s = DebugPrintBufferRefcount(res->buffer);
3640  elog(WARNING, "buffer refcount leak: %s", s);
3641  pfree(s);
3642  RefCountErrors++;
3643  }
3644  }
3645 
3646  Assert(RefCountErrors == 0);
3647 #endif
3648 }
3649 
3650 /*
3651  * Helper routine to issue warnings when a buffer is unexpectedly pinned
3652  */
3653 char *
3654 DebugPrintBufferRefcount(Buffer buffer)
3655 {
3656  BufferDesc *buf;
3657  int32 loccount;
3658  char *path;
3659  char *result;
3660  ProcNumber backend;
3661  uint32 buf_state;
3662 
3663  Assert(BufferIsValid(buffer));
3664  if (BufferIsLocal(buffer))
3665  {
3666  buf = GetLocalBufferDescriptor(-buffer - 1);
3667  loccount = LocalRefCount[-buffer - 1];
3668  backend = MyProcNumber;
3669  }
3670  else
3671  {
3672  buf = GetBufferDescriptor(buffer - 1);
3673  loccount = GetPrivateRefCount(buffer);
3674  backend = INVALID_PROC_NUMBER;
3675  }
3676 
3677  /* theoretically we should lock the bufhdr here */
3678  path = relpathbackend(BufTagGetRelFileLocator(&buf->tag), backend,
3679  BufTagGetForkNum(&buf->tag));
3680  buf_state = pg_atomic_read_u32(&buf->state);
3681 
3682  result = psprintf("[%03d] (rel=%s, blockNum=%u, flags=0x%x, refcount=%u %d)",
3683  buffer, path,
3684  buf->tag.blockNum, buf_state & BUF_FLAG_MASK,
3685  BUF_STATE_GET_REFCOUNT(buf_state), loccount);
3686  pfree(path);
3687  return result;
3688 }
3689 
3690 /*
3691  * CheckPointBuffers
3692  *
3693  * Flush all dirty blocks in buffer pool to disk at checkpoint time.
3694  *
3695  * Note: temporary relations do not participate in checkpoints, so they don't
3696  * need to be flushed.
3697  */
3698 void
3700 {
3701  BufferSync(flags);
3702 }
3703 
3704 /*
3705  * BufferGetBlockNumber
3706  * Returns the block number associated with a buffer.
3707  *
3708  * Note:
3709  * Assumes that the buffer is valid and pinned, else the
3710  * value may be obsolete immediately...
3711  */
3714 {
3715  BufferDesc *bufHdr;
3716 
3718 
3719  if (BufferIsLocal(buffer))
3720  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
3721  else
3722  bufHdr = GetBufferDescriptor(buffer - 1);
3723 
3724  /* pinned, so OK to read tag without spinlock */
3725  return bufHdr->tag.blockNum;
3726 }
3727 
3728 /*
3729  * BufferGetTag
3730  * Returns the relfilelocator, fork number and block number associated with
3731  * a buffer.
3732  */
3733 void
3735  BlockNumber *blknum)
3736 {
3737  BufferDesc *bufHdr;
3738 
3739  /* Do the same checks as BufferGetBlockNumber. */
3741 
3742  if (BufferIsLocal(buffer))
3743  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
3744  else
3745  bufHdr = GetBufferDescriptor(buffer - 1);
3746 
3747  /* pinned, so OK to read tag without spinlock */
3748  *rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
3749  *forknum = BufTagGetForkNum(&bufHdr->tag);
3750  *blknum = bufHdr->tag.blockNum;
3751 }
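/*
 * Illustrative sketch (not part of bufmgr.c): typical use of the two
 * accessors above from backend code that already holds a pin.  The
 * relation and block number are hypothetical; only the call pattern is
 * the point, and a normal backend environment is assumed.
 */
#include "postgres.h"
#include "storage/bufmgr.h"
#include "utils/rel.h"

static void
example_inspect_block(Relation rel, BlockNumber blkno)
{
	Buffer		buf = ReadBuffer(rel, blkno);	/* pins the buffer */
	RelFileLocator rlocator;
	ForkNumber	forknum;
	BlockNumber tagblkno;

	/* Both calls are only safe while the pin is held (see notes above). */
	Assert(BufferGetBlockNumber(buf) == blkno);
	BufferGetTag(buf, &rlocator, &forknum, &tagblkno);
	Assert(tagblkno == blkno && forknum == MAIN_FORKNUM);

	ReleaseBuffer(buf);
}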
3752 
3753 /*
3754  * FlushBuffer
3755  * Physically write out a shared buffer.
3756  *
3757  * NOTE: this actually just passes the buffer contents to the kernel; the
3758  * real write to disk won't happen until the kernel feels like it. This
3759  * is okay from our point of view since we can redo the changes from WAL.
3760  * However, we will need to force the changes to disk via fsync before
3761  * we can checkpoint WAL.
3762  *
3763  * The caller must hold a pin on the buffer and have share-locked the
3764  * buffer contents. (Note: a share-lock does not prevent updates of
3765  * hint bits in the buffer, so the page could change while the write
3766  * is in progress, but we assume that that will not invalidate the data
3767  * written.)
3768  *
3769  * If the caller has an smgr reference for the buffer's relation, pass it
3770  * as the second parameter. If not, pass NULL.
3771  */
3772 static void
3774  IOContext io_context)
3775 {
3776  XLogRecPtr recptr;
3777  ErrorContextCallback errcallback;
3778  instr_time io_start;
3779  Block bufBlock;
3780  char *bufToWrite;
3781  uint32 buf_state;
3782 
3783  /*
3784  * Try to start an I/O operation. If StartBufferIO returns false, then
3785  * someone else flushed the buffer before we could, so we need not do
3786  * anything.
3787  */
3788  if (!StartBufferIO(buf, false, false))
3789  return;
3790 
3791  /* Setup error traceback support for ereport() */
3793  errcallback.arg = (void *) buf;
3794  errcallback.previous = error_context_stack;
3795  error_context_stack = &errcallback;
3796 
3797  /* Find smgr relation for buffer */
3798  if (reln == NULL)
3800 
3801  TRACE_POSTGRESQL_BUFFER_FLUSH_START(BufTagGetForkNum(&buf->tag),
3802  buf->tag.blockNum,
3804  reln->smgr_rlocator.locator.dbOid,
3806 
3807  buf_state = LockBufHdr(buf);
3808 
3809  /*
3810  * Run PageGetLSN while holding header lock, since we don't have the
3811  * buffer locked exclusively in all cases.
3812  */
3813  recptr = BufferGetLSN(buf);
3814 
3815  /* To check if block content changes while flushing. - vadim 01/17/97 */
3816  buf_state &= ~BM_JUST_DIRTIED;
3817  UnlockBufHdr(buf, buf_state);
3818 
3819  /*
3820  * Force XLOG flush up to buffer's LSN. This implements the basic WAL
3821  * rule that log updates must hit disk before any of the data-file changes
3822  * they describe do.
3823  *
3824  * However, this rule does not apply to unlogged relations, which will be
3825  * lost after a crash anyway. Most unlogged relation pages do not bear
3826  * LSNs since we never emit WAL records for them, and therefore flushing
3827  * up through the buffer LSN would be useless, but harmless. However,
3828  * GiST indexes use LSNs internally to track page-splits, and therefore
3829  * unlogged GiST pages bear "fake" LSNs generated by
3830  * GetFakeLSNForUnloggedRel. It is unlikely but possible that the fake
3831  * LSN counter could advance past the WAL insertion point; and if it did
3832  * happen, attempting to flush WAL through that location would fail, with
3833  * disastrous system-wide consequences. To make sure that can't happen,
3834  * skip the flush if the buffer isn't permanent.
3835  */
3836  if (buf_state & BM_PERMANENT)
3837  XLogFlush(recptr);
3838 
3839  /*
3840  * Now it's safe to write buffer to disk. Note that no one else should
3841  * have been able to write it while we were busy with log flushing because
3842  * only one process at a time can set the BM_IO_IN_PROGRESS bit.
3843  */
3844  bufBlock = BufHdrGetBlock(buf);
3845 
3846  /*
3847  * Update page checksum if desired. Since we have only shared lock on the
3848  * buffer, other processes might be updating hint bits in it, so we must
3849  * copy the page to private storage if we do checksumming.
3850  */
3851  bufToWrite = PageSetChecksumCopy((Page) bufBlock, buf->tag.blockNum);
3852 
3854 
3855  /*
3856  * bufToWrite is either the shared buffer or a copy, as appropriate.
3857  */
3858  smgrwrite(reln,
3859  BufTagGetForkNum(&buf->tag),
3860  buf->tag.blockNum,
3861  bufToWrite,
3862  false);
3863 
3864  /*
3865  * When a strategy is in use, only flushes of dirty buffers already in the
3866  * strategy ring are counted as strategy writes (IOCONTEXT
3867  * [BULKREAD|BULKWRITE|VACUUM] IOOP_WRITE) for the purpose of IO
3868  * statistics tracking.
3869  *
3870  * If a shared buffer initially added to the ring must be flushed before
3871  * being used, this is counted as an IOCONTEXT_NORMAL IOOP_WRITE.
3872  *
3873  * If a shared buffer which was added to the ring later because the
3874  * current strategy buffer is pinned or in use or because all strategy
3875  * buffers were dirty and rejected (for BAS_BULKREAD operations only)
3876  * requires flushing, this is counted as an IOCONTEXT_NORMAL IOOP_WRITE
3877  * (from_ring will be false).
3878  *
3879  * When a strategy is not in use, the write can only be a "regular" write
3880  * of a dirty shared buffer (IOCONTEXT_NORMAL IOOP_WRITE).
3881  */
3883  IOOP_WRITE, io_start, 1);
3884 
3886 
3887  /*
3888  * Mark the buffer as clean (unless BM_JUST_DIRTIED has become set) and
3889  * end the BM_IO_IN_PROGRESS state.
3890  */
3891  TerminateBufferIO(buf, true, 0, true);
3892 
3893  TRACE_POSTGRESQL_BUFFER_FLUSH_DONE(BufTagGetForkNum(&buf->tag),
3894  buf->tag.blockNum,
3896  reln->smgr_rlocator.locator.dbOid,
3898 
3899  /* Pop the error context stack */
3900  error_context_stack = errcallback.previous;
3901 }
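/*
 * Illustrative sketch (not part of bufmgr.c): the WAL-before-data rule the
 * function above enforces for BM_PERMANENT buffers.  Once XLogFlush() has
 * been called with the page's LSN, no WAL describing that page remains
 * unflushed, so handing the page to smgrwrite() is safe.  Hypothetical
 * helper, assuming a backend context.
 */
#include "postgres.h"
#include "access/xlog.h"
#include "storage/bufpage.h"

static void
example_wal_before_data(Page page)
{
	XLogFlush(PageGetLSN(page));
	Assert(!XLogNeedsFlush(PageGetLSN(page)));	/* data write may proceed */
}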
3902 
3903 /*
3904  * RelationGetNumberOfBlocksInFork
3905  * Determines the current number of pages in the specified relation fork.
3906  *
3907  * Note that the accuracy of the result will depend on the details of the
3908  * relation's storage. For builtin AMs it'll be accurate, but for external AMs
3909  * it might not be.
3910  */
3913 {
3914  if (RELKIND_HAS_TABLE_AM(relation->rd_rel->relkind))
3915  {
3916  /*
3917  * Not every table AM uses BLCKSZ-wide fixed-size blocks. The tableam
3918  * API therefore reports the size in bytes - but for the purpose of this
3919  * routine we want the number of blocks, so divide, rounding
3920  * up.
3921  */
3922  uint64 szbytes;
3923 
3924  szbytes = table_relation_size(relation, forkNum);
3925 
3926  return (szbytes + (BLCKSZ - 1)) / BLCKSZ;
3927  }
3928  else if (RELKIND_HAS_STORAGE(relation->rd_rel->relkind))
3929  {
3930  return smgrnblocks(RelationGetSmgr(relation), forkNum);
3931  }
3932  else
3933  Assert(false);
3934 
3935  return 0; /* keep compiler quiet */
3936 }
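/*
 * Illustrative sketch (hypothetical helper, not part of bufmgr.c): the
 * round-up division used above.  With BLCKSZ = 8192, a table AM reporting
 * 16385 bytes yields (16385 + 8191) / 8192 = 3 blocks; a partially filled
 * block still counts as a whole block.
 */
static inline uint64
example_bytes_to_blocks(uint64 szbytes)
{
	return (szbytes + (BLCKSZ - 1)) / BLCKSZ;
}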
3937 
3938 /*
3939  * BufferIsPermanent
3940  * Determines whether a buffer will potentially still be around after
3941  * a crash. Caller must hold a buffer pin.
3942  */
3943 bool
3945 {
3946  BufferDesc *bufHdr;
3947 
3948  /* Local buffers are used only for temp relations. */
3949  if (BufferIsLocal(buffer))
3950  return false;
3951 
3952  /* Make sure we've got a real buffer, and that we hold a pin on it. */
3955 
3956  /*
3957  * BM_PERMANENT can't be changed while we hold a pin on the buffer, so we
3958  * need not bother with the buffer header spinlock. Even if someone else
3959  * changes the buffer header state while we're doing this, the state is
3960  * changed atomically, so we'll read the old value or the new value, but
3961  * not random garbage.
3962  */
3963  bufHdr = GetBufferDescriptor(buffer - 1);
3964  return (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT) != 0;
3965 }
3966 
3967 /*
3968  * BufferGetLSNAtomic
3969  * Retrieves the LSN of the buffer atomically using a buffer header lock.
3970  * This is necessary for some callers who may not have an exclusive lock
3971  * on the buffer.
3972  */
3973 XLogRecPtr
3975 {
3976  BufferDesc *bufHdr = GetBufferDescriptor(buffer - 1);
3977  char *page = BufferGetPage(buffer);
3978  XLogRecPtr lsn;
3979  uint32 buf_state;
3980 
3981  /*
3982  * If we don't need locking for correctness, fastpath out.
3983  */
3985  return PageGetLSN(page);
3986 
3987  /* Make sure we've got a real buffer, and that we hold a pin on it. */
3990 
3991  buf_state = LockBufHdr(bufHdr);
3992  lsn = PageGetLSN(page);
3993  UnlockBufHdr(bufHdr, buf_state);
3994 
3995  return lsn;
3996 }
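/*
 * Illustrative sketch (hypothetical caller): comparing a page LSN against
 * some horizon while holding only a pin and a share lock.  Going through
 * BufferGetLSNAtomic() rather than PageGetLSN() is what keeps this safe
 * when data checksums or wal_log_hints cause the LSN to be updated under
 * the buffer header lock.
 */
static bool
example_page_precedes(Buffer buf, XLogRecPtr horizon)
{
	return BufferGetLSNAtomic(buf) < horizon;
}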
3997 
3998 /* ---------------------------------------------------------------------
3999  * DropRelationBuffers
4000  *
4001  * This function removes from the buffer pool all the pages of the
4002  * specified relation forks that have block numbers >= firstDelBlock.
4003  * (In particular, with firstDelBlock = 0, all pages are removed.)
4004  * Dirty pages are simply dropped, without bothering to write them
4005  * out first. Therefore, this is NOT rollback-able, and so should be
4006  * used only with extreme caution!
4007  *
4008  * Currently, this is called only from smgr.c when the underlying file
4009  * is about to be deleted or truncated (firstDelBlock is needed for
4010  * the truncation case). The data in the affected pages would therefore
4011  * be deleted momentarily anyway, and there is no point in writing it.
4012  * It is the responsibility of higher-level code to ensure that the
4013  * deletion or truncation does not lose any data that could be needed
4014  * later. It is also the responsibility of higher-level code to ensure
4015  * that no other process could be trying to load more pages of the
4016  * relation into buffers.
4017  * --------------------------------------------------------------------
4018  */
4019 void
4021  int nforks, BlockNumber *firstDelBlock)
4022 {
4023  int i;
4024  int j;
4025  RelFileLocatorBackend rlocator;
4026  BlockNumber nForkBlock[MAX_FORKNUM];
4027  uint64 nBlocksToInvalidate = 0;
4028 
4029  rlocator = smgr_reln->smgr_rlocator;
4030 
4031  /* If it's a local relation, it's localbuf.c's problem. */
4032  if (RelFileLocatorBackendIsTemp(rlocator))
4033  {
4034  if (rlocator.backend == MyProcNumber)
4035  {
4036  for (j = 0; j < nforks; j++)
4037  DropRelationLocalBuffers(rlocator.locator, forkNum[j],
4038  firstDelBlock[j]);
4039  }
4040  return;
4041  }
4042 
4043  /*
4044  * To remove all the pages of the specified relation forks from the buffer
4045  * pool, we need to scan the entire buffer pool but we can optimize it by
4046  * finding the buffers from BufMapping table provided we know the exact
4047  * size of each fork of the relation. The exact size is required to ensure
4048  * that we don't leave any buffer for the relation being dropped as
4049  * otherwise the background writer or checkpointer can lead to a PANIC
4050  * error while flushing buffers corresponding to files that don't exist.
4051  *
4052  * To know the exact size, we rely on the size we cache for each fork
4053  * during recovery, which limits the optimization to recovery and
4054  * standbys; we can easily extend it once we have a shared cache for
4055  * relation sizes.
4056  *
4057  * In recovery, we cache the value returned by the first lseek(SEEK_END)
4058  * and future writes keep the cached value up-to-date. See
4059  * smgrextend. It is possible that the value of the first lseek is smaller
4060  * than the actual number of existing blocks in the file due to buggy
4061  * Linux kernels that might not have accounted for the recent write. But
4062  * that should be fine because there must not be any buffers after that
4063  * file size.
4064  */
4065  for (i = 0; i < nforks; i++)
4066  {
4067  /* Get the number of blocks for a relation's fork */
4068  nForkBlock[i] = smgrnblocks_cached(smgr_reln, forkNum[i]);
4069 
4070  if (nForkBlock[i] == InvalidBlockNumber)
4071  {
4072  nBlocksToInvalidate = InvalidBlockNumber;
4073  break;
4074  }
4075 
4076  /* calculate the number of blocks to be invalidated */
4077  nBlocksToInvalidate += (nForkBlock[i] - firstDelBlock[i]);
4078  }
4079 
4080  /*
4081  * We apply the optimization iff the total number of blocks to invalidate
4082  * is below the BUF_DROP_FULL_SCAN_THRESHOLD.
4083  */
4084  if (BlockNumberIsValid(nBlocksToInvalidate) &&
4085  nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
4086  {
4087  for (j = 0; j < nforks; j++)
4088  FindAndDropRelationBuffers(rlocator.locator, forkNum[j],
4089  nForkBlock[j], firstDelBlock[j]);
4090  return;
4091  }
4092 
4093  for (i = 0; i < NBuffers; i++)
4094  {
4095  BufferDesc *bufHdr = GetBufferDescriptor(i);
4096  uint32 buf_state;
4097 
4098  /*
4099  * We can make this a tad faster by prechecking the buffer tag before
4100  * we attempt to lock the buffer; this saves a lot of lock
4101  * acquisitions in typical cases. It should be safe because the
4102  * caller must have AccessExclusiveLock on the relation, or some other
4103  * reason to be certain that no one is loading new pages of the rel
4104  * into the buffer pool. (Otherwise we might well miss such pages
4105  * entirely.) Therefore, while the tag might be changing while we
4106  * look at it, it can't be changing *to* a value we care about, only
4107  * *away* from such a value. So false negatives are impossible, and
4108  * false positives are safe because we'll recheck after getting the
4109  * buffer lock.
4110  *
4111  * We could check forkNum and blockNum as well as the rlocator, but
4112  * the incremental win from doing so seems small.
4113  */
4114  if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator))
4115  continue;
4116 
4117  buf_state = LockBufHdr(bufHdr);
4118 
4119  for (j = 0; j < nforks; j++)
4120  {
4121  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator) &&
4122  BufTagGetForkNum(&bufHdr->tag) == forkNum[j] &&
4123  bufHdr->tag.blockNum >= firstDelBlock[j])
4124  {
4125  InvalidateBuffer(bufHdr); /* releases spinlock */
4126  break;
4127  }
4128  }
4129  if (j >= nforks)
4130  UnlockBufHdr(bufHdr, buf_state);
4131  }
4132 }
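/*
 * Illustrative arithmetic (hypothetical helper, not part of bufmgr.c):
 * with shared_buffers = 128MB, NBuffers is 16384 (8kB buffers), so
 * BUF_DROP_FULL_SCAN_THRESHOLD is 16384 / 32 = 512 blocks.  Relations
 * whose to-be-invalidated blocks total less than that (and whose fork
 * sizes are cached) take the BufMapping-lookup path above instead of a
 * full buffer-pool scan.
 */
static inline bool
example_takes_lookup_path(uint64 nBlocksToInvalidate)
{
	return BlockNumberIsValid(nBlocksToInvalidate) &&
		nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD;
}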
4133 
4134 /* ---------------------------------------------------------------------
4135  * DropRelationsAllBuffers
4136  *
4137  * This function removes from the buffer pool all the pages of all
4138  * forks of the specified relations. It's equivalent to calling
4139  * DropRelationBuffers once per fork per relation with firstDelBlock = 0.
4140  * --------------------------------------------------------------------
4141  */
4142 void
4143 DropRelationsAllBuffers(SMgrRelation *smgr_reln, int nlocators)
4144 {
4145  int i;
4146  int n = 0;
4147  SMgrRelation *rels;
4148  BlockNumber (*block)[MAX_FORKNUM + 1];
4149  uint64 nBlocksToInvalidate = 0;
4150  RelFileLocator *locators;
4151  bool cached = true;
4152  bool use_bsearch;
4153 
4154  if (nlocators == 0)
4155  return;
4156 
4157  rels = palloc(sizeof(SMgrRelation) * nlocators); /* non-local relations */
4158 
4159  /* If it's a local relation, it's localbuf.c's problem. */
4160  for (i = 0; i < nlocators; i++)
4161  {
4162  if (RelFileLocatorBackendIsTemp(smgr_reln[i]->smgr_rlocator))
4163  {
4164  if (smgr_reln[i]->smgr_rlocator.backend == MyProcNumber)
4165  DropRelationAllLocalBuffers(smgr_reln[i]->smgr_rlocator.locator);
4166  }
4167  else
4168  rels[n++] = smgr_reln[i];
4169  }
4170 
4171  /*
4172  * If there are no non-local relations, then we're done. Release the
4173  * memory and return.
4174  */
4175  if (n == 0)
4176  {
4177  pfree(rels);
4178  return;
4179  }
4180 
4181  /*
4182  * This is used to remember the number of blocks for all the relations
4183  * forks.
4184  */
4185  block = (BlockNumber (*)[MAX_FORKNUM + 1])
4186  palloc(sizeof(BlockNumber) * n * (MAX_FORKNUM + 1));
4187 
4188  /*
4189  * We can avoid scanning the entire buffer pool if we know the exact size
4190  * of each of the given relation forks. See DropRelationBuffers.
4191  */
4192  for (i = 0; i < n && cached; i++)
4193  {
4194  for (int j = 0; j <= MAX_FORKNUM; j++)
4195  {
4196  /* Get the number of blocks for a relation's fork. */
4197  block[i][j] = smgrnblocks_cached(rels[i], j);
4198 
4199  /* We only need to consider the relation forks that exist. */
4200  if (block[i][j] == InvalidBlockNumber)
4201  {
4202  if (!smgrexists(rels[i], j))
4203  continue;
4204  cached = false;
4205  break;
4206  }
4207 
4208  /* calculate the total number of blocks to be invalidated */
4209  nBlocksToInvalidate += block[i][j];
4210  }
4211  }
4212 
4213  /*
4214  * We apply the optimization iff the total number of blocks to invalidate
4215  * is below the BUF_DROP_FULL_SCAN_THRESHOLD.
4216  */
4217  if (cached && nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
4218  {
4219  for (i = 0; i < n; i++)
4220  {
4221  for (int j = 0; j <= MAX_FORKNUM; j++)
4222  {
4223  /* ignore relation forks that don't exist */
4224  if (!BlockNumberIsValid(block[i][j]))
4225  continue;
4226 
4227  /* drop all the buffers for a particular relation fork */
4228  FindAndDropRelationBuffers(rels[i]->smgr_rlocator.locator,
4229  j, block[i][j], 0);
4230  }
4231  }
4232 
4233  pfree(block);
4234  pfree(rels);
4235  return;
4236  }
4237 
4238  pfree(block);
4239  locators = palloc(sizeof(RelFileLocator) * n); /* non-local relations */
4240  for (i = 0; i < n; i++)
4241  locators[i] = rels[i]->smgr_rlocator.locator;
4242 
4243  /*
4244  * For a small number of relations to drop, just use a simple walk-through
4245  * to save the bsearch overhead. The threshold is more of a guess than an
4246  * exactly determined value, as it depends on many factors (CPU and RAM
4247  * speeds, amount of shared buffers, etc.).
4248  */
4249  use_bsearch = n > RELS_BSEARCH_THRESHOLD;
4250 
4251  /* sort the list of rlocators if necessary */
4252  if (use_bsearch)
4253  qsort(locators, n, sizeof(RelFileLocator), rlocator_comparator);
4254 
4255  for (i = 0; i < NBuffers; i++)
4256  {
4257  RelFileLocator *rlocator = NULL;
4258  BufferDesc *bufHdr = GetBufferDescriptor(i);
4259  uint32 buf_state;
4260 
4261  /*
4262  * As in DropRelationBuffers, an unlocked precheck should be safe and
4263  * saves some cycles.
4264  */
4265 
4266  if (!use_bsearch)
4267  {
4268  int j;
4269 
4270  for (j = 0; j < n; j++)
4271  {
4272  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &locators[j]))
4273  {
4274  rlocator = &locators[j];
4275  break;
4276  }
4277  }
4278  }
4279  else
4280  {
4281  RelFileLocator locator;
4282 
4283  locator = BufTagGetRelFileLocator(&bufHdr->tag);
4284  rlocator = bsearch((const void *) &(locator),
4285  locators, n, sizeof(RelFileLocator),
4287  }
4288 
4289  /* buffer doesn't belong to any of the given relfilelocators; skip it */
4290  if (rlocator == NULL)
4291  continue;
4292 
4293  buf_state = LockBufHdr(bufHdr);
4294  if (BufTagMatchesRelFileLocator(&bufHdr->tag, rlocator))
4295  InvalidateBuffer(bufHdr); /* releases spinlock */
4296  else
4297  UnlockBufHdr(bufHdr, buf_state);
4298  }
4299 
4300  pfree(locators);
4301  pfree(rels);
4302 }
4303 
4304 /* ---------------------------------------------------------------------
4305  * FindAndDropRelationBuffers
4306  *
4307  * This function performs lookups in the BufMapping table and removes from
4308  * the buffer pool all the pages of the specified relation fork that have
4309  * block numbers >= firstDelBlock. (In particular, with firstDelBlock = 0, all
4310  * pages are removed.)
4311  * --------------------------------------------------------------------
4312  */
4313 static void
4315  BlockNumber nForkBlock,
4316  BlockNumber firstDelBlock)
4317 {
4318  BlockNumber curBlock;
4319 
4320  for (curBlock = firstDelBlock; curBlock < nForkBlock; curBlock++)
4321  {
4322  uint32 bufHash; /* hash value for tag */
4323  BufferTag bufTag; /* identity of requested block */
4324  LWLock *bufPartitionLock; /* buffer partition lock for it */
4325  int buf_id;
4326  BufferDesc *bufHdr;
4327  uint32 buf_state;
4328 
4329  /* create a tag so we can lookup the buffer */
4330  InitBufferTag(&bufTag, &rlocator, forkNum, curBlock);
4331 
4332  /* determine its hash code and partition lock ID */
4333  bufHash = BufTableHashCode(&bufTag);
4334  bufPartitionLock = BufMappingPartitionLock(bufHash);
4335 
4336  /* Check that it is in the buffer pool. If not, do nothing. */
4337  LWLockAcquire(bufPartitionLock, LW_SHARED);
4338  buf_id = BufTableLookup(&bufTag, bufHash);
4339  LWLockRelease(bufPartitionLock);
4340 
4341  if (buf_id < 0)
4342  continue;
4343 
4344  bufHdr = GetBufferDescriptor(buf_id);
4345 
4346  /*
4347  * We need to lock the buffer header and recheck if the buffer is
4348  * still associated with the same block because the buffer could be
4349  * evicted by some other backend loading blocks for a different
4350  * relation after we release the lock on the BufMapping table.
4351  */
4352  buf_state = LockBufHdr(bufHdr);
4353 
4354  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator) &&
4355  BufTagGetForkNum(&bufHdr->tag) == forkNum &&
4356  bufHdr->tag.blockNum >= firstDelBlock)
4357  InvalidateBuffer(bufHdr); /* releases spinlock */
4358  else
4359  UnlockBufHdr(bufHdr, buf_state);
4360  }
4361 }
4362 
4363 /* ---------------------------------------------------------------------
4364  * DropDatabaseBuffers
4365  *
4366  * This function removes all the buffers in the buffer cache for a
4367  * particular database. Dirty pages are simply dropped, without
4368  * bothering to write them out first. This is used when we destroy a
4369  * database, to avoid trying to flush data to disk when the directory
4370  * tree no longer exists. Implementation is pretty similar to
4371  * DropRelationBuffers() which is for destroying just one relation.
4372  * --------------------------------------------------------------------
4373  */
4374 void
4376 {
4377  int i;
4378 
4379  /*
4380  * We needn't consider local buffers, since by assumption the target
4381  * database isn't our own.
4382  */
4383 
4384  for (i = 0; i < NBuffers; i++)
4385  {
4386  BufferDesc *bufHdr = GetBufferDescriptor(i);
4387  uint32 buf_state;
4388 
4389  /*
4390  * As in DropRelationBuffers, an unlocked precheck should be safe and
4391  * saves some cycles.
4392  */
4393  if (bufHdr->tag.dbOid != dbid)
4394  continue;
4395 
4396  buf_state = LockBufHdr(bufHdr);
4397  if (bufHdr->tag.dbOid == dbid)
4398  InvalidateBuffer(bufHdr); /* releases spinlock */
4399  else
4400  UnlockBufHdr(bufHdr, buf_state);
4401  }
4402 }
4403 
4404 /* -----------------------------------------------------------------
4405  * PrintBufferDescs
4406  *
4407  * this function prints all the buffer descriptors, for debugging
4408  * use only.
4409  * -----------------------------------------------------------------
4410  */
4411 #ifdef NOT_USED
4412 void
4413 PrintBufferDescs(void)
4414 {
4415  int i;
4416 
4417  for (i = 0; i < NBuffers; ++i)
4418  {
4421 
4422  /* theoretically we should lock the bufhdr here */
4423  elog(LOG,
4424  "[%02d] (freeNext=%d, rel=%s, "
4425  "blockNum=%u, flags=0x%x, refcount=%u %d)",
4426  i, buf->freeNext,
4429  buf->tag.blockNum, buf->flags,
4430  buf->refcount, GetPrivateRefCount(b));
4431  }
4432 }
4433 #endif
4434 
4435 #ifdef NOT_USED
4436 void
4437 PrintPinnedBufs(void)
4438 {
4439  int i;
4440 
4441  for (i = 0; i < NBuffers; ++i)
4442  {
4445 
4446  if (GetPrivateRefCount(b) > 0)
4447  {
4448  /* theoretically we should lock the bufhdr here */
4449  elog(LOG,
4450  "[%02d] (freeNext=%d, rel=%s, "
4451  "blockNum=%u, flags=0x%x, refcount=%u %d)",
4452  i, buf->freeNext,
4454  BufTagGetForkNum(&buf->tag)),
4455  buf->tag.blockNum, buf->flags,
4456  buf->refcount, GetPrivateRefCount(b));
4457  }
4458  }
4459 }
4460 #endif
4461 
4462 /* ---------------------------------------------------------------------
4463  * FlushRelationBuffers
4464  *
4465  * This function writes all dirty pages of a relation out to disk
4466  * (or more accurately, out to kernel disk buffers), ensuring that the
4467  * kernel has an up-to-date view of the relation.
4468  *
4469  * Generally, the caller should be holding AccessExclusiveLock on the
4470  * target relation to ensure that no other backend is busy dirtying
4471  * more blocks of the relation; the effects can't be expected to last
4472  * after the lock is released.
4473  *
4474  * XXX currently it sequentially searches the buffer pool, should be
4475  * changed to more clever ways of searching. This routine is not
4476  * used in any performance-critical code paths, so it's not worth
4477  * adding additional overhead to normal paths to make it go faster.
4478  * --------------------------------------------------------------------
4479  */
4480 void
4482 {
4483  int i;
4484  BufferDesc *bufHdr;
4485  SMgrRelation srel = RelationGetSmgr(rel);
4486 
4487  if (RelationUsesLocalBuffers(rel))
4488  {
4489  for (i = 0; i < NLocBuffer; i++)
4490  {
4491  uint32 buf_state;
4492  instr_time io_start;
4493 
4494  bufHdr = GetLocalBufferDescriptor(i);
4495  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
4496  ((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
4497  (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
4498  {
4499  ErrorContextCallback errcallback;
4500  Page localpage;
4501 
4502  localpage = (char *) LocalBufHdrGetBlock(bufHdr);
4503 
4504  /* Setup error traceback support for ereport() */
4506  errcallback.arg = (void *) bufHdr;
4507  errcallback.previous = error_context_stack;
4508  error_context_stack = &errcallback;
4509 
4510  PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
4511 
4513 
4514  smgrwrite(srel,
4515  BufTagGetForkNum(&bufHdr->tag),
4516  bufHdr->tag.blockNum,
4517  localpage,
4518  false);
4519 
4522  io_start, 1);
4523 
4524  buf_state &= ~(BM_DIRTY | BM_JUST_DIRTIED);
4525  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
4526 
4528 
4529  /* Pop the error context stack */
4530  error_context_stack = errcallback.previous;
4531  }
4532  }
4533 
4534  return;
4535  }
4536 
4537  for (i = 0; i < NBuffers; i++)
4538  {
4539  uint32 buf_state;
4540 
4541  bufHdr = GetBufferDescriptor(i);
4542 
4543  /*
4544  * As in DropRelationBuffers, an unlocked precheck should be safe and
4545  * saves some cycles.
4546  */
4547  if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator))
4548  continue;
4549 
4550  /* Make sure we can handle the pin */
4553 
4554  buf_state = LockBufHdr(bufHdr);
4555  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
4556  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
4557  {
4558  PinBuffer_Locked(bufHdr);
4562  UnpinBuffer(bufHdr);
4563  }
4564  else
4565  UnlockBufHdr(bufHdr, buf_state);
4566  }
4567 }
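/*
 * Illustrative sketch (hypothetical caller): flushing a relation while
 * holding AccessExclusiveLock so that no other backend can dirty more of
 * its blocks behind our back.
 */
#include "postgres.h"
#include "access/table.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"

static void
example_flush_relation(Oid relid)
{
	Relation	rel = table_open(relid, AccessExclusiveLock);

	FlushRelationBuffers(rel);

	/* keep the lock until commit, as usual */
	table_close(rel, NoLock);
}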
4568 
4569 /* ---------------------------------------------------------------------
4570  * FlushRelationsAllBuffers
4571  *
4572  * This function flushes out of the buffer pool all the pages of all
4573  * forks of the specified smgr relations. It's equivalent to calling
4574  * FlushRelationBuffers once per relation. The relations are assumed not
4575  * to use local buffers.
4576  * --------------------------------------------------------------------
4577  */
4578 void
4580 {
4581  int i;
4582  SMgrSortArray *srels;
4583  bool use_bsearch;
4584 
4585  if (nrels == 0)
4586  return;
4587 
4588  /* fill-in array for qsort */
4589  srels = palloc(sizeof(SMgrSortArray) * nrels);
4590 
4591  for (i = 0; i < nrels; i++)
4592  {
4593  Assert(!RelFileLocatorBackendIsTemp(smgrs[i]->smgr_rlocator));
4594 
4595  srels[i].rlocator = smgrs[i]->smgr_rlocator.locator;
4596  srels[i].srel = smgrs[i];
4597  }
4598 
4599  /*
4600  * Save the bsearch overhead for a small number of relations to sync. See
4601  * DropRelationsAllBuffers for details.
4602  */
4603  use_bsearch = nrels > RELS_BSEARCH_THRESHOLD;
4604 
4605  /* sort the list of SMgrRelations if necessary */
4606  if (use_bsearch)
4607  qsort(srels, nrels, sizeof(SMgrSortArray), rlocator_comparator);
4608 
4609  for (i = 0; i < NBuffers; i++)
4610  {
4611  SMgrSortArray *srelent = NULL;
4612  BufferDesc *bufHdr = GetBufferDescriptor(i);
4613  uint32 buf_state;
4614 
4615  /*
4616  * As in DropRelationBuffers, an unlocked precheck should be safe and
4617  * saves some cycles.
4618  */
4619 
4620  if (!use_bsearch)
4621  {
4622  int j;
4623 
4624  for (j = 0; j < nrels; j++)
4625  {
4626  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srels[j].rlocator))
4627  {
4628  srelent = &srels[j];
4629  break;
4630  }
4631  }
4632  }
4633  else
4634  {
4635  RelFileLocator rlocator;
4636 
4637  rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
4638  srelent = bsearch((const void *) &(rlocator),
4639  srels, nrels, sizeof(SMgrSortArray),
4641  }
4642 
4643  /* buffer doesn't belong to any of the given relfilelocators; skip it */
4644  if (srelent == NULL)
4645  continue;
4646 
4647  /* Make sure we can handle the pin */
4650 
4651  buf_state = LockBufHdr(bufHdr);
4652  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srelent->rlocator) &&
4653  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
4654  {
4655  PinBuffer_Locked(bufHdr);
4657  FlushBuffer(bufHdr, srelent->srel, IOOBJECT_RELATION, IOCONTEXT_NORMAL);
4659  UnpinBuffer(bufHdr);
4660  }
4661  else
4662  UnlockBufHdr(bufHdr, buf_state);
4663  }
4664 
4665  pfree(srels);
4666 }
4667 
4668 /* ---------------------------------------------------------------------
4669  * RelationCopyStorageUsingBuffer
4670  *
4671  * Copy fork's data using bufmgr. Same as RelationCopyStorage but instead
4672  * of using smgrread and smgrextend this will copy using bufmgr APIs.
4673  *
4674  * Refer comments atop CreateAndCopyRelationData() for details about
4675  * 'permanent' parameter.
4676  * --------------------------------------------------------------------
4677  */
4678 static void
4680  RelFileLocator dstlocator,
4681  ForkNumber forkNum, bool permanent)
4682 {
4683  Buffer srcBuf;
4684  Buffer dstBuf;
4685  Page srcPage;
4686  Page dstPage;
4687  bool use_wal;
4688  BlockNumber nblocks;
4689  BlockNumber blkno;
4691  BufferAccessStrategy bstrategy_src;
4692  BufferAccessStrategy bstrategy_dst;
4693 
4694  /*
4695  * In general, we want to write WAL whenever wal_level > 'minimal', but we
4696  * can skip it when copying any fork of an unlogged relation other than
4697  * the init fork.
4698  */
4699  use_wal = XLogIsNeeded() && (permanent || forkNum == INIT_FORKNUM);
4700 
4701  /* Get number of blocks in the source relation. */
4702  nblocks = smgrnblocks(smgropen(srclocator, INVALID_PROC_NUMBER),
4703  forkNum);
4704 
4705  /* Nothing to copy; just return. */
4706  if (nblocks == 0)
4707  return;
4708 
4709  /*
4710  * Bulk-extend the destination relation to the same size as the source
4711  * relation before starting to copy block by block.
4712  */
4713  memset(buf.data, 0, BLCKSZ);
4714  smgrextend(smgropen(dstlocator, INVALID_PROC_NUMBER), forkNum, nblocks - 1,
4715  buf.data, true);
4716 
4717  /* This is a bulk operation, so use buffer access strategies. */
4718  bstrategy_src = GetAccessStrategy(BAS_BULKREAD);
4719  bstrategy_dst = GetAccessStrategy(BAS_BULKWRITE);
4720 
4721  /* Iterate over each block of the source relation file. */
4722  for (blkno = 0; blkno < nblocks; blkno++)
4723  {
4725 
4726  /* Read block from source relation. */
4727  srcBuf = ReadBufferWithoutRelcache(srclocator, forkNum, blkno,
4728  RBM_NORMAL, bstrategy_src,
4729  permanent);
4730  LockBuffer(srcBuf, BUFFER_LOCK_SHARE);
4731  srcPage = BufferGetPage(srcBuf);
4732 
4733  dstBuf = ReadBufferWithoutRelcache(dstlocator, forkNum, blkno,
4734  RBM_ZERO_AND_LOCK, bstrategy_dst,
4735  permanent);
4736  dstPage = BufferGetPage(dstBuf);
4737 
4739 
4740  /* Copy page data from the source to the destination. */
4741  memcpy(dstPage, srcPage, BLCKSZ);
4742  MarkBufferDirty(dstBuf);
4743 
4744  /* WAL-log the copied page. */
4745  if (use_wal)
4746  log_newpage_buffer(dstBuf, true);
4747 
4748  END_CRIT_SECTION();
4749 
4750  UnlockReleaseBuffer(dstBuf);
4751  UnlockReleaseBuffer(srcBuf);
4752  }
4753 
4754  FreeAccessStrategy(bstrategy_src);
4755  FreeAccessStrategy(bstrategy_dst);
4756 }
4757 
4758 /* ---------------------------------------------------------------------
4759  * CreateAndCopyRelationData
4760  *
4761  * Create destination relation storage and copy all forks from the
4762  * source relation to the destination.
4763  *
4764  * Pass permanent as true for permanent relations and false for
4765  * unlogged relations. Currently this API is not supported for
4766  * temporary relations.
4767  * --------------------------------------------------------------------
4768  */
4769 void
4771  RelFileLocator dst_rlocator, bool permanent)
4772 {
4773  char relpersistence;
4774  SMgrRelation src_rel;
4775  SMgrRelation dst_rel;
4776 
4777  /* Set the relpersistence. */
4778  relpersistence = permanent ?
4779  RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED;
4780 
4781  src_rel = smgropen(src_rlocator, INVALID_PROC_NUMBER);
4782  dst_rel = smgropen(dst_rlocator, INVALID_PROC_NUMBER);
4783 
4784  /*
4785  * Create and copy all forks of the relation. During CREATE DATABASE we
4786  * have a separate cleanup mechanism which deletes the complete database
4787  * directory. Therefore, each individual relation doesn't need to be
4788  * registered for cleanup.
4789  */
4790  RelationCreateStorage(dst_rlocator, relpersistence, false);
4791 
4792  /* copy main fork. */
4793  RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, MAIN_FORKNUM,
4794  permanent);
4795 
4796  /* copy those extra forks that exist */
4797  for (ForkNumber forkNum = MAIN_FORKNUM + 1;
4798  forkNum <= MAX_FORKNUM; forkNum++)
4799  {
4800  if (smgrexists(src_rel, forkNum))
4801  {
4802  smgrcreate(dst_rel, forkNum, false);
4803 
4804  /*
4805  * WAL log creation if the relation is persistent, or this is the
4806  * init fork of an unlogged relation.
4807  */
4808  if (permanent || forkNum == INIT_FORKNUM)
4809  log_smgrcreate(&dst_rlocator, forkNum);
4810 
4811  /* Copy a fork's data, block by block. */
4812  RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, forkNum,
4813  permanent);
4814  }
4815  }
4816 }
4817 
4818 /* ---------------------------------------------------------------------
4819  * FlushDatabaseBuffers
4820  *
4821  * This function writes all dirty pages of a database out to disk
4822  * (or more accurately, out to kernel disk buffers), ensuring that the
4823  * kernel has an up-to-date view of the database.
4824  *
4825  * Generally, the caller should be holding an appropriate lock to ensure
4826  * no other backend is active in the target database; otherwise more
4827  * pages could get dirtied.
4828  *
4829  * Note we don't worry about flushing any pages of temporary relations.
4830  * It's assumed these wouldn't be interesting.
4831  * --------------------------------------------------------------------
4832  */
4833 void
4835 {
4836  int i;
4837  BufferDesc *bufHdr;
4838 
4839  for (i = 0; i < NBuffers; i++)
4840  {
4841  uint32 buf_state;
4842 
4843  bufHdr = GetBufferDescriptor(i);
4844 
4845  /*
4846  * As in DropRelationBuffers, an unlocked precheck should be safe and
4847  * saves some cycles.
4848  */
4849  if (bufHdr->tag.dbOid != dbid)
4850  continue;
4851 
4852  /* Make sure we can handle the pin */
4855 
4856  buf_state = LockBufHdr(bufHdr);
4857  if (bufHdr->tag.dbOid == dbid &&
4858  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
4859  {
4860  PinBuffer_Locked(bufHdr);
4864  UnpinBuffer(bufHdr);
4865  }
4866  else
4867  UnlockBufHdr(bufHdr, buf_state);
4868  }
4869 }
4870 
4871 /*
4872  * Flush a previously pinned and locked (in shared or exclusive mode) buffer
4873  * to the OS.
4874  */
4875 void
4877 {
4878  BufferDesc *bufHdr;
4879 
4880  /* currently not needed, but no fundamental reason not to support */
4882 
4884 
4885  bufHdr = GetBufferDescriptor(buffer - 1);
4886 
4888 
4890 }
4891 
4892 /*
4893  * ReleaseBuffer -- release the pin on a buffer
4894  */
4895 void
4897 {
4898  if (!BufferIsValid(buffer))
4899  elog(ERROR, "bad buffer ID: %d", buffer);
4900 
4901  if (BufferIsLocal(buffer))
4903  else
4905 }
4906 
4907 /*
4908  * UnlockReleaseBuffer -- release the content lock and pin on a buffer
4909  *
4910  * This is just a shorthand for a common combination.
4911  */
4912 void
4914 {
4917 }
4918 
4919 /*
4920  * IncrBufferRefCount
4921  * Increment the pin count on a buffer that we have *already* pinned
4922  * at least once.
4923  *
4924  * This function cannot be used on a buffer we do not have pinned,
4925  * because it doesn't change the shared buffer state.
4926  */
4927 void
4929 {
4932  if (BufferIsLocal(buffer))
4933  LocalRefCount[-buffer - 1]++;
4934  else
4935  {
4936  PrivateRefCountEntry *ref;
4937 
4938  ref = GetPrivateRefCountEntry(buffer, true);
4939  Assert(ref != NULL);
4940  ref->refcount++;
4941  }
4943 }
4944 
4945 /*
4946  * MarkBufferDirtyHint
4947  *
4948  * Mark a buffer dirty for non-critical changes.
4949  *
4950  * This is essentially the same as MarkBufferDirty, except:
4951  *
4952  * 1. The caller does not write WAL; so if checksums are enabled, we may need
4953  * to write an XLOG_FPI_FOR_HINT WAL record to protect against torn pages.
4954  * 2. The caller might have only share-lock instead of exclusive-lock on the
4955  * buffer's content lock.
4956  * 3. This function does not guarantee that the buffer is always marked dirty
4957  * (due to a race condition), so it cannot be used for important changes.
4958  */
4959 void
4961 {
4962  BufferDesc *bufHdr;
4963  Page page = BufferGetPage(buffer);
4964 
4965  if (!BufferIsValid(buffer))
4966  elog(ERROR, "bad buffer ID: %d", buffer);
4967 
4968  if (BufferIsLocal(buffer))
4969  {
4971  return;
4972  }
4973 
4974  bufHdr = GetBufferDescriptor(buffer - 1);
4975 
4977  /* here, either share or exclusive lock is OK */
4979 
4980  /*
4981  * This routine might get called many times on the same page, if we are
4982  * making the first scan after commit of an xact that added/deleted many
4983  * tuples. So, be as quick as we can if the buffer is already dirty. We
4984  * do this by not acquiring spinlock if it looks like the status bits are
4985  * already set. Since we make this test unlocked, there's a chance we
4986  * might fail to notice that the flags have just been cleared, and failed
4987  * to reset them, due to memory-ordering issues. But since this function
4988  * is only intended to be used in cases where failing to write out the
4989  * data would be harmless anyway, it doesn't really matter.
4990  */
4991  if ((pg_atomic_read_u32(&bufHdr->state) & (BM_DIRTY | BM_JUST_DIRTIED)) !=
4993  {
4995  bool dirtied = false;
4996  bool delayChkptFlags = false;
4997  uint32 buf_state;
4998 
4999  /*
5000  * If we need to protect hint bit updates from torn writes, WAL-log a
5001  * full page image of the page. This full page image is only necessary
5002  * if the hint bit update is the first change to the page since the
5003  * last checkpoint.
5004  *
5005  * We don't check full_page_writes here because that logic is included
5006  * when we call XLogInsert() since the value changes dynamically.
5007  */
5008  if (XLogHintBitIsNeeded() &&
5009  (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
5010  {
5011  /*
5012  * If we must not write WAL, due to a relfilelocator-specific
5013  * condition or being in recovery, don't dirty the page. We can
5014  * still set the hint, just not dirty the page as a result, so the hint
5015  * is lost when we evict the page or shut down.
5016  *
5017  * See src/backend/storage/page/README for longer discussion.
5018  */
5019  if (RecoveryInProgress() ||
5021  return;
5022 
5023  /*
5024  * If the block is already dirty because we either made a change
5025  * or set a hint already, then we don't need to write a full page
5026  * image. Note that aggressive cleaning of blocks dirtied by hint
5027  * bit setting would increase the call rate. Bulk setting of hint
5028  * bits would reduce the call rate...
5029  *
5030  * We must issue the WAL record before we mark the buffer dirty.
5031  * Otherwise we might write the page before we write the WAL. That
5032  * causes a race condition, since a checkpoint might occur between
5033  * writing the WAL record and marking the buffer dirty. We solve
5034  * that with a kluge, but one that is already in use during
5035  * transaction commit to prevent race conditions. Basically, we
5036  * simply prevent the checkpoint WAL record from being written
5037  * until we have marked the buffer dirty. We don't start the
5038  * checkpoint flush until we have marked dirty, so our checkpoint
5039  * must flush the change to disk successfully or the checkpoint
5040  * never gets written, so crash recovery will fix it.
5041  *
5042  * It's possible we may enter here without an xid, so it is
5043  * essential that CreateCheckPoint waits for virtual transactions
5044  * rather than full transactionids.
5045  */
5048  delayChkptFlags = true;
5049  lsn = XLogSaveBufferForHint(buffer, buffer_std);
5050  }
5051 
5052  buf_state = LockBufHdr(bufHdr);
5053 
5054  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
5055 
5056  if (!(buf_state & BM_DIRTY))
5057  {
5058  dirtied = true; /* Means "will be dirtied by this action" */
5059 
5060  /*
5061  * Set the page LSN if we wrote a backup block. We aren't supposed
5062  * to set this when only holding a share lock but as long as we
5063  * serialise it somehow we're OK. We choose to set LSN while
5064  * holding the buffer header lock, which causes any reader of an
5065  * LSN who holds only a share lock to also obtain a buffer header
5066  * lock before using PageGetLSN(), which is enforced in
5067  * BufferGetLSNAtomic().
5068  *
5069  * If checksums are enabled, you might think we should reset the
5070  * checksum here. That will happen when the page is written
5071  * sometime later in this checkpoint cycle.
5072  */
5073  if (!XLogRecPtrIsInvalid(lsn))
5074  PageSetLSN(page, lsn);
5075  }
5076 
5077  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
5078  UnlockBufHdr(bufHdr, buf_state);
5079 
5080  if (delayChkptFlags)
5082 
5083  if (dirtied)
5084  {
5085  VacuumPageDirty++;
5087  if (VacuumCostActive)
5089  }
5090  }
5091 }
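/*
 * Illustrative sketch (hypothetical caller): recording a non-critical,
 * recomputable hint.  A share lock on the content is enough; the actual
 * hint update is elided since it depends on the caller's page format.
 */
static void
example_set_hint(Buffer buffer)
{
	LockBuffer(buffer, BUFFER_LOCK_SHARE);

	/* ... update some recomputable hint on BufferGetPage(buffer) ... */

	/* true: standard page layout, so the FPI can skip the pd_lower/pd_upper hole */
	MarkBufferDirtyHint(buffer, true);

	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
}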
5092 
5093 /*
5094  * Release buffer content locks for shared buffers.
5095  *
5096  * Used to clean up after errors.
5097  *
5098  * Currently, we can expect that lwlock.c's LWLockReleaseAll() took care
5099  * of releasing buffer content locks per se; the only thing we need to deal
5100  * with here is clearing any PIN_COUNT request that was in progress.
5101  */
5102 void
5104 {
5106 
5107  if (buf)
5108  {
5109  uint32 buf_state;
5110 
5111  buf_state = LockBufHdr(buf);
5112 
5113  /*
5114  * Don't complain if flag bit not set; it could have been reset but we
5115  * got a cancel/die interrupt before getting the signal.
5116  */
5117  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
5118  buf->wait_backend_pgprocno == MyProcNumber)
5119  buf_state &= ~BM_PIN_COUNT_WAITER;
5120 
5121  UnlockBufHdr(buf, buf_state);
5122 
5123  PinCountWaitBuf = NULL;
5124  }
5125 }
5126 
5127 /*
5128  * Acquire or release the content_lock for the buffer.
5129  */
5130 void
5132 {
5133  BufferDesc *buf;
5134 
5136  if (BufferIsLocal(buffer))
5137  return; /* local buffers need no lock */
5138 
5140 
5141  if (mode == BUFFER_LOCK_UNLOCK)
5143  else if (mode == BUFFER_LOCK_SHARE)
5145  else if (mode == BUFFER_LOCK_EXCLUSIVE)
5147  else
5148  elog(ERROR, "unrecognized buffer lock mode: %d", mode);
5149 }
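/*
 * Illustrative sketch (hypothetical caller): the canonical read pattern
 * built from the entry points in this file - pin, lock, examine, then
 * unlock and unpin in one step.
 */
static void
example_read_block(Relation rel, BlockNumber blkno)
{
	Buffer		buf = ReadBuffer(rel, blkno);

	LockBuffer(buf, BUFFER_LOCK_SHARE);

	/* ... examine BufferGetPage(buf) ... */

	UnlockReleaseBuffer(buf);	/* BUFFER_LOCK_UNLOCK + ReleaseBuffer */
}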
5150 
5151 /*
5152  * Acquire the content_lock for the buffer, but only if we don't have to wait.
5153  *
5154  * This assumes the caller wants BUFFER_LOCK_EXCLUSIVE mode.
5155  */
5156 bool
5158 {
5159  BufferDesc *buf;
5160 
5162  if (BufferIsLocal(buffer))
5163  return true; /* act as though we got it */
5164 
5166 
5168  LW_EXCLUSIVE);
5169 }
5170 
5171 /*
5172  * Verify that this backend is pinning the buffer exactly once.
5173  *
5174  * NOTE: Like in BufferIsPinned(), what we check here is that *this* backend
5175  * holds a pin on the buffer. We do not care whether some other backend does.
5176  */
5177 void
5179 {
5180  if (BufferIsLocal(buffer))
5181  {
5182  if (LocalRefCount[-buffer - 1] != 1)
5183  elog(ERROR, "incorrect local pin count: %d",
5184  LocalRefCount[-buffer - 1]);
5185  }
5186  else
5187  {
5188  if (GetPrivateRefCount(buffer) != 1)
5189  elog(ERROR, "incorrect local pin count: %d",
5191  }
5192 }
5193 
5194 /*
5195  * LockBufferForCleanup - lock a buffer in preparation for deleting items
5196  *
5197  * Items may be deleted from a disk page only when the caller (a) holds an
5198  * exclusive lock on the buffer and (b) has observed that no other backend
5199  * holds a pin on the buffer. If there is a pin, then the other backend
5200  * might have a pointer into the buffer (for example, a heapscan reference
5201  * to an item --- see README for more details). It's OK if a pin is added
5202  * after the cleanup starts, however; the newly-arrived backend will be
5203  * unable to look at the page until we release the exclusive lock.
5204  *
5205  * To implement this protocol, a would-be deleter must pin the buffer and
5206  * then call LockBufferForCleanup(). LockBufferForCleanup() is similar to
5207  * LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE), except that it loops until
5208  * it has successfully observed pin count = 1.
5209  */
5210 void
5212 {
5213  BufferDesc *bufHdr;
5214  TimestampTz waitStart = 0;
5215  bool waiting = false;
5216  bool logged_recovery_conflict = false;
5217 
5219  Assert(PinCountWaitBuf == NULL);
5220 
5222 
5223  /* Nobody else to wait for */
5224  if (BufferIsLocal(buffer))
5225  return;
5226 
5227  bufHdr = GetBufferDescriptor(buffer - 1);
5228 
5229  for (;;)
5230  {
5231  uint32 buf_state;
5232 
5233  /* Try to acquire lock */
5235  buf_state = LockBufHdr(bufHdr);
5236 
5237  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
5238  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
5239  {
5240  /* Successfully acquired exclusive lock with pincount 1 */
5241  UnlockBufHdr(bufHdr, buf_state);
5242 
5243  /*
5244  * Emit the log message if recovery conflict on buffer pin was
5245  * resolved but the startup process waited longer than
5246  * deadlock_timeout for it.
5247  */
5248  if (logged_recovery_conflict)
5250  waitStart, GetCurrentTimestamp(),
5251  NULL, false);
5252 
5253  if (waiting)
5254  {
5255  /* reset ps display to remove the suffix if we added one */
5257  waiting = false;
5258  }
5259  return;
5260  }
5261  /* Failed, so mark myself as waiting for pincount 1 */
5262  if (buf_state & BM_PIN_COUNT_WAITER)
5263  {
5264  UnlockBufHdr(bufHdr, buf_state);
5266  elog(ERROR, "multiple backends attempting to wait for pincount 1");
5267  }
5269  PinCountWaitBuf = bufHdr;
5270  buf_state |= BM_PIN_COUNT_WAITER;
5271  UnlockBufHdr(bufHdr, buf_state);
5273 
5274  /* Wait to be signaled by UnpinBuffer() */
5275  if (InHotStandby)
5276  {
5277  if (!waiting)
5278  {
5279  /* adjust the process title to indicate that it's waiting */
5280  set_ps_display_suffix("waiting");
5281  waiting = true;
5282  }
5283 
5284  /*
5285  * Emit the log message if the startup process is waiting longer
5286  * than deadlock_timeout for recovery conflict on buffer pin.
5287  *
5288  * Skip this if first time through because the startup process has
5289  * not started waiting yet in this case. So, the wait start
5290  * timestamp is set after this logic.
5291  */
5292  if (waitStart != 0 && !logged_recovery_conflict)
5293  {
5295 
5296  if (TimestampDifferenceExceeds(waitStart, now,
5297  DeadlockTimeout))
5298  {
5300  waitStart, now, NULL, true);
5301  logged_recovery_conflict = true;
5302  }
5303  }
5304 
5305  /*
5306  * Set the wait start timestamp if logging is enabled and first
5307  * time through.
5308  */
5309  if (log_recovery_conflict_waits && waitStart == 0)
5310  waitStart = GetCurrentTimestamp();
5311 
5312  /* Publish the bufid that Startup process waits on */
5314  /* Set alarm and then wait to be signaled by UnpinBuffer() */
5316  /* Reset the published bufid */
5318  }
5319  else
5320  ProcWaitForSignal(WAIT_EVENT_BUFFER_PIN);
5321 
5322  /*
5323  * Remove flag marking us as waiter. Normally this will not be set
5324  * anymore, but ProcWaitForSignal() can return for other signals as
5325  * well. We take care to only reset the flag if we're the waiter, as
5326  * theoretically another backend could have started waiting. That's
5327  * impossible with the current usages due to table level locking, but
5328  * better be safe.
5329  */
5330  buf_state = LockBufHdr(bufHdr);
5331  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
5333  buf_state &= ~BM_PIN_COUNT_WAITER;
5334  UnlockBufHdr(bufHdr, buf_state);
5335 
5336  PinCountWaitBuf = NULL;
5337  /* Loop back and try again */
5338  }
5339 }
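/*
 * Illustrative sketch (hypothetical caller): acquiring a cleanup lock as
 * described above - take our own pin first, then wait until no other
 * backend holds one.
 */
static void
example_cleanup_block(Relation rel, BlockNumber blkno)
{
	Buffer		buf = ReadBuffer(rel, blkno);

	LockBufferForCleanup(buf);	/* exclusive lock + sole-pin guarantee */

	/* ... safe to delete items from BufferGetPage(buf) here ... */

	UnlockReleaseBuffer(buf);
}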
5340 
5341 /*
5342  * Check called from ProcessRecoveryConflictInterrupts() when Startup process
5343  * requests cancellation of all pin holders that are blocking it.
5344  */
5345 bool
5347 {
5348  int bufid = GetStartupBufferPinWaitBufId();
5349 
5350  /*
5351  * If we get woken slowly then it's possible that the Startup process was
5352  * already woken by other backends before we got here. It's also possible
5353  * that we get here via multiple interrupts or at inappropriate
5354  * times, so make sure we do nothing if the bufid is not set.
5355  */
5356  if (bufid < 0)
5357  return false;
5358 
5359  if (GetPrivateRefCount(bufid + 1) > 0)
5360  return true;
5361 
5362  return false;
5363 }
5364 
5365 /*
5366  * ConditionalLockBufferForCleanup - as above, but don't wait to get the lock
5367  *
5368  * We won't loop, but just check once to see if the pin count is OK. If
5369  * not, return false with no lock held.
5370  */
5371 bool
5373 {
5374  BufferDesc *bufHdr;
5375  uint32 buf_state,
5376  refcount;
5377 
5379 
5380  if (BufferIsLocal(buffer))
5381  {
5382  refcount = LocalRefCount[-buffer - 1];
5383  /* There should be exactly one pin */
5384  Assert(refcount > 0);
5385  if (refcount != 1)
5386  return false;
5387  /* Nobody else to wait for */
5388  return true;
5389  }
5390 
5391  /* There should be exactly one local pin */
5393  Assert(refcount);
5394  if (refcount != 1)
5395  return false;
5396 
5397  /* Try to acquire lock */
5399  return false;
5400 
5401  bufHdr = GetBufferDescriptor(buffer - 1);
5402  buf_state = LockBufHdr(bufHdr);
5403  refcount = BUF_STATE_GET_REFCOUNT(buf_state);
5404 
5405  Assert(refcount > 0);
5406  if (refcount == 1)
5407  {
5408  /* Successfully acquired exclusive lock with pincount 1 */
5409  UnlockBufHdr(bufHdr, buf_state);
5410  return true;
5411  }
5412 
5413  /* Failed, so release the lock */
5414  UnlockBufHdr(bufHdr, buf_state);
5416  return false;
5417 }
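/*
 * Illustrative sketch (hypothetical caller): the "try, else skip this
 * page" pattern that the conditional variant above enables for
 * non-essential cleanup work.
 */
static bool
example_try_cleanup(Buffer buf)
{
	if (!ConditionalLockBufferForCleanup(buf))
		return false;			/* someone else holds a pin; come back later */

	/* ... cleanup work on BufferGetPage(buf) ... */

	LockBuffer(buf, BUFFER_LOCK_UNLOCK);
	return true;
}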
5418 
5419 /*
5420  * IsBufferCleanupOK - as above, but we already have the lock
5421  *
5422  * Check whether it's OK to perform cleanup on a buffer we've already
5423  * locked. If we observe that the pin count is 1, our exclusive lock
5424  * happens to be a cleanup lock, and we can proceed with anything that
5425  * would have been allowable had we sought a cleanup lock originally.
5426  */
5427 bool
5429 {
5430  BufferDesc *bufHdr;
5431  uint32 buf_state;
5432 
5434 
5435  if (BufferIsLocal(buffer))
5436  {
5437  /* There should be exactly one pin */
5438  if (LocalRefCount[-buffer - 1] != 1)
5439  return false;
5440  /* Nobody else to wait for */
5441  return true;
5442  }
5443 
5444  /* There should be exactly one local pin */
5445  if (GetPrivateRefCount(buffer) != 1)
5446  return false;
5447 
5448  bufHdr = GetBufferDescriptor(buffer - 1);
5449 
5450  /* caller must hold exclusive lock on buffer */
5452  LW_EXCLUSIVE));
5453 
5454  buf_state = LockBufHdr(bufHdr);
5455 
5456  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
5457  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
5458  {
5459  /* pincount is OK. */
5460  UnlockBufHdr(bufHdr, buf_state);
5461  return true;
5462  }
5463 
5464  UnlockBufHdr(bufHdr, buf_state);
5465  return false;
5466 }
5467 
5468 
5469 /*
5470  * Functions for buffer I/O handling
5471  *
5472  * Note: We assume that nested buffer I/O never occurs.
5473  * i.e. at most one BM_IO_IN_PROGRESS bit is set per proc.
5474  *
5475  * Also note that these are used only for shared buffers, not local ones.
5476  */
5477 
5478 /*
5479  * WaitIO -- Block until the IO_IN_PROGRESS flag on 'buf' is cleared.
5480  */
5481 static void
5483 {
5485 
5487  for (;;)
5488  {
5489  uint32 buf_state;
5490 
5491  /*
5492  * It may not be necessary to acquire the spinlock to check the flag
5493  * here, but since this test is essential for correctness, we'd better
5494  * play it safe.
5495  */
5496  buf_state = LockBufHdr(buf);
5497  UnlockBufHdr(buf, buf_state);
5498 
5499  if (!(buf_state & BM_IO_IN_PROGRESS))
5500  break;
5501  ConditionVariableSleep(cv, WAIT_EVENT_BUFFER_IO);
5502  }
5504 }
5505 
5506 /*
5507  * StartBufferIO: begin I/O on this buffer
5508  * (Assumptions)
5509  * My process is executing no IO
5510  * The buffer is Pinned
5511  *
5512  * In some scenarios there are race conditions in which multiple backends
5513  * could attempt the same I/O operation concurrently. If someone else
5514  * has already started I/O on this buffer then we will block on the
5515  * I/O condition variable until that I/O is done.
5516  *
5517  * Input operations are only attempted on buffers that are not BM_VALID,
5518  * and output operations only on buffers that are BM_VALID and BM_DIRTY,
5519  * so we can always tell if the work is already done.
5520  *
5521  * Returns true if we successfully marked the buffer as I/O busy,
5522  * false if someone else already did the work.
5523  *
5524  * If nowait is true, then we don't wait for an I/O to be finished by another
5525  * backend. In that case, false indicates either that the I/O was already
5526  * finished, or is still in progress. This is useful for callers that want to
5527  * find out if they can perform the I/O as part of a larger operation, without
5528  * waiting for the answer or distinguishing the reasons why not.
5529  */
5530 static bool
5531 StartBufferIO(BufferDesc *buf, bool forInput, bool nowait)
5532 {
5533  uint32 buf_state;
5534 
5536 
5537  for (;;)
5538  {
5539  buf_state = LockBufHdr(buf);
5540 
5541  if (!(buf_state & BM_IO_IN_PROGRESS))
5542  break;
5543  UnlockBufHdr(buf, buf_state);
5544  if (nowait)
5545  return false;
5546  WaitIO(buf);
5547  }
5548 
5549  /* Once we get here, there is definitely no I/O active on this buffer */
5550 
5551  if (forInput ? (buf_state & BM_VALID) : !(buf_state & BM_DIRTY))
5552  {
5553  /* someone else already did the I/O */
5554  UnlockBufHdr(buf, buf_state);
5555  return false;
5556  }
5557 
5558  buf_state |= BM_IO_IN_PROGRESS;
5559  UnlockBufHdr(buf, buf_state);
5560 
5563 
5564  return true;
5565 }
5566 
5567 /*
5568  * TerminateBufferIO: release a buffer we were doing I/O on
5569  * (Assumptions)
5570  * My process is executing IO for the buffer
5571  * BM_IO_IN_PROGRESS bit is set for the buffer
5572  * The buffer is Pinned
5573  *
5574  * If clear_dirty is true and BM_JUST_DIRTIED is not set, we clear the
5575  * buffer's BM_DIRTY flag. This is appropriate when terminating a
5576  * successful write. The check on BM_JUST_DIRTIED is necessary to avoid
5577  * marking the buffer clean if it was re-dirtied while we were writing.
5578  *
5579  * set_flag_bits gets ORed into the buffer's flags. It must include
5580  * BM_IO_ERROR in a failure case. For successful completion it could
5581  * be 0, or BM_VALID if we just finished reading in the page.
5582  *
5583  * If forget_owner is true, we release the buffer I/O from the current
5584  * resource owner. (forget_owner=false is used when the resource owner itself
5585  * is being released)
5586  */
5587 static void
5588 TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits,
5589  bool forget_owner)
5590 {
5591  uint32 buf_state;
5592 
5593  buf_state = LockBufHdr(buf);
5594 
5595  Assert(buf_state & BM_IO_IN_PROGRESS);
5596 
5597  buf_state &= ~(BM_IO_IN_PROGRESS | BM_IO_ERROR);
5598  if (clear_dirty && !(buf_state & BM_JUST_DIRTIED))
5599  buf_state &= ~(BM_DIRTY | BM_CHECKPOINT_NEEDED);
5600 
5601  buf_state |= set_flag_bits;
5602  UnlockBufHdr(buf, buf_state);
5603 
5604  if (forget_owner)
5605  ResourceOwnerForgetBufferIO(CurrentResourceOwner,
5606  BufferDescriptorGetBuffer(buf));
5607 
5608  ConditionVariableBroadcast(BufferDescriptorGetIOCV(buf));
5609 }
5610 
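/*
 * Illustrative sketch, not part of bufmgr.c: the flag combinations the
 * comment above describes, side by side.  The surrounding I/O is elided and
 * the guard macro and function name are invented for illustration.
 */
#ifdef BUFMGR_TERMINATE_SKETCH
static void
sketch_finish_buffer_io(BufferDesc *bufHdr, bool was_read, bool failed)
{
	if (failed)
		/* failure: leave BM_DIRTY alone, record the error */
		TerminateBufferIO(bufHdr, false, BM_IO_ERROR, true);
	else if (was_read)
		/* successful read: the page contents are now valid */
		TerminateBufferIO(bufHdr, false, BM_VALID, true);
	else
		/* successful write: clear BM_DIRTY unless the page was re-dirtied */
		TerminateBufferIO(bufHdr, true, 0, true);
}
#endif
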
5611 /*
5612  * AbortBufferIO: Clean up active buffer I/O after an error.
5613  *
5614  * All LWLocks we might have held have been released,
5615  * but we haven't yet released buffer pins, so the buffer is still pinned.
5616  *
5617  * If I/O was in progress, we always set BM_IO_ERROR, even though it's
5618  * possible the error condition wasn't related to the I/O.
5619  *
5620  * Note: this does not remove the buffer I/O from the resource owner.
5621  * That's correct when we're releasing the whole resource owner, but
5622  * beware if you use this in other contexts.
5623  */
5624 static void
5625 AbortBufferIO(Buffer buffer)
5626 {
5627  BufferDesc *buf_hdr = GetBufferDescriptor(buffer - 1);
5628  uint32 buf_state;
5629 
5630  buf_state = LockBufHdr(buf_hdr);
5631  Assert(buf_state & (BM_IO_IN_PROGRESS | BM_TAG_VALID));
5632 
5633  if (!(buf_state & BM_VALID))
5634  {
5635  Assert(!(buf_state & BM_DIRTY));
5636  UnlockBufHdr(buf_hdr, buf_state);
5637  }
5638  else
5639  {
5640  Assert(buf_state & BM_DIRTY);
5641  UnlockBufHdr(buf_hdr, buf_state);
5642 
5643  /* Issue a warning if this is not the first failure... */
5644  if (buf_state & BM_IO_ERROR)
5645  {
5646  /* Buffer is pinned, so we can read tag without spinlock */
5647  char *path;
5648 
5649  path = relpathperm(BufTagGetRelFileLocator(&buf_hdr->tag),
5650  BufTagGetForkNum(&buf_hdr->tag));
5651  ereport(WARNING,
5652  (errcode(ERRCODE_IO_ERROR),
5653  errmsg("could not write block %u of %s",
5654  buf_hdr->tag.blockNum, path),
5655  errdetail("Multiple failures --- write error might be permanent.")));
5656  pfree(path);
5657  }
5658  }
5659 
5660  TerminateBufferIO(buf_hdr, false, BM_IO_ERROR, false);
5661 }
5662 
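/*
 * Illustrative sketch, not part of bufmgr.c: conceptually, this is what
 * resource-owner cleanup does for each buffer I/O still registered with the
 * owner when an error aborts the transaction.  The loop and names below are
 * invented for illustration; the real call comes from the resource-owner
 * machinery rather than from a helper like this.
 */
#ifdef BUFMGR_ABORT_SKETCH
static void
sketch_abort_pending_buffer_ios(Buffer *pending, int npending)
{
	for (int i = 0; i < npending; i++)
	{
		/*
		 * Clears BM_IO_IN_PROGRESS and sets BM_IO_ERROR; deliberately does
		 * not forget the I/O from the owner, since the owner itself is
		 * being released.
		 */
		AbortBufferIO(pending[i]);
	}
}
#endif
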
5663 /*
5664  * Error context callback for errors occurring during shared buffer writes.
5665  */
5666 static void
5667 shared_buffer_write_error_callback(void *arg)
5668 {
5669  BufferDesc *bufHdr = (BufferDesc *) arg;
5670 
5671  /* Buffer is pinned, so we can read the tag without locking the spinlock */
5672  if (bufHdr != NULL)
5673  {
5674  char *path = relpathperm(BufTagGetRelFileLocator(&bufHdr->tag),
5675  BufTagGetForkNum(&bufHdr->tag));
5676 
5677  errcontext("writing block %u of relation %s",
5678  bufHdr->tag.blockNum, path);
5679  pfree(path);
5680  }
5681 }
5682 
5683 /*
5684  * Error context callback for errors occurring during local buffer writes.
5685  */
5686 static void
5687 local_buffer_write_error_callback(void *arg)
5688 {
5689  BufferDesc *bufHdr = (BufferDesc *) arg;
5690 
5691  if (bufHdr != NULL)
5692  {
5693  char *path = relpathbackend(BufTagGetRelFileLocator(&bufHdr->tag),
5694  MyProcNumber,
5695  BufTagGetForkNum(&bufHdr->tag));
5696 
5697  errcontext("writing block %u of relation %s",
5698  bufHdr->tag.blockNum, path);
5699  pfree(path);
5700  }
5701 }
5702 
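/*
 * Illustrative sketch, not part of the callbacks above: how a write path
 * installs one of them so that any error raised while the block is being
 * written is reported with a "writing block N of relation X" context line.
 * FlushBuffer() in this file follows this shape; the guard macro and
 * function name are invented and the actual write is elided.
 */
#ifdef BUFMGR_ERRCONTEXT_SKETCH
static void
sketch_write_with_error_context(BufferDesc *buf)
{
	ErrorContextCallback errcallback;

	/* push our callback onto the error context stack */
	errcallback.callback = shared_buffer_write_error_callback;
	errcallback.arg = (void *) buf;
	errcallback.previous = error_context_stack;
	error_context_stack = &errcallback;

	/* ... perform the smgr write of the block here ... */

	/* pop the error context stack */
	error_context_stack = errcallback.previous;
}
#endif
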
5703 /*
5704  * RelFileLocator qsort/bsearch comparator; see RelFileLocatorEquals.
5705  */
5706 static int
5707 rlocator_comparator(const void *p1, const void *p2)
5708 {
5709  RelFileLocator n1 = *(const RelFileLocator *) p1;
5710  RelFileLocator n2 = *(const RelFileLocator *) p2;
5711 
5712  if (n1.relNumber < n2.relNumber)
5713  return -1;
5714  else if (n1.relNumber > n2.relNumber)
5715  return 1;
5716 
5717  if (n1.dbOid < n2.dbOid)
5718  return -1;
5719  else if (n1.dbOid > n2.dbOid)
5720  return 1;
5721 
5722  if (n1.spcOid < n2.spcOid)
5723  return -1;
5724  else if (n1.spcOid > n2.spcOid)
5725  return 1;
5726  else
5727  return 0;
5728 }
5729 
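/*
 * Illustrative sketch, not part of bufmgr.c: the comparator above lets
 * callers such as DropRelationsAllBuffers() sort an array of RelFileLocators
 * once and then probe it with bsearch() while scanning the buffer pool.  The
 * guard macro and helper name below are invented for illustration.
 */
#ifdef RLOCATOR_SEARCH_SKETCH
static bool
sketch_locator_in_sorted_set(const RelFileLocator *sorted, int n,
							 const RelFileLocator *target)
{
	/*
	 * 'sorted' must already be ordered with the same comparator, e.g. by
	 * qsort(array, n, sizeof(RelFileLocator), rlocator_comparator).
	 */
	return bsearch(target, sorted, n, sizeof(RelFileLocator),
				   rlocator_comparator) != NULL;
}
#endif
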
5730 /*
5731  * Lock buffer header - set BM_LOCKED in buffer state.
5732  */
5733 uint32
5734 LockBufHdr(BufferDesc *desc)
5735 {
5736  SpinDelayStatus delayStatus;
5737  uint32 old_buf_state;
5738 
5739  Assert(!BufferIsLocal(BufferDescriptorGetBuffer(desc)));
5740 
5741  init_local_spin_delay(&delayStatus);
5742 
5743  while (true)
5744  {
5745  /* set BM_LOCKED flag */
5746  old_buf_state = pg_atomic_fetch_or_u32(&desc->state, BM_LOCKED);
5747  /* if it wasn't set before, we're OK */
5748  if (!(old_buf_state & BM_LOCKED))
5749  break;
5750  perform_spin_delay(&delayStatus);
5751  }
5752  finish_spin_delay(&delayStatus);
5753  return old_buf_state | BM_LOCKED;
5754 }
5755 
5756 /*
5757  * Wait until the BM_LOCKED flag isn't set anymore and return the buffer's
5758  * state at that point.
5759  *
5760  * Obviously the buffer could be locked by the time the value is returned, so
5761  * this is primarily useful in CAS style loops.
5762  */
5763 static uint32
5764 WaitBufHdrUnlocked(BufferDesc *buf)
5765 {
5766  SpinDelayStatus delayStatus;
5767  uint32 buf_state;
5768 
5769  init_local_spin_delay(&delayStatus);
5770 
5771  buf_state = pg_atomic_read_u32(&buf->state);
5772 
5773  while (buf_state & BM_LOCKED)
5774  {
5775  perform_spin_delay(&delayStatus);
5776  buf_state = pg_atomic_read_u32(&buf->state);
5777  }
5778 
5779  finish_spin_delay(&delayStatus);
5780 
5781  return buf_state;
5782 }
5783 
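/*
 * Illustrative sketch, not part of bufmgr.c: the kind of CAS loop the
 * comment above has in mind (PinBuffer() follows this shape).  The guard
 * macro and function name are invented, and the actual state change is left
 * as a placeholder.
 */
#ifdef BUFMGR_CAS_LOOP_SKETCH
static void
sketch_cas_update_state(BufferDesc *buf)
{
	uint32		old_buf_state = pg_atomic_read_u32(&buf->state);
	uint32		buf_state;

	for (;;)
	{
		/* don't race a header-lock holder; wait for the lock to clear */
		if (old_buf_state & BM_LOCKED)
			old_buf_state = WaitBufHdrUnlocked(buf);

		buf_state = old_buf_state;
		/* ... apply the desired change to buf_state here ... */

		/* on failure, old_buf_state is refreshed and we retry */
		if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
										   buf_state))
			break;
	}
}
#endif
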
5784 /*
5785  * BufferTag comparator.
5786  */
5787 static inline int