PostgreSQL Source Code  git master
bufmgr.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * bufmgr.c
4  * buffer manager interface routines
5  *
6  * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/storage/buffer/bufmgr.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 /*
16  * Principal entry points:
17  *
18  * ReadBuffer() -- find or create a buffer holding the requested page,
19  * and pin it so that no one can destroy it while this process
20  * is using it.
21  *
22  * ReleaseBuffer() -- unpin a buffer
23  *
24  * MarkBufferDirty() -- mark a pinned buffer's contents as "dirty".
25  * The disk write is delayed until buffer replacement or checkpoint.
26  *
27  * See also these files:
28  * freelist.c -- chooses victim for buffer replacement
29  * buf_table.c -- manages the buffer lookup table
30  */
31 #include "postgres.h"
32 
33 #include <sys/file.h>
34 #include <unistd.h>
35 
36 #include "access/tableam.h"
37 #include "access/xloginsert.h"
38 #include "access/xlogutils.h"
39 #include "catalog/catalog.h"
40 #include "catalog/storage.h"
41 #include "catalog/storage_xlog.h"
42 #include "executor/instrument.h"
43 #include "lib/binaryheap.h"
44 #include "miscadmin.h"
45 #include "pg_trace.h"
46 #include "pgstat.h"
47 #include "postmaster/bgwriter.h"
48 #include "storage/buf_internals.h"
49 #include "storage/bufmgr.h"
50 #include "storage/ipc.h"
51 #include "storage/proc.h"
52 #include "storage/smgr.h"
53 #include "storage/standby.h"
54 #include "utils/memdebug.h"
55 #include "utils/ps_status.h"
56 #include "utils/rel.h"
57 #include "utils/resowner_private.h"
58 #include "utils/timestamp.h"
59 
60 
61 /* Note: these two macros only work on shared buffers, not local ones! */
62 #define BufHdrGetBlock(bufHdr) ((Block) (BufferBlocks + ((Size) (bufHdr)->buf_id) * BLCKSZ))
63 #define BufferGetLSN(bufHdr) (PageGetLSN(BufHdrGetBlock(bufHdr)))
64 
65 /* Note: this macro only works on local buffers, not shared ones! */
66 #define LocalBufHdrGetBlock(bufHdr) \
67  LocalBufferBlockPointers[-((bufHdr)->buf_id + 2)]
68 
69 /* Bits in SyncOneBuffer's return value */
70 #define BUF_WRITTEN 0x01
71 #define BUF_REUSABLE 0x02
72 
73 #define RELS_BSEARCH_THRESHOLD 20
74 
75 /*
76  * This is the size (in the number of blocks) above which we scan the
77  * entire buffer pool to remove the buffers for all the pages of relation
78  * being dropped. For the relations with size below this threshold, we find
79  * the buffers by doing lookups in BufMapping table.
80  */
81 #define BUF_DROP_FULL_SCAN_THRESHOLD (uint64) (NBuffers / 32)
82 
83 typedef struct PrivateRefCountEntry
84 {
88 
89 /* 64 bytes, about the size of a cache line on common systems */
90 #define REFCOUNT_ARRAY_ENTRIES 8
91 
92 /*
93  * Status of buffers to checkpoint for a particular tablespace, used
94  * internally in BufferSync.
95  */
96 typedef struct CkptTsStatus
97 {
98  /* oid of the tablespace */
100 
101  /*
102  * Checkpoint progress for this tablespace. To make progress comparable
103  * between tablespaces the progress is, for each tablespace, measured as a
104  * number between 0 and the total number of to-be-checkpointed pages. Each
105  * page checkpointed in this tablespace increments this space's progress
106  * by progress_slice.
107  */
110 
111  /* number of to-be checkpointed pages in this tablespace */
113  /* already processed pages in this tablespace */
115 
116  /* current offset in CkptBufferIds for this tablespace */
117  int index;
119 
120 /*
121  * Type for array used to sort SMgrRelations
122  *
123  * FlushRelationsAllBuffers shares the same comparator function with
124  * DropRelationsAllBuffers. Pointer to this struct and RelFileLocator must be
125  * compatible.
126  */
127 typedef struct SMgrSortArray
128 {
129  RelFileLocator rlocator; /* This must be the first member */
132 
133 /* GUC variables */
134 bool zero_damaged_pages = false;
137 bool track_io_timing = false;
138 
139 /*
140  * How many buffers PrefetchBuffer callers should try to stay ahead of their
141  * ReadBuffer calls by. Zero means "never prefetch". This value is only used
142  * for buffers not belonging to tablespaces that have their
143  * effective_io_concurrency parameter set.
144  */
146 
147 /*
148  * Like effective_io_concurrency, but used by maintenance code paths that might
149  * benefit from a higher setting because they work on behalf of many sessions.
150  * Overridden by the tablespace setting of the same name.
151  */
153 
154 /*
155  * GUC variables about triggering kernel writeback for buffers written; OS
156  * dependent defaults are set via the GUC mechanism.
157  */
161 
162 /* local state for StartBufferIO and related functions */
163 static BufferDesc *InProgressBuf = NULL;
164 static bool IsForInput;
165 
166 /* local state for LockBufferForCleanup */
168 
169 /*
170  * Backend-Private refcount management:
171  *
172  * Each buffer also has a private refcount that keeps track of the number of
173  * times the buffer is pinned in the current process. This is so that the
174  * shared refcount needs to be modified only once if a buffer is pinned more
175  * than once by an individual backend. It's also used to check that no buffers
176  * are still pinned at the end of transactions and when exiting.
177  *
178  *
179  * To avoid - as we used to - requiring an array with NBuffers entries to keep
180  * track of local buffers, we use a small sequentially searched array
181  * (PrivateRefCountArray) and an overflow hash table (PrivateRefCountHash) to
182  * keep track of backend local pins.
183  *
184  * Until no more than REFCOUNT_ARRAY_ENTRIES buffers are pinned at once, all
185  * refcounts are kept track of in the array; after that, new array entries
186  * displace old ones into the hash table. That way a frequently used entry
187  * can't get "stuck" in the hashtable while infrequent ones clog the array.
188  *
189  * Note that in most scenarios the number of pinned buffers will not exceed
190  * REFCOUNT_ARRAY_ENTRIES.
191  *
192  *
193  * To enter a buffer into the refcount tracking mechanism first reserve a free
194  * entry using ReservePrivateRefCountEntry() and then later, if necessary,
195  * fill it with NewPrivateRefCountEntry(). That split lets us avoid doing
196  * memory allocations in NewPrivateRefCountEntry() which can be important
197  * because in some scenarios it's called with a spinlock held...
198  */
200 static HTAB *PrivateRefCountHash = NULL;
204 
205 static void ReservePrivateRefCountEntry(void);
208 static inline int32 GetPrivateRefCount(Buffer buffer);
210 
211 /*
212  * Ensure that the PrivateRefCountArray has sufficient space to store one more
213  * entry. This has to be called before using NewPrivateRefCountEntry() to fill
214  * a new entry - but it's perfectly fine to not use a reserved entry.
215  */
216 static void
218 {
219  /* Already reserved (or freed), nothing to do */
220  if (ReservedRefCountEntry != NULL)
221  return;
222 
223  /*
224  * First search for a free entry the array, that'll be sufficient in the
225  * majority of cases.
226  */
227  {
228  int i;
229 
230  for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
231  {
233 
235 
236  if (res->buffer == InvalidBuffer)
237  {
239  return;
240  }
241  }
242  }
243 
244  /*
245  * No luck. All array entries are full. Move one array entry into the hash
246  * table.
247  */
248  {
249  /*
250  * Move entry from the current clock position in the array into the
251  * hashtable. Use that slot.
252  */
253  PrivateRefCountEntry *hashent;
254  bool found;
255 
256  /* select victim slot */
259 
260  /* Better be used, otherwise we shouldn't get here. */
262 
263  /* enter victim array entry into hashtable */
265  (void *) &(ReservedRefCountEntry->buffer),
266  HASH_ENTER,
267  &found);
268  Assert(!found);
270 
271  /* clear the now free array slot */
274 
276  }
277 }
278 
279 /*
280  * Fill a previously reserved refcount entry.
281  */
282 static PrivateRefCountEntry *
284 {
286 
287  /* only allowed to be called when a reservation has been made */
288  Assert(ReservedRefCountEntry != NULL);
289 
290  /* use up the reserved entry */
292  ReservedRefCountEntry = NULL;
293 
294  /* and fill it */
295  res->buffer = buffer;
296  res->refcount = 0;
297 
298  return res;
299 }
300 
301 /*
302  * Return the PrivateRefCount entry for the passed buffer.
303  *
304  * Returns NULL if a buffer doesn't have a refcount entry. Otherwise, if
305  * do_move is true, and the entry resides in the hashtable the entry is
306  * optimized for frequent access by moving it to the array.
307  */
308 static PrivateRefCountEntry *
310 {
312  int i;
313 
316 
317  /*
318  * First search for references in the array, that'll be sufficient in the
319  * majority of cases.
320  */
321  for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
322  {
324 
325  if (res->buffer == buffer)
326  return res;
327  }
328 
329  /*
330  * By here we know that the buffer, if already pinned, isn't residing in
331  * the array.
332  *
333  * Only look up the buffer in the hashtable if we've previously overflowed
334  * into it.
335  */
336  if (PrivateRefCountOverflowed == 0)
337  return NULL;
338 
340  (void *) &buffer,
341  HASH_FIND,
342  NULL);
343 
344  if (res == NULL)
345  return NULL;
346  else if (!do_move)
347  {
348  /* caller doesn't want us to move the hash entry into the array */
349  return res;
350  }
351  else
352  {
353  /* move buffer from hashtable into the free array slot */
354  bool found;
356 
357  /* Ensure there's a free array slot */
359 
360  /* Use up the reserved slot */
361  Assert(ReservedRefCountEntry != NULL);
363  ReservedRefCountEntry = NULL;
364  Assert(free->buffer == InvalidBuffer);
365 
366  /* and fill it */
367  free->buffer = buffer;
368  free->refcount = res->refcount;
369 
370  /* delete from hashtable */
372  (void *) &buffer,
373  HASH_REMOVE,
374  &found);
375  Assert(found);
378 
379  return free;
380  }
381 }
382 
383 /*
384  * Returns how many times the passed buffer is pinned by this backend.
385  *
386  * Only works for shared memory buffers!
387  */
388 static inline int32
390 {
392 
395 
396  /*
397  * Not moving the entry - that's ok for the current users, but we might
398  * want to change this one day.
399  */
400  ref = GetPrivateRefCountEntry(buffer, false);
401 
402  if (ref == NULL)
403  return 0;
404  return ref->refcount;
405 }
406 
407 /*
408  * Release resources used to track the reference count of a buffer which we no
409  * longer have pinned and don't want to pin again immediately.
410  */
411 static void
413 {
414  Assert(ref->refcount == 0);
415 
416  if (ref >= &PrivateRefCountArray[0] &&
418  {
419  ref->buffer = InvalidBuffer;
420 
421  /*
422  * Mark the just used entry as reserved - in many scenarios that
423  * allows us to avoid ever having to search the array/hash for free
424  * entries.
425  */
426  ReservedRefCountEntry = ref;
427  }
428  else
429  {
430  bool found;
431  Buffer buffer = ref->buffer;
432 
434  (void *) &buffer,
435  HASH_REMOVE,
436  &found);
437  Assert(found);
440  }
441 }
442 
443 /*
444  * BufferIsPinned
445  * True iff the buffer is pinned (also checks for valid buffer number).
446  *
447  * NOTE: what we check here is that *this* backend holds a pin on
448  * the buffer. We do not care whether some other backend does.
449  */
450 #define BufferIsPinned(bufnum) \
451 ( \
452  !BufferIsValid(bufnum) ? \
453  false \
454  : \
455  BufferIsLocal(bufnum) ? \
456  (LocalRefCount[-(bufnum) - 1] > 0) \
457  : \
458  (GetPrivateRefCount(bufnum) > 0) \
459 )
460 
461 
462 static Buffer ReadBuffer_common(SMgrRelation smgr, char relpersistence,
463  ForkNumber forkNum, BlockNumber blockNum,
465  bool *hit);
466 static bool PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy);
467 static void PinBuffer_Locked(BufferDesc *buf);
468 static void UnpinBuffer(BufferDesc *buf, bool fixOwner);
469 static void BufferSync(int flags);
471 static int SyncOneBuffer(int buf_id, bool skip_recently_used,
472  WritebackContext *wb_context);
473 static void WaitIO(BufferDesc *buf);
474 static bool StartBufferIO(BufferDesc *buf, bool forInput);
475 static void TerminateBufferIO(BufferDesc *buf, bool clear_dirty,
476  uint32 set_flag_bits);
477 static void shared_buffer_write_error_callback(void *arg);
478 static void local_buffer_write_error_callback(void *arg);
479 static BufferDesc *BufferAlloc(SMgrRelation smgr,
480  char relpersistence,
481  ForkNumber forkNum,
482  BlockNumber blockNum,
483  BufferAccessStrategy strategy,
484  bool *foundPtr);
485 static void FlushBuffer(BufferDesc *buf, SMgrRelation reln);
486 static void FindAndDropRelationBuffers(RelFileLocator rlocator,
487  ForkNumber forkNum,
488  BlockNumber nForkBlock,
489  BlockNumber firstDelBlock);
490 static void RelationCopyStorageUsingBuffer(RelFileLocator srclocator,
491  RelFileLocator dstlocator,
492  ForkNumber forkNum, bool permanent);
493 static void AtProcExit_Buffers(int code, Datum arg);
494 static void CheckForBufferLeaks(void);
495 static int rlocator_comparator(const void *p1, const void *p2);
496 static inline int buffertag_comparator(const BufferTag *ba, const BufferTag *bb);
497 static inline int ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b);
498 static int ts_ckpt_progress_comparator(Datum a, Datum b, void *arg);
499 
500 
501 /*
502  * Implementation of PrefetchBuffer() for shared buffers.
503  */
506  ForkNumber forkNum,
507  BlockNumber blockNum)
508 {
509  PrefetchBufferResult result = {InvalidBuffer, false};
510  BufferTag newTag; /* identity of requested block */
511  uint32 newHash; /* hash value for newTag */
512  LWLock *newPartitionLock; /* buffer partition lock for it */
513  int buf_id;
514 
515  Assert(BlockNumberIsValid(blockNum));
516 
517  /* create a tag so we can lookup the buffer */
518  InitBufferTag(&newTag, &smgr_reln->smgr_rlocator.locator,
519  forkNum, blockNum);
520 
521  /* determine its hash code and partition lock ID */
522  newHash = BufTableHashCode(&newTag);
523  newPartitionLock = BufMappingPartitionLock(newHash);
524 
525  /* see if the block is in the buffer pool already */
526  LWLockAcquire(newPartitionLock, LW_SHARED);
527  buf_id = BufTableLookup(&newTag, newHash);
528  LWLockRelease(newPartitionLock);
529 
530  /* If not in buffers, initiate prefetch */
531  if (buf_id < 0)
532  {
533 #ifdef USE_PREFETCH
534  /*
535  * Try to initiate an asynchronous read. This returns false in
536  * recovery if the relation file doesn't exist.
537  */
538  if (smgrprefetch(smgr_reln, forkNum, blockNum))
539  result.initiated_io = true;
540 #endif /* USE_PREFETCH */
541  }
542  else
543  {
544  /*
545  * Report the buffer it was in at that time. The caller may be able
546  * to avoid a buffer table lookup, but it's not pinned and it must be
547  * rechecked!
548  */
549  result.recent_buffer = buf_id + 1;
550  }
551 
552  /*
553  * If the block *is* in buffers, we do nothing. This is not really ideal:
554  * the block might be just about to be evicted, which would be stupid
555  * since we know we are going to need it soon. But the only easy answer
556  * is to bump the usage_count, which does not seem like a great solution:
557  * when the caller does ultimately touch the block, usage_count would get
558  * bumped again, resulting in too much favoritism for blocks that are
559  * involved in a prefetch sequence. A real fix would involve some
560  * additional per-buffer state, and it's not clear that there's enough of
561  * a problem to justify that.
562  */
563 
564  return result;
565 }
566 
567 /*
568  * PrefetchBuffer -- initiate asynchronous read of a block of a relation
569  *
570  * This is named by analogy to ReadBuffer but doesn't actually allocate a
571  * buffer. Instead it tries to ensure that a future ReadBuffer for the given
572  * block will not be delayed by the I/O. Prefetching is optional.
573  *
574  * There are three possible outcomes:
575  *
576  * 1. If the block is already cached, the result includes a valid buffer that
577  * could be used by the caller to avoid the need for a later buffer lookup, but
578  * it's not pinned, so the caller must recheck it.
579  *
580  * 2. If the kernel has been asked to initiate I/O, the initiated_io member is
581  * true. Currently there is no way to know if the data was already cached by
582  * the kernel and therefore didn't really initiate I/O, and no way to know when
583  * the I/O completes other than using synchronous ReadBuffer().
584  *
585  * 3. Otherwise, the buffer wasn't already cached by PostgreSQL, and either
586  * USE_PREFETCH is not defined (this build doesn't support prefetching due to
587  * lack of a kernel facility), or the underlying relation file wasn't found and
588  * we are in recovery. (If the relation file wasn't found and we are not in
589  * recovery, an error is raised).
590  */
593 {
594  Assert(RelationIsValid(reln));
595  Assert(BlockNumberIsValid(blockNum));
596 
597  if (RelationUsesLocalBuffers(reln))
598  {
599  /* see comments in ReadBufferExtended */
600  if (RELATION_IS_OTHER_TEMP(reln))
601  ereport(ERROR,
602  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
603  errmsg("cannot access temporary tables of other sessions")));
604 
605  /* pass it off to localbuf.c */
606  return PrefetchLocalBuffer(RelationGetSmgr(reln), forkNum, blockNum);
607  }
608  else
609  {
610  /* pass it to the shared buffer version */
611  return PrefetchSharedBuffer(RelationGetSmgr(reln), forkNum, blockNum);
612  }
613 }
614 
615 /*
616  * ReadRecentBuffer -- try to pin a block in a recently observed buffer
617  *
618  * Compared to ReadBuffer(), this avoids a buffer mapping lookup when it's
619  * successful. Return true if the buffer is valid and still has the expected
620  * tag. In that case, the buffer is pinned and the usage count is bumped.
621  */
622 bool
624  Buffer recent_buffer)
625 {
626  BufferDesc *bufHdr;
627  BufferTag tag;
628  uint32 buf_state;
629  bool have_private_ref;
630 
631  Assert(BufferIsValid(recent_buffer));
632 
635  InitBufferTag(&tag, &rlocator, forkNum, blockNum);
636 
637  if (BufferIsLocal(recent_buffer))
638  {
639  int b = -recent_buffer - 1;
640 
641  bufHdr = GetLocalBufferDescriptor(b);
642  buf_state = pg_atomic_read_u32(&bufHdr->state);
643 
644  /* Is it still valid and holding the right tag? */
645  if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
646  {
647  /*
648  * Bump buffer's ref and usage counts. This is equivalent of
649  * PinBuffer for a shared buffer.
650  */
651  if (LocalRefCount[b] == 0)
652  {
654  {
655  buf_state += BUF_USAGECOUNT_ONE;
656  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
657  }
658  }
659  LocalRefCount[b]++;
661 
663 
664  return true;
665  }
666  }
667  else
668  {
669  bufHdr = GetBufferDescriptor(recent_buffer - 1);
670  have_private_ref = GetPrivateRefCount(recent_buffer) > 0;
671 
672  /*
673  * Do we already have this buffer pinned with a private reference? If
674  * so, it must be valid and it is safe to check the tag without
675  * locking. If not, we have to lock the header first and then check.
676  */
677  if (have_private_ref)
678  buf_state = pg_atomic_read_u32(&bufHdr->state);
679  else
680  buf_state = LockBufHdr(bufHdr);
681 
682  if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
683  {
684  /*
685  * It's now safe to pin the buffer. We can't pin first and ask
686  * questions later, because it might confuse code paths like
687  * InvalidateBuffer() if we pinned a random non-matching buffer.
688  */
689  if (have_private_ref)
690  PinBuffer(bufHdr, NULL); /* bump pin count */
691  else
692  PinBuffer_Locked(bufHdr); /* pin for first time */
693 
695 
696  return true;
697  }
698 
699  /* If we locked the header above, now unlock. */
700  if (!have_private_ref)
701  UnlockBufHdr(bufHdr, buf_state);
702  }
703 
704  return false;
705 }
706 
707 /*
708  * ReadBuffer -- a shorthand for ReadBufferExtended, for reading from main
709  * fork with RBM_NORMAL mode and default strategy.
710  */
711 Buffer
713 {
714  return ReadBufferExtended(reln, MAIN_FORKNUM, blockNum, RBM_NORMAL, NULL);
715 }
716 
717 /*
718  * ReadBufferExtended -- returns a buffer containing the requested
719  * block of the requested relation. If the blknum
720  * requested is P_NEW, extend the relation file and
721  * allocate a new block. (Caller is responsible for
722  * ensuring that only one backend tries to extend a
723  * relation at the same time!)
724  *
725  * Returns: the buffer number for the buffer containing
726  * the block read. The returned buffer has been pinned.
727  * Does not return on error --- elog's instead.
728  *
729  * Assume when this function is called, that reln has been opened already.
730  *
731  * In RBM_NORMAL mode, the page is read from disk, and the page header is
732  * validated. An error is thrown if the page header is not valid. (But
733  * note that an all-zero page is considered "valid"; see
734  * PageIsVerifiedExtended().)
735  *
736  * RBM_ZERO_ON_ERROR is like the normal mode, but if the page header is not
737  * valid, the page is zeroed instead of throwing an error. This is intended
738  * for non-critical data, where the caller is prepared to repair errors.
739  *
740  * In RBM_ZERO_AND_LOCK mode, if the page isn't in buffer cache already, it's
741  * filled with zeros instead of reading it from disk. Useful when the caller
742  * is going to fill the page from scratch, since this saves I/O and avoids
743  * unnecessary failure if the page-on-disk has corrupt page headers.
744  * The page is returned locked to ensure that the caller has a chance to
745  * initialize the page before it's made visible to others.
746  * Caution: do not use this mode to read a page that is beyond the relation's
747  * current physical EOF; that is likely to cause problems in md.c when
748  * the page is modified and written out. P_NEW is OK, though.
749  *
750  * RBM_ZERO_AND_CLEANUP_LOCK is the same as RBM_ZERO_AND_LOCK, but acquires
751  * a cleanup-strength lock on the page.
752  *
753  * RBM_NORMAL_NO_LOG mode is treated the same as RBM_NORMAL here.
754  *
755  * If strategy is not NULL, a nondefault buffer access strategy is used.
756  * See buffer/README for details.
757  */
758 Buffer
761 {
762  bool hit;
763  Buffer buf;
764 
765  /*
766  * Reject attempts to read non-local temporary relations; we would be
767  * likely to get wrong data since we have no visibility into the owning
768  * session's local buffers.
769  */
770  if (RELATION_IS_OTHER_TEMP(reln))
771  ereport(ERROR,
772  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
773  errmsg("cannot access temporary tables of other sessions")));
774 
775  /*
776  * Read the buffer, and update pgstat counters to reflect a cache hit or
777  * miss.
778  */
780  buf = ReadBuffer_common(RelationGetSmgr(reln), reln->rd_rel->relpersistence,
781  forkNum, blockNum, mode, strategy, &hit);
782  if (hit)
784  return buf;
785 }
786 
787 
788 /*
789  * ReadBufferWithoutRelcache -- like ReadBufferExtended, but doesn't require
790  * a relcache entry for the relation.
791  *
792  * Pass permanent = true for a RELPERSISTENCE_PERMANENT relation, and
793  * permanent = false for a RELPERSISTENCE_UNLOGGED relation. This function
794  * cannot be used for temporary relations (and making that work might be
795  * difficult, unless we only want to read temporary relations for our own
796  * BackendId).
797  */
798 Buffer
800  BlockNumber blockNum, ReadBufferMode mode,
801  BufferAccessStrategy strategy, bool permanent)
802 {
803  bool hit;
804 
805  SMgrRelation smgr = smgropen(rlocator, InvalidBackendId);
806 
807  return ReadBuffer_common(smgr, permanent ? RELPERSISTENCE_PERMANENT :
808  RELPERSISTENCE_UNLOGGED, forkNum, blockNum,
809  mode, strategy, &hit);
810 }
811 
812 
813 /*
814  * ReadBuffer_common -- common logic for all ReadBuffer variants
815  *
816  * *hit is set to true if the request was satisfied from shared buffer cache.
817  */
818 static Buffer
819 ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
820  BlockNumber blockNum, ReadBufferMode mode,
821  BufferAccessStrategy strategy, bool *hit)
822 {
823  BufferDesc *bufHdr;
824  Block bufBlock;
825  bool found;
826  bool isExtend;
827  bool isLocalBuf = SmgrIsTemp(smgr);
828 
829  *hit = false;
830 
831  /* Make sure we will have room to remember the buffer pin */
833 
834  isExtend = (blockNum == P_NEW);
835 
836  TRACE_POSTGRESQL_BUFFER_READ_START(forkNum, blockNum,
840  smgr->smgr_rlocator.backend,
841  isExtend);
842 
843  /* Substitute proper block number if caller asked for P_NEW */
844  if (isExtend)
845  {
846  blockNum = smgrnblocks(smgr, forkNum);
847  /* Fail if relation is already at maximum possible length */
848  if (blockNum == P_NEW)
849  ereport(ERROR,
850  (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
851  errmsg("cannot extend relation %s beyond %u blocks",
852  relpath(smgr->smgr_rlocator, forkNum),
853  P_NEW)));
854  }
855 
856  if (isLocalBuf)
857  {
858  bufHdr = LocalBufferAlloc(smgr, forkNum, blockNum, &found);
859  if (found)
861  else if (isExtend)
863  else if (mode == RBM_NORMAL || mode == RBM_NORMAL_NO_LOG ||
866  }
867  else
868  {
869  /*
870  * lookup the buffer. IO_IN_PROGRESS is set if the requested block is
871  * not currently in memory.
872  */
873  bufHdr = BufferAlloc(smgr, relpersistence, forkNum, blockNum,
874  strategy, &found);
875  if (found)
877  else if (isExtend)
879  else if (mode == RBM_NORMAL || mode == RBM_NORMAL_NO_LOG ||
882  }
883 
884  /* At this point we do NOT hold any locks. */
885 
886  /* if it was already in the buffer pool, we're done */
887  if (found)
888  {
889  if (!isExtend)
890  {
891  /* Just need to update stats before we exit */
892  *hit = true;
893  VacuumPageHit++;
894 
895  if (VacuumCostActive)
897 
898  TRACE_POSTGRESQL_BUFFER_READ_DONE(forkNum, blockNum,
902  smgr->smgr_rlocator.backend,
903  isExtend,
904  found);
905 
906  /*
907  * In RBM_ZERO_AND_LOCK mode the caller expects the page to be
908  * locked on return.
909  */
910  if (!isLocalBuf)
911  {
912  if (mode == RBM_ZERO_AND_LOCK)
914  LW_EXCLUSIVE);
915  else if (mode == RBM_ZERO_AND_CLEANUP_LOCK)
917  }
918 
919  return BufferDescriptorGetBuffer(bufHdr);
920  }
921 
922  /*
923  * We get here only in the corner case where we are trying to extend
924  * the relation but we found a pre-existing buffer marked BM_VALID.
925  * This can happen because mdread doesn't complain about reads beyond
926  * EOF (when zero_damaged_pages is ON) and so a previous attempt to
927  * read a block beyond EOF could have left a "valid" zero-filled
928  * buffer. Unfortunately, we have also seen this case occurring
929  * because of buggy Linux kernels that sometimes return an
930  * lseek(SEEK_END) result that doesn't account for a recent write. In
931  * that situation, the pre-existing buffer would contain valid data
932  * that we don't want to overwrite. Since the legitimate case should
933  * always have left a zero-filled buffer, complain if not PageIsNew.
934  */
935  bufBlock = isLocalBuf ? LocalBufHdrGetBlock(bufHdr) : BufHdrGetBlock(bufHdr);
936  if (!PageIsNew((Page) bufBlock))
937  ereport(ERROR,
938  (errmsg("unexpected data beyond EOF in block %u of relation %s",
939  blockNum, relpath(smgr->smgr_rlocator, forkNum)),
940  errhint("This has been seen to occur with buggy kernels; consider updating your system.")));
941 
942  /*
943  * We *must* do smgrextend before succeeding, else the page will not
944  * be reserved by the kernel, and the next P_NEW call will decide to
945  * return the same page. Clear the BM_VALID bit, do the StartBufferIO
946  * call that BufferAlloc didn't, and proceed.
947  */
948  if (isLocalBuf)
949  {
950  /* Only need to adjust flags */
951  uint32 buf_state = pg_atomic_read_u32(&bufHdr->state);
952 
953  Assert(buf_state & BM_VALID);
954  buf_state &= ~BM_VALID;
955  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
956  }
957  else
958  {
959  /*
960  * Loop to handle the very small possibility that someone re-sets
961  * BM_VALID between our clearing it and StartBufferIO inspecting
962  * it.
963  */
964  do
965  {
966  uint32 buf_state = LockBufHdr(bufHdr);
967 
968  Assert(buf_state & BM_VALID);
969  buf_state &= ~BM_VALID;
970  UnlockBufHdr(bufHdr, buf_state);
971  } while (!StartBufferIO(bufHdr, true));
972  }
973  }
974 
975  /*
976  * if we have gotten to this point, we have allocated a buffer for the
977  * page but its contents are not yet valid. IO_IN_PROGRESS is set for it,
978  * if it's a shared buffer.
979  *
980  * Note: if smgrextend fails, we will end up with a buffer that is
981  * allocated but not marked BM_VALID. P_NEW will still select the same
982  * block number (because the relation didn't get any longer on disk) and
983  * so future attempts to extend the relation will find the same buffer (if
984  * it's not been recycled) but come right back here to try smgrextend
985  * again.
986  */
987  Assert(!(pg_atomic_read_u32(&bufHdr->state) & BM_VALID)); /* spinlock not needed */
988 
989  bufBlock = isLocalBuf ? LocalBufHdrGetBlock(bufHdr) : BufHdrGetBlock(bufHdr);
990 
991  if (isExtend)
992  {
993  /* new buffers are zero-filled */
994  MemSet((char *) bufBlock, 0, BLCKSZ);
995  /* don't set checksum for all-zero page */
996  smgrextend(smgr, forkNum, blockNum, (char *) bufBlock, false);
997 
998  /*
999  * NB: we're *not* doing a ScheduleBufferTagForWriteback here;
1000  * although we're essentially performing a write. At least on linux
1001  * doing so defeats the 'delayed allocation' mechanism, leading to
1002  * increased file fragmentation.
1003  */
1004  }
1005  else
1006  {
1007  /*
1008  * Read in the page, unless the caller intends to overwrite it and
1009  * just wants us to allocate a buffer.
1010  */
1012  MemSet((char *) bufBlock, 0, BLCKSZ);
1013  else
1014  {
1015  instr_time io_start,
1016  io_time;
1017 
1018  if (track_io_timing)
1019  INSTR_TIME_SET_CURRENT(io_start);
1020 
1021  smgrread(smgr, forkNum, blockNum, (char *) bufBlock);
1022 
1023  if (track_io_timing)
1024  {
1025  INSTR_TIME_SET_CURRENT(io_time);
1026  INSTR_TIME_SUBTRACT(io_time, io_start);
1029  }
1030 
1031  /* check for garbage data */
1032  if (!PageIsVerifiedExtended((Page) bufBlock, blockNum,
1034  {
1036  {
1037  ereport(WARNING,
1039  errmsg("invalid page in block %u of relation %s; zeroing out page",
1040  blockNum,
1041  relpath(smgr->smgr_rlocator, forkNum))));
1042  MemSet((char *) bufBlock, 0, BLCKSZ);
1043  }
1044  else
1045  ereport(ERROR,
1047  errmsg("invalid page in block %u of relation %s",
1048  blockNum,
1049  relpath(smgr->smgr_rlocator, forkNum))));
1050  }
1051  }
1052  }
1053 
1054  /*
1055  * In RBM_ZERO_AND_LOCK mode, grab the buffer content lock before marking
1056  * the page as valid, to make sure that no other backend sees the zeroed
1057  * page before the caller has had a chance to initialize it.
1058  *
1059  * Since no-one else can be looking at the page contents yet, there is no
1060  * difference between an exclusive lock and a cleanup-strength lock. (Note
1061  * that we cannot use LockBuffer() or LockBufferForCleanup() here, because
1062  * they assert that the buffer is already valid.)
1063  */
1065  !isLocalBuf)
1066  {
1068  }
1069 
1070  if (isLocalBuf)
1071  {
1072  /* Only need to adjust flags */
1073  uint32 buf_state = pg_atomic_read_u32(&bufHdr->state);
1074 
1075  buf_state |= BM_VALID;
1076  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
1077  }
1078  else
1079  {
1080  /* Set BM_VALID, terminate IO, and wake up any waiters */
1081  TerminateBufferIO(bufHdr, false, BM_VALID);
1082  }
1083 
1084  VacuumPageMiss++;
1085  if (VacuumCostActive)
1087 
1088  TRACE_POSTGRESQL_BUFFER_READ_DONE(forkNum, blockNum,
1090  smgr->smgr_rlocator.locator.dbOid,
1092  smgr->smgr_rlocator.backend,
1093  isExtend,
1094  found);
1095 
1096  return BufferDescriptorGetBuffer(bufHdr);
1097 }
1098 
1099 /*
1100  * BufferAlloc -- subroutine for ReadBuffer. Handles lookup of a shared
1101  * buffer. If no buffer exists already, selects a replacement
1102  * victim and evicts the old page, but does NOT read in new page.
1103  *
1104  * "strategy" can be a buffer replacement strategy object, or NULL for
1105  * the default strategy. The selected buffer's usage_count is advanced when
1106  * using the default strategy, but otherwise possibly not (see PinBuffer).
1107  *
1108  * The returned buffer is pinned and is already marked as holding the
1109  * desired page. If it already did have the desired page, *foundPtr is
1110  * set true. Otherwise, *foundPtr is set false and the buffer is marked
1111  * as IO_IN_PROGRESS; ReadBuffer will now need to do I/O to fill it.
1112  *
1113  * *foundPtr is actually redundant with the buffer's BM_VALID flag, but
1114  * we keep it for simplicity in ReadBuffer.
1115  *
1116  * No locks are held either at entry or exit.
1117  */
1118 static BufferDesc *
1119 BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
1120  BlockNumber blockNum,
1121  BufferAccessStrategy strategy,
1122  bool *foundPtr)
1123 {
1124  BufferTag newTag; /* identity of requested block */
1125  uint32 newHash; /* hash value for newTag */
1126  LWLock *newPartitionLock; /* buffer partition lock for it */
1127  BufferTag oldTag; /* previous identity of selected buffer */
1128  uint32 oldHash; /* hash value for oldTag */
1129  LWLock *oldPartitionLock; /* buffer partition lock for it */
1130  uint32 oldFlags;
1131  int buf_id;
1132  BufferDesc *buf;
1133  bool valid;
1134  uint32 buf_state;
1135 
1136  /* create a tag so we can lookup the buffer */
1137  InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);
1138 
1139  /* determine its hash code and partition lock ID */
1140  newHash = BufTableHashCode(&newTag);
1141  newPartitionLock = BufMappingPartitionLock(newHash);
1142 
1143  /* see if the block is in the buffer pool already */
1144  LWLockAcquire(newPartitionLock, LW_SHARED);
1145  buf_id = BufTableLookup(&newTag, newHash);
1146  if (buf_id >= 0)
1147  {
1148  /*
1149  * Found it. Now, pin the buffer so no one can steal it from the
1150  * buffer pool, and check to see if the correct data has been loaded
1151  * into the buffer.
1152  */
1153  buf = GetBufferDescriptor(buf_id);
1154 
1155  valid = PinBuffer(buf, strategy);
1156 
1157  /* Can release the mapping lock as soon as we've pinned it */
1158  LWLockRelease(newPartitionLock);
1159 
1160  *foundPtr = true;
1161 
1162  if (!valid)
1163  {
1164  /*
1165  * We can only get here if (a) someone else is still reading in
1166  * the page, or (b) a previous read attempt failed. We have to
1167  * wait for any active read attempt to finish, and then set up our
1168  * own read attempt if the page is still not BM_VALID.
1169  * StartBufferIO does it all.
1170  */
1171  if (StartBufferIO(buf, true))
1172  {
1173  /*
1174  * If we get here, previous attempts to read the buffer must
1175  * have failed ... but we shall bravely try again.
1176  */
1177  *foundPtr = false;
1178  }
1179  }
1180 
1181  return buf;
1182  }
1183 
1184  /*
1185  * Didn't find it in the buffer pool. We'll have to initialize a new
1186  * buffer. Remember to unlock the mapping lock while doing the work.
1187  */
1188  LWLockRelease(newPartitionLock);
1189 
1190  /* Loop here in case we have to try another victim buffer */
1191  for (;;)
1192  {
1193  /*
1194  * Ensure, while the spinlock's not yet held, that there's a free
1195  * refcount entry.
1196  */
1198 
1199  /*
1200  * Select a victim buffer. The buffer is returned with its header
1201  * spinlock still held!
1202  */
1203  buf = StrategyGetBuffer(strategy, &buf_state);
1204 
1205  Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 0);
1206 
1207  /* Must copy buffer flags while we still hold the spinlock */
1208  oldFlags = buf_state & BUF_FLAG_MASK;
1209 
1210  /* Pin the buffer and then release the buffer spinlock */
1212 
1213  /*
1214  * If the buffer was dirty, try to write it out. There is a race
1215  * condition here, in that someone might dirty it after we released it
1216  * above, or even while we are writing it out (since our share-lock
1217  * won't prevent hint-bit updates). We will recheck the dirty bit
1218  * after re-locking the buffer header.
1219  */
1220  if (oldFlags & BM_DIRTY)
1221  {
1222  /*
1223  * We need a share-lock on the buffer contents to write it out
1224  * (else we might write invalid data, eg because someone else is
1225  * compacting the page contents while we write). We must use a
1226  * conditional lock acquisition here to avoid deadlock. Even
1227  * though the buffer was not pinned (and therefore surely not
1228  * locked) when StrategyGetBuffer returned it, someone else could
1229  * have pinned and exclusive-locked it by the time we get here. If
1230  * we try to get the lock unconditionally, we'd block waiting for
1231  * them; if they later block waiting for us, deadlock ensues.
1232  * (This has been observed to happen when two backends are both
1233  * trying to split btree index pages, and the second one just
1234  * happens to be trying to split the page the first one got from
1235  * StrategyGetBuffer.)
1236  */
1238  LW_SHARED))
1239  {
1240  /*
1241  * If using a nondefault strategy, and writing the buffer
1242  * would require a WAL flush, let the strategy decide whether
1243  * to go ahead and write/reuse the buffer or to choose another
1244  * victim. We need lock to inspect the page LSN, so this
1245  * can't be done inside StrategyGetBuffer.
1246  */
1247  if (strategy != NULL)
1248  {
1249  XLogRecPtr lsn;
1250 
1251  /* Read the LSN while holding buffer header lock */
1252  buf_state = LockBufHdr(buf);
1253  lsn = BufferGetLSN(buf);
1254  UnlockBufHdr(buf, buf_state);
1255 
1256  if (XLogNeedsFlush(lsn) &&
1257  StrategyRejectBuffer(strategy, buf))
1258  {
1259  /* Drop lock/pin and loop around for another buffer */
1261  UnpinBuffer(buf, true);
1262  continue;
1263  }
1264  }
1265 
1266  /* OK, do the I/O */
1267  TRACE_POSTGRESQL_BUFFER_WRITE_DIRTY_START(forkNum, blockNum,
1269  smgr->smgr_rlocator.locator.dbOid,
1271 
1272  FlushBuffer(buf, NULL);
1274 
1276  &buf->tag);
1277 
1278  TRACE_POSTGRESQL_BUFFER_WRITE_DIRTY_DONE(forkNum, blockNum,
1280  smgr->smgr_rlocator.locator.dbOid,
1282  }
1283  else
1284  {
1285  /*
1286  * Someone else has locked the buffer, so give it up and loop
1287  * back to get another one.
1288  */
1289  UnpinBuffer(buf, true);
1290  continue;
1291  }
1292  }
1293 
1294  /*
1295  * To change the association of a valid buffer, we'll need to have
1296  * exclusive lock on both the old and new mapping partitions.
1297  */
1298  if (oldFlags & BM_TAG_VALID)
1299  {
1300  /*
1301  * Need to compute the old tag's hashcode and partition lock ID.
1302  * XXX is it worth storing the hashcode in BufferDesc so we need
1303  * not recompute it here? Probably not.
1304  */
1305  oldTag = buf->tag;
1306  oldHash = BufTableHashCode(&oldTag);
1307  oldPartitionLock = BufMappingPartitionLock(oldHash);
1308 
1309  /*
1310  * Must lock the lower-numbered partition first to avoid
1311  * deadlocks.
1312  */
1313  if (oldPartitionLock < newPartitionLock)
1314  {
1315  LWLockAcquire(oldPartitionLock, LW_EXCLUSIVE);
1316  LWLockAcquire(newPartitionLock, LW_EXCLUSIVE);
1317  }
1318  else if (oldPartitionLock > newPartitionLock)
1319  {
1320  LWLockAcquire(newPartitionLock, LW_EXCLUSIVE);
1321  LWLockAcquire(oldPartitionLock, LW_EXCLUSIVE);
1322  }
1323  else
1324  {
1325  /* only one partition, only one lock */
1326  LWLockAcquire(newPartitionLock, LW_EXCLUSIVE);
1327  }
1328  }
1329  else
1330  {
1331  /* if it wasn't valid, we need only the new partition */
1332  LWLockAcquire(newPartitionLock, LW_EXCLUSIVE);
1333  /* remember we have no old-partition lock or tag */
1334  oldPartitionLock = NULL;
1335  /* keep the compiler quiet about uninitialized variables */
1336  oldHash = 0;
1337  }
1338 
1339  /*
1340  * Try to make a hashtable entry for the buffer under its new tag.
1341  * This could fail because while we were writing someone else
1342  * allocated another buffer for the same block we want to read in.
1343  * Note that we have not yet removed the hashtable entry for the old
1344  * tag.
1345  */
1346  buf_id = BufTableInsert(&newTag, newHash, buf->buf_id);
1347 
1348  if (buf_id >= 0)
1349  {
1350  /*
1351  * Got a collision. Someone has already done what we were about to
1352  * do. We'll just handle this as if it were found in the buffer
1353  * pool in the first place. First, give up the buffer we were
1354  * planning to use.
1355  */
1356  UnpinBuffer(buf, true);
1357 
1358  /* Can give up that buffer's mapping partition lock now */
1359  if (oldPartitionLock != NULL &&
1360  oldPartitionLock != newPartitionLock)
1361  LWLockRelease(oldPartitionLock);
1362 
1363  /* remaining code should match code at top of routine */
1364 
1365  buf = GetBufferDescriptor(buf_id);
1366 
1367  valid = PinBuffer(buf, strategy);
1368 
1369  /* Can release the mapping lock as soon as we've pinned it */
1370  LWLockRelease(newPartitionLock);
1371 
1372  *foundPtr = true;
1373 
1374  if (!valid)
1375  {
1376  /*
1377  * We can only get here if (a) someone else is still reading
1378  * in the page, or (b) a previous read attempt failed. We
1379  * have to wait for any active read attempt to finish, and
1380  * then set up our own read attempt if the page is still not
1381  * BM_VALID. StartBufferIO does it all.
1382  */
1383  if (StartBufferIO(buf, true))
1384  {
1385  /*
1386  * If we get here, previous attempts to read the buffer
1387  * must have failed ... but we shall bravely try again.
1388  */
1389  *foundPtr = false;
1390  }
1391  }
1392 
1393  return buf;
1394  }
1395 
1396  /*
1397  * Need to lock the buffer header too in order to change its tag.
1398  */
1399  buf_state = LockBufHdr(buf);
1400 
1401  /*
1402  * Somebody could have pinned or re-dirtied the buffer while we were
1403  * doing the I/O and making the new hashtable entry. If so, we can't
1404  * recycle this buffer; we must undo everything we've done and start
1405  * over with a new victim buffer.
1406  */
1407  oldFlags = buf_state & BUF_FLAG_MASK;
1408  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1 && !(oldFlags & BM_DIRTY))
1409  break;
1410 
1411  UnlockBufHdr(buf, buf_state);
1412  BufTableDelete(&newTag, newHash);
1413  if (oldPartitionLock != NULL &&
1414  oldPartitionLock != newPartitionLock)
1415  LWLockRelease(oldPartitionLock);
1416  LWLockRelease(newPartitionLock);
1417  UnpinBuffer(buf, true);
1418  }
1419 
1420  /*
1421  * Okay, it's finally safe to rename the buffer.
1422  *
1423  * Clearing BM_VALID here is necessary, clearing the dirtybits is just
1424  * paranoia. We also reset the usage_count since any recency of use of
1425  * the old content is no longer relevant. (The usage_count starts out at
1426  * 1 so that the buffer can survive one clock-sweep pass.)
1427  *
1428  * Make sure BM_PERMANENT is set for buffers that must be written at every
1429  * checkpoint. Unlogged buffers only need to be written at shutdown
1430  * checkpoints, except for their "init" forks, which need to be treated
1431  * just like permanent relations.
1432  */
1433  buf->tag = newTag;
1434  buf_state &= ~(BM_VALID | BM_DIRTY | BM_JUST_DIRTIED |
1437  if (relpersistence == RELPERSISTENCE_PERMANENT || forkNum == INIT_FORKNUM)
1438  buf_state |= BM_TAG_VALID | BM_PERMANENT | BUF_USAGECOUNT_ONE;
1439  else
1440  buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
1441 
1442  UnlockBufHdr(buf, buf_state);
1443 
1444  if (oldPartitionLock != NULL)
1445  {
1446  BufTableDelete(&oldTag, oldHash);
1447  if (oldPartitionLock != newPartitionLock)
1448  LWLockRelease(oldPartitionLock);
1449  }
1450 
1451  LWLockRelease(newPartitionLock);
1452 
1453  /*
1454  * Buffer contents are currently invalid. Try to obtain the right to
1455  * start I/O. If StartBufferIO returns false, then someone else managed
1456  * to read it before we did, so there's nothing left for BufferAlloc() to
1457  * do.
1458  */
1459  if (StartBufferIO(buf, true))
1460  *foundPtr = false;
1461  else
1462  *foundPtr = true;
1463 
1464  return buf;
1465 }
1466 
1467 /*
1468  * InvalidateBuffer -- mark a shared buffer invalid and return it to the
1469  * freelist.
1470  *
1471  * The buffer header spinlock must be held at entry. We drop it before
1472  * returning. (This is sane because the caller must have locked the
1473  * buffer in order to be sure it should be dropped.)
1474  *
1475  * This is used only in contexts such as dropping a relation. We assume
1476  * that no other backend could possibly be interested in using the page,
1477  * so the only reason the buffer might be pinned is if someone else is
1478  * trying to write it out. We have to let them finish before we can
1479  * reclaim the buffer.
1480  *
1481  * The buffer could get reclaimed by someone else while we are waiting
1482  * to acquire the necessary locks; if so, don't mess it up.
1483  */
1484 static void
1486 {
1487  BufferTag oldTag;
1488  uint32 oldHash; /* hash value for oldTag */
1489  LWLock *oldPartitionLock; /* buffer partition lock for it */
1490  uint32 oldFlags;
1491  uint32 buf_state;
1492 
1493  /* Save the original buffer tag before dropping the spinlock */
1494  oldTag = buf->tag;
1495 
1496  buf_state = pg_atomic_read_u32(&buf->state);
1497  Assert(buf_state & BM_LOCKED);
1498  UnlockBufHdr(buf, buf_state);
1499 
1500  /*
1501  * Need to compute the old tag's hashcode and partition lock ID. XXX is it
1502  * worth storing the hashcode in BufferDesc so we need not recompute it
1503  * here? Probably not.
1504  */
1505  oldHash = BufTableHashCode(&oldTag);
1506  oldPartitionLock = BufMappingPartitionLock(oldHash);
1507 
1508 retry:
1509 
1510  /*
1511  * Acquire exclusive mapping lock in preparation for changing the buffer's
1512  * association.
1513  */
1514  LWLockAcquire(oldPartitionLock, LW_EXCLUSIVE);
1515 
1516  /* Re-lock the buffer header */
1517  buf_state = LockBufHdr(buf);
1518 
1519  /* If it's changed while we were waiting for lock, do nothing */
1520  if (!BufferTagsEqual(&buf->tag, &oldTag))
1521  {
1522  UnlockBufHdr(buf, buf_state);
1523  LWLockRelease(oldPartitionLock);
1524  return;
1525  }
1526 
1527  /*
1528  * We assume the only reason for it to be pinned is that someone else is
1529  * flushing the page out. Wait for them to finish. (This could be an
1530  * infinite loop if the refcount is messed up... it would be nice to time
1531  * out after awhile, but there seems no way to be sure how many loops may
1532  * be needed. Note that if the other guy has pinned the buffer but not
1533  * yet done StartBufferIO, WaitIO will fall through and we'll effectively
1534  * be busy-looping here.)
1535  */
1536  if (BUF_STATE_GET_REFCOUNT(buf_state) != 0)
1537  {
1538  UnlockBufHdr(buf, buf_state);
1539  LWLockRelease(oldPartitionLock);
1540  /* safety check: should definitely not be our *own* pin */
1542  elog(ERROR, "buffer is pinned in InvalidateBuffer");
1543  WaitIO(buf);
1544  goto retry;
1545  }
1546 
1547  /*
1548  * Clear out the buffer's tag and flags. We must do this to ensure that
1549  * linear scans of the buffer array don't think the buffer is valid.
1550  */
1551  oldFlags = buf_state & BUF_FLAG_MASK;
1552  ClearBufferTag(&buf->tag);
1553  buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
1554  UnlockBufHdr(buf, buf_state);
1555 
1556  /*
1557  * Remove the buffer from the lookup hashtable, if it was in there.
1558  */
1559  if (oldFlags & BM_TAG_VALID)
1560  BufTableDelete(&oldTag, oldHash);
1561 
1562  /*
1563  * Done with mapping lock.
1564  */
1565  LWLockRelease(oldPartitionLock);
1566 
1567  /*
1568  * Insert the buffer at the head of the list of free buffers.
1569  */
1571 }
1572 
1573 /*
1574  * MarkBufferDirty
1575  *
1576  * Marks buffer contents as dirty (actual write happens later).
1577  *
1578  * Buffer must be pinned and exclusive-locked. (If caller does not hold
1579  * exclusive lock, then somebody could be in process of writing the buffer,
1580  * leading to risk of bad data written to disk.)
1581  */
1582 void
1584 {
1585  BufferDesc *bufHdr;
1586  uint32 buf_state;
1587  uint32 old_buf_state;
1588 
1589  if (!BufferIsValid(buffer))
1590  elog(ERROR, "bad buffer ID: %d", buffer);
1591 
1592  if (BufferIsLocal(buffer))
1593  {
1595  return;
1596  }
1597 
1598  bufHdr = GetBufferDescriptor(buffer - 1);
1599 
1602  LW_EXCLUSIVE));
1603 
1604  old_buf_state = pg_atomic_read_u32(&bufHdr->state);
1605  for (;;)
1606  {
1607  if (old_buf_state & BM_LOCKED)
1608  old_buf_state = WaitBufHdrUnlocked(bufHdr);
1609 
1610  buf_state = old_buf_state;
1611 
1612  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
1613  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
1614 
1615  if (pg_atomic_compare_exchange_u32(&bufHdr->state, &old_buf_state,
1616  buf_state))
1617  break;
1618  }
1619 
1620  /*
1621  * If the buffer was not dirty already, do vacuum accounting.
1622  */
1623  if (!(old_buf_state & BM_DIRTY))
1624  {
1625  VacuumPageDirty++;
1627  if (VacuumCostActive)
1629  }
1630 }
1631 
1632 /*
1633  * ReleaseAndReadBuffer -- combine ReleaseBuffer() and ReadBuffer()
1634  *
1635  * Formerly, this saved one cycle of acquiring/releasing the BufMgrLock
1636  * compared to calling the two routines separately. Now it's mainly just
1637  * a convenience function. However, if the passed buffer is valid and
1638  * already contains the desired block, we just return it as-is; and that
1639  * does save considerable work compared to a full release and reacquire.
1640  *
1641  * Note: it is OK to pass buffer == InvalidBuffer, indicating that no old
1642  * buffer actually needs to be released. This case is the same as ReadBuffer,
1643  * but can save some tests in the caller.
1644  */
1645 Buffer
1647  Relation relation,
1648  BlockNumber blockNum)
1649 {
1650  ForkNumber forkNum = MAIN_FORKNUM;
1651  BufferDesc *bufHdr;
1652 
1653  if (BufferIsValid(buffer))
1654  {
1656  if (BufferIsLocal(buffer))
1657  {
1658  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
1659  if (bufHdr->tag.blockNum == blockNum &&
1660  BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
1661  BufTagGetForkNum(&bufHdr->tag) == forkNum)
1662  return buffer;
1664  LocalRefCount[-buffer - 1]--;
1665  }
1666  else
1667  {
1668  bufHdr = GetBufferDescriptor(buffer - 1);
1669  /* we have pin, so it's ok to examine tag without spinlock */
1670  if (bufHdr->tag.blockNum == blockNum &&
1671  BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
1672  BufTagGetForkNum(&bufHdr->tag) == forkNum)
1673  return buffer;
1674  UnpinBuffer(bufHdr, true);
1675  }
1676  }
1677 
1678  return ReadBuffer(relation, blockNum);
1679 }
1680 
1681 /*
1682  * PinBuffer -- make buffer unavailable for replacement.
1683  *
1684  * For the default access strategy, the buffer's usage_count is incremented
1685  * when we first pin it; for other strategies we just make sure the usage_count
1686  * isn't zero. (The idea of the latter is that we don't want synchronized
1687  * heap scans to inflate the count, but we need it to not be zero to discourage
1688  * other backends from stealing buffers from our ring. As long as we cycle
1689  * through the ring faster than the global clock-sweep cycles, buffers in
1690  * our ring won't be chosen as victims for replacement by other backends.)
1691  *
1692  * This should be applied only to shared buffers, never local ones.
1693  *
1694  * Since buffers are pinned/unpinned very frequently, pin buffers without
1695  * taking the buffer header lock; instead update the state variable in loop of
1696  * CAS operations. Hopefully it's just a single CAS.
1697  *
1698  * Note that ResourceOwnerEnlargeBuffers must have been done already.
1699  *
1700  * Returns true if buffer is BM_VALID, else false. This provision allows
1701  * some callers to avoid an extra spinlock cycle.
1702  */
1703 static bool
1705 {
1707  bool result;
1708  PrivateRefCountEntry *ref;
1709 
1710  ref = GetPrivateRefCountEntry(b, true);
1711 
1712  if (ref == NULL)
1713  {
1714  uint32 buf_state;
1715  uint32 old_buf_state;
1716 
1718  ref = NewPrivateRefCountEntry(b);
1719 
1720  old_buf_state = pg_atomic_read_u32(&buf->state);
1721  for (;;)
1722  {
1723  if (old_buf_state & BM_LOCKED)
1724  old_buf_state = WaitBufHdrUnlocked(buf);
1725 
1726  buf_state = old_buf_state;
1727 
1728  /* increase refcount */
1729  buf_state += BUF_REFCOUNT_ONE;
1730 
1731  if (strategy == NULL)
1732  {
1733  /* Default case: increase usagecount unless already max. */
1735  buf_state += BUF_USAGECOUNT_ONE;
1736  }
1737  else
1738  {
1739  /*
1740  * Ring buffers shouldn't evict others from pool. Thus we
1741  * don't make usagecount more than 1.
1742  */
1743  if (BUF_STATE_GET_USAGECOUNT(buf_state) == 0)
1744  buf_state += BUF_USAGECOUNT_ONE;
1745  }
1746 
1747  if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
1748  buf_state))
1749  {
1750  result = (buf_state & BM_VALID) != 0;
1751 
1752  /*
1753  * Assume that we acquired a buffer pin for the purposes of
1754  * Valgrind buffer client checks (even in !result case) to
1755  * keep things simple. Buffers that are unsafe to access are
1756  * not generally guaranteed to be marked undefined or
1757  * non-accessible in any case.
1758  */
1760  break;
1761  }
1762  }
1763  }
1764  else
1765  {
1766  /*
1767  * If we previously pinned the buffer, it must surely be valid.
1768  *
1769  * Note: We deliberately avoid a Valgrind client request here.
1770  * Individual access methods can optionally superimpose buffer page
1771  * client requests on top of our client requests to enforce that
1772  * buffers are only accessed while locked (and pinned). It's possible
1773  * that the buffer page is legitimately non-accessible here. We
1774  * cannot meddle with that.
1775  */
1776  result = true;
1777  }
1778 
1779  ref->refcount++;
1780  Assert(ref->refcount > 0);
1782  return result;
1783 }
1784 
1785 /*
1786  * PinBuffer_Locked -- as above, but caller already locked the buffer header.
1787  * The spinlock is released before return.
1788  *
1789  * As this function is called with the spinlock held, the caller has to
1790  * previously call ReservePrivateRefCountEntry().
1791  *
1792  * Currently, no callers of this function want to modify the buffer's
1793  * usage_count at all, so there's no need for a strategy parameter.
1794  * Also we don't bother with a BM_VALID test (the caller could check that for
1795  * itself).
1796  *
1797  * Also all callers only ever use this function when it's known that the
1798  * buffer can't have a preexisting pin by this backend. That allows us to skip
1799  * searching the private refcount array & hash, which is a boon, because the
1800  * spinlock is still held.
1801  *
1802  * Note: use of this routine is frequently mandatory, not just an optimization
1803  * to save a spin lock/unlock cycle, because we need to pin a buffer before
1804  * its state can change under us.
1805  */
1806 static void
1808 {
1809  Buffer b;
1810  PrivateRefCountEntry *ref;
1811  uint32 buf_state;
1812 
1813  /*
1814  * As explained, We don't expect any preexisting pins. That allows us to
1815  * manipulate the PrivateRefCount after releasing the spinlock
1816  */
1818 
1819  /*
1820  * Buffer can't have a preexisting pin, so mark its page as defined to
1821  * Valgrind (this is similar to the PinBuffer() case where the backend
1822  * doesn't already have a buffer pin)
1823  */
1825 
1826  /*
1827  * Since we hold the buffer spinlock, we can update the buffer state and
1828  * release the lock in one operation.
1829  */
1830  buf_state = pg_atomic_read_u32(&buf->state);
1831  Assert(buf_state & BM_LOCKED);
1832  buf_state += BUF_REFCOUNT_ONE;
1833  UnlockBufHdr(buf, buf_state);
1834 
1836 
1837  ref = NewPrivateRefCountEntry(b);
1838  ref->refcount++;
1839 
1841 }
1842 
1843 /*
1844  * UnpinBuffer -- make buffer available for replacement.
1845  *
1846  * This should be applied only to shared buffers, never local ones.
1847  *
1848  * Most but not all callers want CurrentResourceOwner to be adjusted.
1849  * Those that don't should pass fixOwner = false.
1850  */
1851 static void
1852 UnpinBuffer(BufferDesc *buf, bool fixOwner)
1853 {
1854  PrivateRefCountEntry *ref;
1856 
1857  /* not moving as we're likely deleting it soon anyway */
1858  ref = GetPrivateRefCountEntry(b, false);
1859  Assert(ref != NULL);
1860 
1861  if (fixOwner)
1863 
1864  Assert(ref->refcount > 0);
1865  ref->refcount--;
1866  if (ref->refcount == 0)
1867  {
1868  uint32 buf_state;
1869  uint32 old_buf_state;
1870 
1871  /*
1872  * Mark buffer non-accessible to Valgrind.
1873  *
1874  * Note that the buffer may have already been marked non-accessible
1875  * within access method code that enforces that buffers are only
1876  * accessed while a buffer lock is held.
1877  */
1879 
1880  /* I'd better not still hold the buffer content lock */
1882 
1883  /*
1884  * Decrement the shared reference count.
1885  *
1886  * Since buffer spinlock holder can update status using just write,
1887  * it's not safe to use atomic decrement here; thus use a CAS loop.
1888  */
1889  old_buf_state = pg_atomic_read_u32(&buf->state);
1890  for (;;)
1891  {
1892  if (old_buf_state & BM_LOCKED)
1893  old_buf_state = WaitBufHdrUnlocked(buf);
1894 
1895  buf_state = old_buf_state;
1896 
1897  buf_state -= BUF_REFCOUNT_ONE;
1898 
1899  if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
1900  buf_state))
1901  break;
1902  }
1903 
1904  /* Support LockBufferForCleanup() */
1905  if (buf_state & BM_PIN_COUNT_WAITER)
1906  {
1907  /*
1908  * Acquire the buffer header lock, re-check that there's a waiter.
1909  * Another backend could have unpinned this buffer, and already
1910  * woken up the waiter. There's no danger of the buffer being
1911  * replaced after we unpinned it above, as it's pinned by the
1912  * waiter.
1913  */
1914  buf_state = LockBufHdr(buf);
1915 
1916  if ((buf_state & BM_PIN_COUNT_WAITER) &&
1917  BUF_STATE_GET_REFCOUNT(buf_state) == 1)
1918  {
1919  /* we just released the last pin other than the waiter's */
1920  int wait_backend_pgprocno = buf->wait_backend_pgprocno;
1921 
1922  buf_state &= ~BM_PIN_COUNT_WAITER;
1923  UnlockBufHdr(buf, buf_state);
1924  ProcSendSignal(wait_backend_pgprocno);
1925  }
1926  else
1927  UnlockBufHdr(buf, buf_state);
1928  }
1930  }
1931 }
1932 
1933 #define ST_SORT sort_checkpoint_bufferids
1934 #define ST_ELEMENT_TYPE CkptSortItem
1935 #define ST_COMPARE(a, b) ckpt_buforder_comparator(a, b)
1936 #define ST_SCOPE static
1937 #define ST_DEFINE
1938 #include <lib/sort_template.h>
1939 
1940 /*
1941  * BufferSync -- Write out all dirty buffers in the pool.
1942  *
1943  * This is called at checkpoint time to write out all dirty shared buffers.
1944  * The checkpoint request flags should be passed in. If CHECKPOINT_IMMEDIATE
1945  * is set, we disable delays between writes; if CHECKPOINT_IS_SHUTDOWN,
1946  * CHECKPOINT_END_OF_RECOVERY or CHECKPOINT_FLUSH_ALL is set, we write even
1947  * unlogged buffers, which are otherwise skipped. The remaining flags
1948  * currently have no effect here.
1949  */
1950 static void
1951 BufferSync(int flags)
1952 {
1953  uint32 buf_state;
1954  int buf_id;
1955  int num_to_scan;
1956  int num_spaces;
1957  int num_processed;
1958  int num_written;
1959  CkptTsStatus *per_ts_stat = NULL;
1960  Oid last_tsid;
1961  binaryheap *ts_heap;
1962  int i;
1963  int mask = BM_DIRTY;
1964  WritebackContext wb_context;
1965 
1966  /* Make sure we can handle the pin inside SyncOneBuffer */
1968 
1969  /*
1970  * Unless this is a shutdown checkpoint or we have been explicitly told,
1971  * we write only permanent, dirty buffers. But at shutdown or end of
1972  * recovery, we write all dirty buffers.
1973  */
1976  mask |= BM_PERMANENT;
1977 
1978  /*
1979  * Loop over all buffers, and mark the ones that need to be written with
1980  * BM_CHECKPOINT_NEEDED. Count them as we go (num_to_scan), so that we
1981  * can estimate how much work needs to be done.
1982  *
1983  * This allows us to write only those pages that were dirty when the
1984  * checkpoint began, and not those that get dirtied while it proceeds.
1985  * Whenever a page with BM_CHECKPOINT_NEEDED is written out, either by us
1986  * later in this function, or by normal backends or the bgwriter cleaning
1987  * scan, the flag is cleared. Any buffer dirtied after this point won't
1988  * have the flag set.
1989  *
1990  * Note that if we fail to write some buffer, we may leave buffers with
1991  * BM_CHECKPOINT_NEEDED still set. This is OK since any such buffer would
1992  * certainly need to be written for the next checkpoint attempt, too.
1993  */
1994  num_to_scan = 0;
1995  for (buf_id = 0; buf_id < NBuffers; buf_id++)
1996  {
1997  BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
1998 
1999  /*
2000  * Header spinlock is enough to examine BM_DIRTY, see comment in
2001  * SyncOneBuffer.
2002  */
2003  buf_state = LockBufHdr(bufHdr);
2004 
2005  if ((buf_state & mask) == mask)
2006  {
2007  CkptSortItem *item;
2008 
2009  buf_state |= BM_CHECKPOINT_NEEDED;
2010 
2011  item = &CkptBufferIds[num_to_scan++];
2012  item->buf_id = buf_id;
2013  item->tsId = bufHdr->tag.spcOid;
2014  item->relNumber = BufTagGetRelNumber(&bufHdr->tag);
2015  item->forkNum = BufTagGetForkNum(&bufHdr->tag);
2016  item->blockNum = bufHdr->tag.blockNum;
2017  }
2018 
2019  UnlockBufHdr(bufHdr, buf_state);
2020 
2021  /* Check for barrier events in case NBuffers is large. */
2024  }
2025 
2026  if (num_to_scan == 0)
2027  return; /* nothing to do */
2028 
2030 
2031  TRACE_POSTGRESQL_BUFFER_SYNC_START(NBuffers, num_to_scan);
2032 
2033  /*
2034  * Sort buffers that need to be written to reduce the likelihood of random
2035  * IO. The sorting is also important for the implementation of balancing
2036  * writes between tablespaces. Without balancing writes we'd potentially
2037  * end up writing to the tablespaces one-by-one; possibly overloading the
2038  * underlying system.
2039  */
2040  sort_checkpoint_bufferids(CkptBufferIds, num_to_scan);
2041 
2042  num_spaces = 0;
2043 
2044  /*
2045  * Allocate progress status for each tablespace with buffers that need to
2046  * be flushed. This requires the to-be-flushed array to be sorted.
2047  */
2048  last_tsid = InvalidOid;
2049  for (i = 0; i < num_to_scan; i++)
2050  {
2051  CkptTsStatus *s;
2052  Oid cur_tsid;
2053 
2054  cur_tsid = CkptBufferIds[i].tsId;
2055 
2056  /*
2057  * Grow array of per-tablespace status structs, every time a new
2058  * tablespace is found.
2059  */
2060  if (last_tsid == InvalidOid || last_tsid != cur_tsid)
2061  {
2062  Size sz;
2063 
2064  num_spaces++;
2065 
2066  /*
2067  * Not worth adding grow-by-power-of-2 logic here - even with a
2068  * few hundred tablespaces this should be fine.
2069  */
2070  sz = sizeof(CkptTsStatus) * num_spaces;
2071 
2072  if (per_ts_stat == NULL)
2073  per_ts_stat = (CkptTsStatus *) palloc(sz);
2074  else
2075  per_ts_stat = (CkptTsStatus *) repalloc(per_ts_stat, sz);
2076 
2077  s = &per_ts_stat[num_spaces - 1];
2078  memset(s, 0, sizeof(*s));
2079  s->tsId = cur_tsid;
2080 
2081  /*
2082  * The first buffer in this tablespace. As CkptBufferIds is sorted
2083  * by tablespace all (s->num_to_scan) buffers in this tablespace
2084  * will follow afterwards.
2085  */
2086  s->index = i;
2087 
2088  /*
2089  * progress_slice will be determined once we know how many buffers
2090  * are in each tablespace, i.e. after this loop.
2091  */
2092 
2093  last_tsid = cur_tsid;
2094  }
2095  else
2096  {
2097  s = &per_ts_stat[num_spaces - 1];
2098  }
2099 
2100  s->num_to_scan++;
2101 
2102  /* Check for barrier events. */
2105  }
2106 
2107  Assert(num_spaces > 0);
2108 
2109  /*
2110  * Build a min-heap over the write-progress in the individual tablespaces,
2111  * and compute how large a portion of the total progress a single
2112  * processed buffer is.
2113  */
2114  ts_heap = binaryheap_allocate(num_spaces,
2116  NULL);
2117 
2118  for (i = 0; i < num_spaces; i++)
2119  {
2120  CkptTsStatus *ts_stat = &per_ts_stat[i];
2121 
2122  ts_stat->progress_slice = (float8) num_to_scan / ts_stat->num_to_scan;
2123 
2124  binaryheap_add_unordered(ts_heap, PointerGetDatum(ts_stat));
2125  }
2126 
2127  binaryheap_build(ts_heap);
2128 
2129  /*
2130  * Iterate through to-be-checkpointed buffers and write the ones (still)
2131  * marked with BM_CHECKPOINT_NEEDED. The writes are balanced between
2132  * tablespaces; otherwise the sorting would lead to only one tablespace
2133  * receiving writes at a time, making inefficient use of the hardware.
2134  */
2135  num_processed = 0;
2136  num_written = 0;
2137  while (!binaryheap_empty(ts_heap))
2138  {
2139  BufferDesc *bufHdr = NULL;
2140  CkptTsStatus *ts_stat = (CkptTsStatus *)
2142 
2143  buf_id = CkptBufferIds[ts_stat->index].buf_id;
2144  Assert(buf_id != -1);
2145 
2146  bufHdr = GetBufferDescriptor(buf_id);
2147 
2148  num_processed++;
2149 
2150  /*
2151  * We don't need to acquire the lock here, because we're only looking
2152  * at a single bit. It's possible that someone else writes the buffer
2153  * and clears the flag right after we check, but that doesn't matter
2154  * since SyncOneBuffer will then do nothing. However, there is a
2155  * further race condition: it's conceivable that between the time we
2156  * examine the bit here and the time SyncOneBuffer acquires the lock,
2157  * someone else not only wrote the buffer but replaced it with another
2158  * page and dirtied it. In that improbable case, SyncOneBuffer will
2159  * write the buffer though we didn't need to. It doesn't seem worth
2160  * guarding against this, though.
2161  */
2163  {
2164  if (SyncOneBuffer(buf_id, false, &wb_context) & BUF_WRITTEN)
2165  {
2166  TRACE_POSTGRESQL_BUFFER_SYNC_WRITTEN(buf_id);
2168  num_written++;
2169  }
2170  }
2171 
2172  /*
2173  * Measure progress independent of actually having to flush the buffer
2174  * - otherwise writing become unbalanced.
2175  */
2176  ts_stat->progress += ts_stat->progress_slice;
2177  ts_stat->num_scanned++;
2178  ts_stat->index++;
2179 
2180  /* Have all the buffers from the tablespace been processed? */
2181  if (ts_stat->num_scanned == ts_stat->num_to_scan)
2182  {
2183  binaryheap_remove_first(ts_heap);
2184  }
2185  else
2186  {
2187  /* update heap with the new progress */
2188  binaryheap_replace_first(ts_heap, PointerGetDatum(ts_stat));
2189  }
2190 
2191  /*
2192  * Sleep to throttle our I/O rate.
2193  *
2194  * (This will check for barrier events even if it doesn't sleep.)
2195  */
2196  CheckpointWriteDelay(flags, (double) num_processed / num_to_scan);
2197  }
2198 
2199  /* issue all pending flushes */
2200  IssuePendingWritebacks(&wb_context);
2201 
2202  pfree(per_ts_stat);
2203  per_ts_stat = NULL;
2204  binaryheap_free(ts_heap);
2205 
2206  /*
2207  * Update checkpoint statistics. As noted above, this doesn't include
2208  * buffers written by other backends or bgwriter scan.
2209  */
2210  CheckpointStats.ckpt_bufs_written += num_written;
2211 
2212  TRACE_POSTGRESQL_BUFFER_SYNC_DONE(NBuffers, num_written, num_to_scan);
2213 }
2214 
2215 /*
2216  * BgBufferSync -- Write out some dirty buffers in the pool.
2217  *
2218  * This is called periodically by the background writer process.
2219  *
2220  * Returns true if it's appropriate for the bgwriter process to go into
2221  * low-power hibernation mode. (This happens if the strategy clock sweep
2222  * has been "lapped" and no buffer allocations have occurred recently,
2223  * or if the bgwriter has been effectively disabled by setting
2224  * bgwriter_lru_maxpages to 0.)
2225  */
2226 bool
2228 {
2229  /* info obtained from freelist.c */
2230  int strategy_buf_id;
2231  uint32 strategy_passes;
2232  uint32 recent_alloc;
2233 
2234  /*
2235  * Information saved between calls so we can determine the strategy
2236  * point's advance rate and avoid scanning already-cleaned buffers.
2237  */
2238  static bool saved_info_valid = false;
2239  static int prev_strategy_buf_id;
2240  static uint32 prev_strategy_passes;
2241  static int next_to_clean;
2242  static uint32 next_passes;
2243 
2244  /* Moving averages of allocation rate and clean-buffer density */
2245  static float smoothed_alloc = 0;
2246  static float smoothed_density = 10.0;
2247 
2248  /* Potentially these could be tunables, but for now, not */
2249  float smoothing_samples = 16;
2250  float scan_whole_pool_milliseconds = 120000.0;
2251 
2252  /* Used to compute how far we scan ahead */
2253  long strategy_delta;
2254  int bufs_to_lap;
2255  int bufs_ahead;
2256  float scans_per_alloc;
2257  int reusable_buffers_est;
2258  int upcoming_alloc_est;
2259  int min_scan_buffers;
2260 
2261  /* Variables for the scanning loop proper */
2262  int num_to_scan;
2263  int num_written;
2264  int reusable_buffers;
2265 
2266  /* Variables for final smoothed_density update */
2267  long new_strategy_delta;
2268  uint32 new_recent_alloc;
2269 
2270  /*
2271  * Find out where the freelist clock sweep currently is, and how many
2272  * buffer allocations have happened since our last call.
2273  */
2274  strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc);
2275 
2276  /* Report buffer alloc counts to pgstat */
2277  PendingBgWriterStats.buf_alloc += recent_alloc;
2278 
2279  /*
2280  * If we're not running the LRU scan, just stop after doing the stats
2281  * stuff. We mark the saved state invalid so that we can recover sanely
2282  * if LRU scan is turned back on later.
2283  */
2284  if (bgwriter_lru_maxpages <= 0)
2285  {
2286  saved_info_valid = false;
2287  return true;
2288  }
2289 
2290  /*
2291  * Compute strategy_delta = how many buffers have been scanned by the
2292  * clock sweep since last time. If first time through, assume none. Then
2293  * see if we are still ahead of the clock sweep, and if so, how many
2294  * buffers we could scan before we'd catch up with it and "lap" it. Note:
2295  * weird-looking coding of xxx_passes comparisons are to avoid bogus
2296  * behavior when the passes counts wrap around.
2297  */
2298  if (saved_info_valid)
2299  {
2300  int32 passes_delta = strategy_passes - prev_strategy_passes;
2301 
2302  strategy_delta = strategy_buf_id - prev_strategy_buf_id;
2303  strategy_delta += (long) passes_delta * NBuffers;
2304 
2305  Assert(strategy_delta >= 0);
2306 
2307  if ((int32) (next_passes - strategy_passes) > 0)
2308  {
2309  /* we're one pass ahead of the strategy point */
2310  bufs_to_lap = strategy_buf_id - next_to_clean;
2311 #ifdef BGW_DEBUG
2312  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
2313  next_passes, next_to_clean,
2314  strategy_passes, strategy_buf_id,
2315  strategy_delta, bufs_to_lap);
2316 #endif
2317  }
2318  else if (next_passes == strategy_passes &&
2319  next_to_clean >= strategy_buf_id)
2320  {
2321  /* on same pass, but ahead or at least not behind */
2322  bufs_to_lap = NBuffers - (next_to_clean - strategy_buf_id);
2323 #ifdef BGW_DEBUG
2324  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
2325  next_passes, next_to_clean,
2326  strategy_passes, strategy_buf_id,
2327  strategy_delta, bufs_to_lap);
2328 #endif
2329  }
2330  else
2331  {
2332  /*
2333  * We're behind, so skip forward to the strategy point and start
2334  * cleaning from there.
2335  */
2336 #ifdef BGW_DEBUG
2337  elog(DEBUG2, "bgwriter behind: bgw %u-%u strategy %u-%u delta=%ld",
2338  next_passes, next_to_clean,
2339  strategy_passes, strategy_buf_id,
2340  strategy_delta);
2341 #endif
2342  next_to_clean = strategy_buf_id;
2343  next_passes = strategy_passes;
2344  bufs_to_lap = NBuffers;
2345  }
2346  }
2347  else
2348  {
2349  /*
2350  * Initializing at startup or after LRU scanning had been off. Always
2351  * start at the strategy point.
2352  */
2353 #ifdef BGW_DEBUG
2354  elog(DEBUG2, "bgwriter initializing: strategy %u-%u",
2355  strategy_passes, strategy_buf_id);
2356 #endif
2357  strategy_delta = 0;
2358  next_to_clean = strategy_buf_id;
2359  next_passes = strategy_passes;
2360  bufs_to_lap = NBuffers;
2361  }
2362 
2363  /* Update saved info for next time */
2364  prev_strategy_buf_id = strategy_buf_id;
2365  prev_strategy_passes = strategy_passes;
2366  saved_info_valid = true;
2367 
2368  /*
2369  * Compute how many buffers had to be scanned for each new allocation, ie,
2370  * 1/density of reusable buffers, and track a moving average of that.
2371  *
2372  * If the strategy point didn't move, we don't update the density estimate
2373  */
2374  if (strategy_delta > 0 && recent_alloc > 0)
2375  {
2376  scans_per_alloc = (float) strategy_delta / (float) recent_alloc;
2377  smoothed_density += (scans_per_alloc - smoothed_density) /
2378  smoothing_samples;
2379  }
2380 
2381  /*
2382  * Estimate how many reusable buffers there are between the current
2383  * strategy point and where we've scanned ahead to, based on the smoothed
2384  * density estimate.
2385  */
2386  bufs_ahead = NBuffers - bufs_to_lap;
2387  reusable_buffers_est = (float) bufs_ahead / smoothed_density;
2388 
2389  /*
2390  * Track a moving average of recent buffer allocations. Here, rather than
2391  * a true average we want a fast-attack, slow-decline behavior: we
2392  * immediately follow any increase.
2393  */
2394  if (smoothed_alloc <= (float) recent_alloc)
2395  smoothed_alloc = recent_alloc;
2396  else
2397  smoothed_alloc += ((float) recent_alloc - smoothed_alloc) /
2398  smoothing_samples;
2399 
2400  /* Scale the estimate by a GUC to allow more aggressive tuning. */
2401  upcoming_alloc_est = (int) (smoothed_alloc * bgwriter_lru_multiplier);
2402 
2403  /*
2404  * If recent_alloc remains at zero for many cycles, smoothed_alloc will
2405  * eventually underflow to zero, and the underflows produce annoying
2406  * kernel warnings on some platforms. Once upcoming_alloc_est has gone to
2407  * zero, there's no point in tracking smaller and smaller values of
2408  * smoothed_alloc, so just reset it to exactly zero to avoid this
2409  * syndrome. It will pop back up as soon as recent_alloc increases.
2410  */
2411  if (upcoming_alloc_est == 0)
2412  smoothed_alloc = 0;
2413 
2414  /*
2415  * Even in cases where there's been little or no buffer allocation
2416  * activity, we want to make a small amount of progress through the buffer
2417  * cache so that as many reusable buffers as possible are clean after an
2418  * idle period.
2419  *
2420  * (scan_whole_pool_milliseconds / BgWriterDelay) computes how many times
2421  * the BGW will be called during the scan_whole_pool time; slice the
2422  * buffer pool into that many sections.
2423  */
2424  min_scan_buffers = (int) (NBuffers / (scan_whole_pool_milliseconds / BgWriterDelay));
2425 
2426  if (upcoming_alloc_est < (min_scan_buffers + reusable_buffers_est))
2427  {
2428 #ifdef BGW_DEBUG
2429  elog(DEBUG2, "bgwriter: alloc_est=%d too small, using min=%d + reusable_est=%d",
2430  upcoming_alloc_est, min_scan_buffers, reusable_buffers_est);
2431 #endif
2432  upcoming_alloc_est = min_scan_buffers + reusable_buffers_est;
2433  }
2434 
2435  /*
2436  * Now write out dirty reusable buffers, working forward from the
2437  * next_to_clean point, until we have lapped the strategy scan, or cleaned
2438  * enough buffers to match our estimate of the next cycle's allocation
2439  * requirements, or hit the bgwriter_lru_maxpages limit.
2440  */
2441 
2442  /* Make sure we can handle the pin inside SyncOneBuffer */
2444 
2445  num_to_scan = bufs_to_lap;
2446  num_written = 0;
2447  reusable_buffers = reusable_buffers_est;
2448 
2449  /* Execute the LRU scan */
2450  while (num_to_scan > 0 && reusable_buffers < upcoming_alloc_est)
2451  {
2452  int sync_state = SyncOneBuffer(next_to_clean, true,
2453  wb_context);
2454 
2455  if (++next_to_clean >= NBuffers)
2456  {
2457  next_to_clean = 0;
2458  next_passes++;
2459  }
2460  num_to_scan--;
2461 
2462  if (sync_state & BUF_WRITTEN)
2463  {
2464  reusable_buffers++;
2465  if (++num_written >= bgwriter_lru_maxpages)
2466  {
2468  break;
2469  }
2470  }
2471  else if (sync_state & BUF_REUSABLE)
2472  reusable_buffers++;
2473  }
2474 
2475  PendingBgWriterStats.buf_written_clean += num_written;
2476 
2477 #ifdef BGW_DEBUG
2478  elog(DEBUG1, "bgwriter: recent_alloc=%u smoothed=%.2f delta=%ld ahead=%d density=%.2f reusable_est=%d upcoming_est=%d scanned=%d wrote=%d reusable=%d",
2479  recent_alloc, smoothed_alloc, strategy_delta, bufs_ahead,
2480  smoothed_density, reusable_buffers_est, upcoming_alloc_est,
2481  bufs_to_lap - num_to_scan,
2482  num_written,
2483  reusable_buffers - reusable_buffers_est);
2484 #endif
2485 
2486  /*
2487  * Consider the above scan as being like a new allocation scan.
2488  * Characterize its density and update the smoothed one based on it. This
2489  * effectively halves the moving average period in cases where both the
2490  * strategy and the background writer are doing some useful scanning,
2491  * which is helpful because a long memory isn't as desirable on the
2492  * density estimates.
2493  */
2494  new_strategy_delta = bufs_to_lap - num_to_scan;
2495  new_recent_alloc = reusable_buffers - reusable_buffers_est;
2496  if (new_strategy_delta > 0 && new_recent_alloc > 0)
2497  {
2498  scans_per_alloc = (float) new_strategy_delta / (float) new_recent_alloc;
2499  smoothed_density += (scans_per_alloc - smoothed_density) /
2500  smoothing_samples;
2501 
2502 #ifdef BGW_DEBUG
2503  elog(DEBUG2, "bgwriter: cleaner density alloc=%u scan=%ld density=%.2f new smoothed=%.2f",
2504  new_recent_alloc, new_strategy_delta,
2505  scans_per_alloc, smoothed_density);
2506 #endif
2507  }
2508 
2509  /* Return true if OK to hibernate */
2510  return (bufs_to_lap == 0 && recent_alloc == 0);
2511 }
2512 
2513 /*
2514  * SyncOneBuffer -- process a single buffer during syncing.
2515  *
2516  * If skip_recently_used is true, we don't write currently-pinned buffers, nor
2517  * buffers marked recently used, as these are not replacement candidates.
2518  *
2519  * Returns a bitmask containing the following flag bits:
2520  * BUF_WRITTEN: we wrote the buffer.
2521  * BUF_REUSABLE: buffer is available for replacement, ie, it has
2522  * pin count 0 and usage count 0.
2523  *
2524  * (BUF_WRITTEN could be set in error if FlushBuffer finds the buffer clean
2525  * after locking it, but we don't care all that much.)
2526  *
2527  * Note: caller must have done ResourceOwnerEnlargeBuffers.
2528  */
2529 static int
2530 SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
2531 {
2532  BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
2533  int result = 0;
2534  uint32 buf_state;
2535  BufferTag tag;
2536 
2538 
2539  /*
2540  * Check whether buffer needs writing.
2541  *
2542  * We can make this check without taking the buffer content lock so long
2543  * as we mark pages dirty in access methods *before* logging changes with
2544  * XLogInsert(): if someone marks the buffer dirty just after our check we
2545  * don't worry because our checkpoint.redo points before log record for
2546  * upcoming changes and so we are not required to write such dirty buffer.
2547  */
2548  buf_state = LockBufHdr(bufHdr);
2549 
2550  if (BUF_STATE_GET_REFCOUNT(buf_state) == 0 &&
2551  BUF_STATE_GET_USAGECOUNT(buf_state) == 0)
2552  {
2553  result |= BUF_REUSABLE;
2554  }
2555  else if (skip_recently_used)
2556  {
2557  /* Caller told us not to write recently-used buffers */
2558  UnlockBufHdr(bufHdr, buf_state);
2559  return result;
2560  }
2561 
2562  if (!(buf_state & BM_VALID) || !(buf_state & BM_DIRTY))
2563  {
2564  /* It's clean, so nothing to do */
2565  UnlockBufHdr(bufHdr, buf_state);
2566  return result;
2567  }
2568 
2569  /*
2570  * Pin it, share-lock it, write it. (FlushBuffer will do nothing if the
2571  * buffer is clean by the time we've locked it.)
2572  */
2573  PinBuffer_Locked(bufHdr);
2575 
2576  FlushBuffer(bufHdr, NULL);
2577 
2579 
2580  tag = bufHdr->tag;
2581 
2582  UnpinBuffer(bufHdr, true);
2583 
2584  ScheduleBufferTagForWriteback(wb_context, &tag);
2585 
2586  return result | BUF_WRITTEN;
2587 }
2588 
2589 /*
2590  * AtEOXact_Buffers - clean up at end of transaction.
2591  *
2592  * As of PostgreSQL 8.0, buffer pins should get released by the
2593  * ResourceOwner mechanism. This routine is just a debugging
2594  * cross-check that no pins remain.
2595  */
2596 void
2597 AtEOXact_Buffers(bool isCommit)
2598 {
2600 
2601  AtEOXact_LocalBuffers(isCommit);
2602 
2604 }
2605 
2606 /*
2607  * Initialize access to shared buffer pool
2608  *
2609  * This is called during backend startup (whether standalone or under the
2610  * postmaster). It sets up for this backend's access to the already-existing
2611  * buffer pool.
2612  */
2613 void
2615 {
2616  HASHCTL hash_ctl;
2617 
2618  memset(&PrivateRefCountArray, 0, sizeof(PrivateRefCountArray));
2619 
2620  hash_ctl.keysize = sizeof(int32);
2621  hash_ctl.entrysize = sizeof(PrivateRefCountEntry);
2622 
2623  PrivateRefCountHash = hash_create("PrivateRefCount", 100, &hash_ctl,
2624  HASH_ELEM | HASH_BLOBS);
2625 
2626  /*
2627  * AtProcExit_Buffers needs LWLock access, and thereby has to be called at
2628  * the corresponding phase of backend shutdown.
2629  */
2630  Assert(MyProc != NULL);
2632 }
2633 
2634 /*
2635  * During backend exit, ensure that we released all shared-buffer locks and
2636  * assert that we have no remaining pins.
2637  */
2638 static void
2640 {
2641  AbortBufferIO();
2642  UnlockBuffers();
2643 
2645 
2646  /* localbuf.c needs a chance too */
2648 }
2649 
2650 /*
2651  * CheckForBufferLeaks - ensure this backend holds no buffer pins
2652  *
2653  * As of PostgreSQL 8.0, buffer pins should get released by the
2654  * ResourceOwner mechanism. This routine is just a debugging
2655  * cross-check that no pins remain.
2656  */
2657 static void
2659 {
2660 #ifdef USE_ASSERT_CHECKING
2661  int RefCountErrors = 0;
2663  int i;
2664 
2665  /* check the array */
2666  for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
2667  {
2669 
2670  if (res->buffer != InvalidBuffer)
2671  {
2672  PrintBufferLeakWarning(res->buffer);
2673  RefCountErrors++;
2674  }
2675  }
2676 
2677  /* if necessary search the hash */
2679  {
2680  HASH_SEQ_STATUS hstat;
2681 
2683  while ((res = (PrivateRefCountEntry *) hash_seq_search(&hstat)) != NULL)
2684  {
2685  PrintBufferLeakWarning(res->buffer);
2686  RefCountErrors++;
2687  }
2688  }
2689 
2690  Assert(RefCountErrors == 0);
2691 #endif
2692 }
2693 
2694 /*
2695  * Helper routine to issue warnings when a buffer is unexpectedly pinned
2696  */
2697 void
2699 {
2700  BufferDesc *buf;
2701  int32 loccount;
2702  char *path;
2703  BackendId backend;
2704  uint32 buf_state;
2705 
2707  if (BufferIsLocal(buffer))
2708  {
2710  loccount = LocalRefCount[-buffer - 1];
2711  backend = MyBackendId;
2712  }
2713  else
2714  {
2716  loccount = GetPrivateRefCount(buffer);
2717  backend = InvalidBackendId;
2718  }
2719 
2720  /* theoretically we should lock the bufhdr here */
2721  path = relpathbackend(BufTagGetRelFileLocator(&buf->tag), backend,
2722  BufTagGetForkNum(&buf->tag));
2723  buf_state = pg_atomic_read_u32(&buf->state);
2724  elog(WARNING,
2725  "buffer refcount leak: [%03d] "
2726  "(rel=%s, blockNum=%u, flags=0x%x, refcount=%u %d)",
2727  buffer, path,
2728  buf->tag.blockNum, buf_state & BUF_FLAG_MASK,
2729  BUF_STATE_GET_REFCOUNT(buf_state), loccount);
2730  pfree(path);
2731 }
2732 
2733 /*
2734  * CheckPointBuffers
2735  *
2736  * Flush all dirty blocks in buffer pool to disk at checkpoint time.
2737  *
2738  * Note: temporary relations do not participate in checkpoints, so they don't
2739  * need to be flushed.
2740  */
2741 void
2743 {
2744  BufferSync(flags);
2745 }
2746 
2747 
2748 /*
2749  * Do whatever is needed to prepare for commit at the bufmgr and smgr levels
2750  */
2751 void
2753 {
2754  /* Nothing to do in bufmgr anymore... */
2755 }
2756 
2757 /*
2758  * BufferGetBlockNumber
2759  * Returns the block number associated with a buffer.
2760  *
2761  * Note:
2762  * Assumes that the buffer is valid and pinned, else the
2763  * value may be obsolete immediately...
2764  */
2767 {
2768  BufferDesc *bufHdr;
2769 
2771 
2772  if (BufferIsLocal(buffer))
2773  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
2774  else
2775  bufHdr = GetBufferDescriptor(buffer - 1);
2776 
2777  /* pinned, so OK to read tag without spinlock */
2778  return bufHdr->tag.blockNum;
2779 }
2780 
2781 /*
2782  * BufferGetTag
2783  * Returns the relfilelocator, fork number and block number associated with
2784  * a buffer.
2785  */
2786 void
2788  BlockNumber *blknum)
2789 {
2790  BufferDesc *bufHdr;
2791 
2792  /* Do the same checks as BufferGetBlockNumber. */
2794 
2795  if (BufferIsLocal(buffer))
2796  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
2797  else
2798  bufHdr = GetBufferDescriptor(buffer - 1);
2799 
2800  /* pinned, so OK to read tag without spinlock */
2801  *rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
2802  *forknum = BufTagGetForkNum(&bufHdr->tag);
2803  *blknum = bufHdr->tag.blockNum;
2804 }
2805 
2806 /*
2807  * FlushBuffer
2808  * Physically write out a shared buffer.
2809  *
2810  * NOTE: this actually just passes the buffer contents to the kernel; the
2811  * real write to disk won't happen until the kernel feels like it. This
2812  * is okay from our point of view since we can redo the changes from WAL.
2813  * However, we will need to force the changes to disk via fsync before
2814  * we can checkpoint WAL.
2815  *
2816  * The caller must hold a pin on the buffer and have share-locked the
2817  * buffer contents. (Note: a share-lock does not prevent updates of
2818  * hint bits in the buffer, so the page could change while the write
2819  * is in progress, but we assume that that will not invalidate the data
2820  * written.)
2821  *
2822  * If the caller has an smgr reference for the buffer's relation, pass it
2823  * as the second parameter. If not, pass NULL.
2824  */
2825 static void
2827 {
2828  XLogRecPtr recptr;
2829  ErrorContextCallback errcallback;
2830  instr_time io_start,
2831  io_time;
2832  Block bufBlock;
2833  char *bufToWrite;
2834  uint32 buf_state;
2835 
2836  /*
2837  * Try to start an I/O operation. If StartBufferIO returns false, then
2838  * someone else flushed the buffer before we could, so we need not do
2839  * anything.
2840  */
2841  if (!StartBufferIO(buf, false))
2842  return;
2843 
2844  /* Setup error traceback support for ereport() */
2846  errcallback.arg = (void *) buf;
2847  errcallback.previous = error_context_stack;
2848  error_context_stack = &errcallback;
2849 
2850  /* Find smgr relation for buffer */
2851  if (reln == NULL)
2853 
2854  TRACE_POSTGRESQL_BUFFER_FLUSH_START(BufTagGetForkNum(&buf->tag),
2855  buf->tag.blockNum,
2857  reln->smgr_rlocator.locator.dbOid,
2859 
2860  buf_state = LockBufHdr(buf);
2861 
2862  /*
2863  * Run PageGetLSN while holding header lock, since we don't have the
2864  * buffer locked exclusively in all cases.
2865  */
2866  recptr = BufferGetLSN(buf);
2867 
2868  /* To check if block content changes while flushing. - vadim 01/17/97 */
2869  buf_state &= ~BM_JUST_DIRTIED;
2870  UnlockBufHdr(buf, buf_state);
2871 
2872  /*
2873  * Force XLOG flush up to buffer's LSN. This implements the basic WAL
2874  * rule that log updates must hit disk before any of the data-file changes
2875  * they describe do.
2876  *
2877  * However, this rule does not apply to unlogged relations, which will be
2878  * lost after a crash anyway. Most unlogged relation pages do not bear
2879  * LSNs since we never emit WAL records for them, and therefore flushing
2880  * up through the buffer LSN would be useless, but harmless. However,
2881  * GiST indexes use LSNs internally to track page-splits, and therefore
2882  * unlogged GiST pages bear "fake" LSNs generated by
2883  * GetFakeLSNForUnloggedRel. It is unlikely but possible that the fake
2884  * LSN counter could advance past the WAL insertion point; and if it did
2885  * happen, attempting to flush WAL through that location would fail, with
2886  * disastrous system-wide consequences. To make sure that can't happen,
2887  * skip the flush if the buffer isn't permanent.
2888  */
2889  if (buf_state & BM_PERMANENT)
2890  XLogFlush(recptr);
2891 
2892  /*
2893  * Now it's safe to write buffer to disk. Note that no one else should
2894  * have been able to write it while we were busy with log flushing because
2895  * only one process at a time can set the BM_IO_IN_PROGRESS bit.
2896  */
2897  bufBlock = BufHdrGetBlock(buf);
2898 
2899  /*
2900  * Update page checksum if desired. Since we have only shared lock on the
2901  * buffer, other processes might be updating hint bits in it, so we must
2902  * copy the page to private storage if we do checksumming.
2903  */
2904  bufToWrite = PageSetChecksumCopy((Page) bufBlock, buf->tag.blockNum);
2905 
2906  if (track_io_timing)
2907  INSTR_TIME_SET_CURRENT(io_start);
2908 
2909  /*
2910  * bufToWrite is either the shared buffer or a copy, as appropriate.
2911  */
2912  smgrwrite(reln,
2913  BufTagGetForkNum(&buf->tag),
2914  buf->tag.blockNum,
2915  bufToWrite,
2916  false);
2917 
2918  if (track_io_timing)
2919  {
2920  INSTR_TIME_SET_CURRENT(io_time);
2921  INSTR_TIME_SUBTRACT(io_time, io_start);
2924  }
2925 
2927 
2928  /*
2929  * Mark the buffer as clean (unless BM_JUST_DIRTIED has become set) and
2930  * end the BM_IO_IN_PROGRESS state.
2931  */
2932  TerminateBufferIO(buf, true, 0);
2933 
2934  TRACE_POSTGRESQL_BUFFER_FLUSH_DONE(BufTagGetForkNum(&buf->tag),
2935  buf->tag.blockNum,
2937  reln->smgr_rlocator.locator.dbOid,
2939 
2940  /* Pop the error context stack */
2941  error_context_stack = errcallback.previous;
2942 }
2943 
2944 /*
2945  * RelationGetNumberOfBlocksInFork
2946  * Determines the current number of pages in the specified relation fork.
2947  *
2948  * Note that the accuracy of the result will depend on the details of the
2949  * relation's storage. For builtin AMs it'll be accurate, but for external AMs
2950  * it might not be.
2951  */
2954 {
2955  if (RELKIND_HAS_TABLE_AM(relation->rd_rel->relkind))
2956  {
2957  /*
2958  * Not every table AM uses BLCKSZ wide fixed size blocks. Therefore
2959  * tableam returns the size in bytes - but for the purpose of this
2960  * routine, we want the number of blocks. Therefore divide, rounding
2961  * up.
2962  */
2963  uint64 szbytes;
2964 
2965  szbytes = table_relation_size(relation, forkNum);
2966 
2967  return (szbytes + (BLCKSZ - 1)) / BLCKSZ;
2968  }
2969  else if (RELKIND_HAS_STORAGE(relation->rd_rel->relkind))
2970  {
2971  return smgrnblocks(RelationGetSmgr(relation), forkNum);
2972  }
2973  else
2974  Assert(false);
2975 
2976  return 0; /* keep compiler quiet */
2977 }
2978 
2979 /*
2980  * BufferIsPermanent
2981  * Determines whether a buffer will potentially still be around after
2982  * a crash. Caller must hold a buffer pin.
2983  */
2984 bool
2986 {
2987  BufferDesc *bufHdr;
2988 
2989  /* Local buffers are used only for temp relations. */
2990  if (BufferIsLocal(buffer))
2991  return false;
2992 
2993  /* Make sure we've got a real buffer, and that we hold a pin on it. */
2996 
2997  /*
2998  * BM_PERMANENT can't be changed while we hold a pin on the buffer, so we
2999  * need not bother with the buffer header spinlock. Even if someone else
3000  * changes the buffer header state while we're doing this, the state is
3001  * changed atomically, so we'll read the old value or the new value, but
3002  * not random garbage.
3003  */
3004  bufHdr = GetBufferDescriptor(buffer - 1);
3005  return (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT) != 0;
3006 }
3007 
3008 /*
3009  * BufferGetLSNAtomic
3010  * Retrieves the LSN of the buffer atomically using a buffer header lock.
3011  * This is necessary for some callers who may not have an exclusive lock
3012  * on the buffer.
3013  */
3014 XLogRecPtr
3016 {
3017  BufferDesc *bufHdr = GetBufferDescriptor(buffer - 1);
3018  char *page = BufferGetPage(buffer);
3019  XLogRecPtr lsn;
3020  uint32 buf_state;
3021 
3022  /*
3023  * If we don't need locking for correctness, fastpath out.
3024  */
3026  return PageGetLSN(page);
3027 
3028  /* Make sure we've got a real buffer, and that we hold a pin on it. */
3031 
3032  buf_state = LockBufHdr(bufHdr);
3033  lsn = PageGetLSN(page);
3034  UnlockBufHdr(bufHdr, buf_state);
3035 
3036  return lsn;
3037 }
3038 
3039 /* ---------------------------------------------------------------------
3040  * DropRelationBuffers
3041  *
3042  * This function removes from the buffer pool all the pages of the
3043  * specified relation forks that have block numbers >= firstDelBlock.
3044  * (In particular, with firstDelBlock = 0, all pages are removed.)
3045  * Dirty pages are simply dropped, without bothering to write them
3046  * out first. Therefore, this is NOT rollback-able, and so should be
3047  * used only with extreme caution!
3048  *
3049  * Currently, this is called only from smgr.c when the underlying file
3050  * is about to be deleted or truncated (firstDelBlock is needed for
3051  * the truncation case). The data in the affected pages would therefore
3052  * be deleted momentarily anyway, and there is no point in writing it.
3053  * It is the responsibility of higher-level code to ensure that the
3054  * deletion or truncation does not lose any data that could be needed
3055  * later. It is also the responsibility of higher-level code to ensure
3056  * that no other process could be trying to load more pages of the
3057  * relation into buffers.
3058  * --------------------------------------------------------------------
3059  */
3060 void
3062  int nforks, BlockNumber *firstDelBlock)
3063 {
3064  int i;
3065  int j;
3066  RelFileLocatorBackend rlocator;
3067  BlockNumber nForkBlock[MAX_FORKNUM];
3068  uint64 nBlocksToInvalidate = 0;
3069 
3070  rlocator = smgr_reln->smgr_rlocator;
3071 
3072  /* If it's a local relation, it's localbuf.c's problem. */
3073  if (RelFileLocatorBackendIsTemp(rlocator))
3074  {
3075  if (rlocator.backend == MyBackendId)
3076  {
3077  for (j = 0; j < nforks; j++)
3078  DropRelationLocalBuffers(rlocator.locator, forkNum[j],
3079  firstDelBlock[j]);
3080  }
3081  return;
3082  }
3083 
3084  /*
3085  * To remove all the pages of the specified relation forks from the buffer
3086  * pool, we need to scan the entire buffer pool but we can optimize it by
3087  * finding the buffers from BufMapping table provided we know the exact
3088  * size of each fork of the relation. The exact size is required to ensure
3089  * that we don't leave any buffer for the relation being dropped as
3090  * otherwise the background writer or checkpointer can lead to a PANIC
3091  * error while flushing buffers corresponding to files that don't exist.
3092  *
3093  * To know the exact size, we rely on the size cached for each fork by us
3094  * during recovery which limits the optimization to recovery and on
3095  * standbys but we can easily extend it once we have shared cache for
3096  * relation size.
3097  *
3098  * In recovery, we cache the value returned by the first lseek(SEEK_END)
3099  * and the future writes keeps the cached value up-to-date. See
3100  * smgrextend. It is possible that the value of the first lseek is smaller
3101  * than the actual number of existing blocks in the file due to buggy
3102  * Linux kernels that might not have accounted for the recent write. But
3103  * that should be fine because there must not be any buffers after that
3104  * file size.
3105  */
3106  for (i = 0; i < nforks; i++)
3107  {
3108  /* Get the number of blocks for a relation's fork */
3109  nForkBlock[i] = smgrnblocks_cached(smgr_reln, forkNum[i]);
3110 
3111  if (nForkBlock[i] == InvalidBlockNumber)
3112  {
3113  nBlocksToInvalidate = InvalidBlockNumber;
3114  break;
3115  }
3116 
3117  /* calculate the number of blocks to be invalidated */
3118  nBlocksToInvalidate += (nForkBlock[i] - firstDelBlock[i]);
3119  }
3120 
3121  /*
3122  * We apply the optimization iff the total number of blocks to invalidate
3123  * is below the BUF_DROP_FULL_SCAN_THRESHOLD.
3124  */
3125  if (BlockNumberIsValid(nBlocksToInvalidate) &&
3126  nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
3127  {
3128  for (j = 0; j < nforks; j++)
3129  FindAndDropRelationBuffers(rlocator.locator, forkNum[j],
3130  nForkBlock[j], firstDelBlock[j]);
3131  return;
3132  }
3133 
3134  for (i = 0; i < NBuffers; i++)
3135  {
3136  BufferDesc *bufHdr = GetBufferDescriptor(i);
3137  uint32 buf_state;
3138 
3139  /*
3140  * We can make this a tad faster by prechecking the buffer tag before
3141  * we attempt to lock the buffer; this saves a lot of lock
3142  * acquisitions in typical cases. It should be safe because the
3143  * caller must have AccessExclusiveLock on the relation, or some other
3144  * reason to be certain that no one is loading new pages of the rel
3145  * into the buffer pool. (Otherwise we might well miss such pages
3146  * entirely.) Therefore, while the tag might be changing while we
3147  * look at it, it can't be changing *to* a value we care about, only
3148  * *away* from such a value. So false negatives are impossible, and
3149  * false positives are safe because we'll recheck after getting the
3150  * buffer lock.
3151  *
3152  * We could check forkNum and blockNum as well as the rlocator, but
3153  * the incremental win from doing so seems small.
3154  */
3155  if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator))
3156  continue;
3157 
3158  buf_state = LockBufHdr(bufHdr);
3159 
3160  for (j = 0; j < nforks; j++)
3161  {
3162  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator) &&
3163  BufTagGetForkNum(&bufHdr->tag) == forkNum[j] &&
3164  bufHdr->tag.blockNum >= firstDelBlock[j])
3165  {
3166  InvalidateBuffer(bufHdr); /* releases spinlock */
3167  break;
3168  }
3169  }
3170  if (j >= nforks)
3171  UnlockBufHdr(bufHdr, buf_state);
3172  }
3173 }
3174 
3175 /* ---------------------------------------------------------------------
3176  * DropRelationsAllBuffers
3177  *
3178  * This function removes from the buffer pool all the pages of all
3179  * forks of the specified relations. It's equivalent to calling
3180  * DropRelationBuffers once per fork per relation with firstDelBlock = 0.
3181  * --------------------------------------------------------------------
3182  */
3183 void
3184 DropRelationsAllBuffers(SMgrRelation *smgr_reln, int nlocators)
3185 {
3186  int i;
3187  int n = 0;
3188  SMgrRelation *rels;
3189  BlockNumber (*block)[MAX_FORKNUM + 1];
3190  uint64 nBlocksToInvalidate = 0;
3191  RelFileLocator *locators;
3192  bool cached = true;
3193  bool use_bsearch;
3194 
3195  if (nlocators == 0)
3196  return;
3197 
3198  rels = palloc(sizeof(SMgrRelation) * nlocators); /* non-local relations */
3199 
3200  /* If it's a local relation, it's localbuf.c's problem. */
3201  for (i = 0; i < nlocators; i++)
3202  {
3203  if (RelFileLocatorBackendIsTemp(smgr_reln[i]->smgr_rlocator))
3204  {
3205  if (smgr_reln[i]->smgr_rlocator.backend == MyBackendId)
3206  DropRelationAllLocalBuffers(smgr_reln[i]->smgr_rlocator.locator);
3207  }
3208  else
3209  rels[n++] = smgr_reln[i];
3210  }
3211 
3212  /*
3213  * If there are no non-local relations, then we're done. Release the
3214  * memory and return.
3215  */
3216  if (n == 0)
3217  {
3218  pfree(rels);
3219  return;
3220  }
3221 
3222  /*
3223  * This is used to remember the number of blocks for all the relations
3224  * forks.
3225  */
3226  block = (BlockNumber (*)[MAX_FORKNUM + 1])
3227  palloc(sizeof(BlockNumber) * n * (MAX_FORKNUM + 1));
3228 
3229  /*
3230  * We can avoid scanning the entire buffer pool if we know the exact size
3231  * of each of the given relation forks. See DropRelationBuffers.
3232  */
3233  for (i = 0; i < n && cached; i++)
3234  {
3235  for (int j = 0; j <= MAX_FORKNUM; j++)
3236  {
3237  /* Get the number of blocks for a relation's fork. */
3238  block[i][j] = smgrnblocks_cached(rels[i], j);
3239 
3240  /* We need to only consider the relation forks that exists. */
3241  if (block[i][j] == InvalidBlockNumber)
3242  {
3243  if (!smgrexists(rels[i], j))
3244  continue;
3245  cached = false;
3246  break;
3247  }
3248 
3249  /* calculate the total number of blocks to be invalidated */
3250  nBlocksToInvalidate += block[i][j];
3251  }
3252  }
3253 
3254  /*
3255  * We apply the optimization iff the total number of blocks to invalidate
3256  * is below the BUF_DROP_FULL_SCAN_THRESHOLD.
3257  */
3258  if (cached && nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
3259  {
3260  for (i = 0; i < n; i++)
3261  {
3262  for (int j = 0; j <= MAX_FORKNUM; j++)
3263  {
3264  /* ignore relation forks that doesn't exist */
3265  if (!BlockNumberIsValid(block[i][j]))
3266  continue;
3267 
3268  /* drop all the buffers for a particular relation fork */
3269  FindAndDropRelationBuffers(rels[i]->smgr_rlocator.locator,
3270  j, block[i][j], 0);
3271  }
3272  }
3273 
3274  pfree(block);
3275  pfree(rels);
3276  return;
3277  }
3278 
3279  pfree(block);
3280  locators = palloc(sizeof(RelFileLocator) * n); /* non-local relations */
3281  for (i = 0; i < n; i++)
3282  locators[i] = rels[i]->smgr_rlocator.locator;
3283 
3284  /*
3285  * For low number of relations to drop just use a simple walk through, to
3286  * save the bsearch overhead. The threshold to use is rather a guess than
3287  * an exactly determined value, as it depends on many factors (CPU and RAM
3288  * speeds, amount of shared buffers etc.).
3289  */
3290  use_bsearch = n > RELS_BSEARCH_THRESHOLD;
3291 
3292  /* sort the list of rlocators if necessary */
3293  if (use_bsearch)
3294  pg_qsort(locators, n, sizeof(RelFileLocator), rlocator_comparator);
3295 
3296  for (i = 0; i < NBuffers; i++)
3297  {
3298  RelFileLocator *rlocator = NULL;
3299  BufferDesc *bufHdr = GetBufferDescriptor(i);
3300  uint32 buf_state;
3301 
3302  /*
3303  * As in DropRelationBuffers, an unlocked precheck should be
3304  * safe and saves some cycles.
3305  */
3306 
3307  if (!use_bsearch)
3308  {
3309  int j;
3310 
3311  for (j = 0; j < n; j++)
3312  {
3313  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &locators[j]))
3314  {
3315  rlocator = &locators[j];
3316  break;
3317  }
3318  }
3319  }
3320  else
3321  {
3322  RelFileLocator locator;
3323 
3324  locator = BufTagGetRelFileLocator(&bufHdr->tag);
3325  rlocator = bsearch((const void *) &(locator),
3326  locators, n, sizeof(RelFileLocator),
3328  }
3329 
3330  /* buffer doesn't belong to any of the given relfilelocators; skip it */
3331  if (rlocator == NULL)
3332  continue;
3333 
3334  buf_state = LockBufHdr(bufHdr);
3335  if (BufTagMatchesRelFileLocator(&bufHdr->tag, rlocator))
3336  InvalidateBuffer(bufHdr); /* releases spinlock */
3337  else
3338  UnlockBufHdr(bufHdr, buf_state);
3339  }
3340 
3341  pfree(locators);
3342  pfree(rels);
3343 }
3344 
3345 /* ---------------------------------------------------------------------
3346  * FindAndDropRelationBuffers
3347  *
3348  * This function performs look up in BufMapping table and removes from the
3349  * buffer pool all the pages of the specified relation fork that has block
3350  * number >= firstDelBlock. (In particular, with firstDelBlock = 0, all
3351  * pages are removed.)
3352  * --------------------------------------------------------------------
3353  */
3354 static void
3356  BlockNumber nForkBlock,
3357  BlockNumber firstDelBlock)
3358 {
3359  BlockNumber curBlock;
3360 
3361  for (curBlock = firstDelBlock; curBlock < nForkBlock; curBlock++)
3362  {
3363  uint32 bufHash; /* hash value for tag */
3364  BufferTag bufTag; /* identity of requested block */
3365  LWLock *bufPartitionLock; /* buffer partition lock for it */
3366  int buf_id;
3367  BufferDesc *bufHdr;
3368  uint32 buf_state;
3369 
3370  /* create a tag so we can lookup the buffer */
3371  InitBufferTag(&bufTag, &rlocator, forkNum, curBlock);
3372 
3373  /* determine its hash code and partition lock ID */
3374  bufHash = BufTableHashCode(&bufTag);
3375  bufPartitionLock = BufMappingPartitionLock(bufHash);
3376 
3377  /* Check that it is in the buffer pool. If not, do nothing. */
3378  LWLockAcquire(bufPartitionLock, LW_SHARED);
3379  buf_id = BufTableLookup(&bufTag, bufHash);
3380  LWLockRelease(bufPartitionLock);
3381 
3382  if (buf_id < 0)
3383  continue;
3384 
3385  bufHdr = GetBufferDescriptor(buf_id);
3386 
3387  /*
3388  * We need to lock the buffer header and recheck if the buffer is
3389  * still associated with the same block because the buffer could be
3390  * evicted by some other backend loading blocks for a different
3391  * relation after we release lock on the BufMapping table.
3392  */
3393  buf_state = LockBufHdr(bufHdr);
3394 
3395  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator) &&
3396  BufTagGetForkNum(&bufHdr->tag) == forkNum &&
3397  bufHdr->tag.blockNum >= firstDelBlock)
3398  InvalidateBuffer(bufHdr); /* releases spinlock */
3399  else
3400  UnlockBufHdr(bufHdr, buf_state);
3401  }
3402 }
3403 
3404 /* ---------------------------------------------------------------------
3405  * DropDatabaseBuffers
3406  *
3407  * This function removes all the buffers in the buffer cache for a
3408  * particular database. Dirty pages are simply dropped, without
3409  * bothering to write them out first. This is used when we destroy a
3410  * database, to avoid trying to flush data to disk when the directory
3411  * tree no longer exists. Implementation is pretty similar to
3412  * DropRelationBuffers() which is for destroying just one relation.
3413  * --------------------------------------------------------------------
3414  */
3415 void
3417 {
3418  int i;
3419 
3420  /*
3421  * We needn't consider local buffers, since by assumption the target
3422  * database isn't our own.
3423  */
3424 
3425  for (i = 0; i < NBuffers; i++)
3426  {
3427  BufferDesc *bufHdr = GetBufferDescriptor(i);
3428  uint32 buf_state;
3429 
3430  /*
3431  * As in DropRelationBuffers, an unlocked precheck should be
3432  * safe and saves some cycles.
3433  */
3434  if (bufHdr->tag.dbOid != dbid)
3435  continue;
3436 
3437  buf_state = LockBufHdr(bufHdr);
3438  if (bufHdr->tag.dbOid == dbid)
3439  InvalidateBuffer(bufHdr); /* releases spinlock */
3440  else
3441  UnlockBufHdr(bufHdr, buf_state);
3442  }
3443 }
3444 
3445 /* -----------------------------------------------------------------
3446  * PrintBufferDescs
3447  *
3448  * this function prints all the buffer descriptors, for debugging
3449  * use only.
3450  * -----------------------------------------------------------------
3451  */
3452 #ifdef NOT_USED
3453 void
3454 PrintBufferDescs(void)
3455 {
3456  int i;
3457 
3458  for (i = 0; i < NBuffers; ++i)
3459  {
3462 
3463  /* theoretically we should lock the bufhdr here */
3464  elog(LOG,
3465  "[%02d] (freeNext=%d, rel=%s, "
3466  "blockNum=%u, flags=0x%x, refcount=%u %d)",
3467  i, buf->freeNext,
3470  buf->tag.blockNum, buf->flags,
3471  buf->refcount, GetPrivateRefCount(b));
3472  }
3473 }
3474 #endif
3475 
3476 #ifdef NOT_USED
3477 void
3478 PrintPinnedBufs(void)
3479 {
3480  int i;
3481 
3482  for (i = 0; i < NBuffers; ++i)
3483  {
3486 
3487  if (GetPrivateRefCount(b) > 0)
3488  {
3489  /* theoretically we should lock the bufhdr here */
3490  elog(LOG,
3491  "[%02d] (freeNext=%d, rel=%s, "
3492  "blockNum=%u, flags=0x%x, refcount=%u %d)",
3493  i, buf->freeNext,
3495  BufTagGetForkNum(&buf->tag)),
3496  buf->tag.blockNum, buf->flags,
3497  buf->refcount, GetPrivateRefCount(b));
3498  }
3499  }
3500 }
3501 #endif
3502 
3503 /* ---------------------------------------------------------------------
3504  * FlushRelationBuffers
3505  *
3506  * This function writes all dirty pages of a relation out to disk
3507  * (or more accurately, out to kernel disk buffers), ensuring that the
3508  * kernel has an up-to-date view of the relation.
3509  *
3510  * Generally, the caller should be holding AccessExclusiveLock on the
3511  * target relation to ensure that no other backend is busy dirtying
3512  * more blocks of the relation; the effects can't be expected to last
3513  * after the lock is released.
3514  *
3515  * XXX currently it sequentially searches the buffer pool, should be
3516  * changed to more clever ways of searching. This routine is not
3517  * used in any performance-critical code paths, so it's not worth
3518  * adding additional overhead to normal paths to make it go faster.
3519  * --------------------------------------------------------------------
3520  */
3521 void
3523 {
3524  int i;
3525  BufferDesc *bufHdr;
3526 
3527  if (RelationUsesLocalBuffers(rel))
3528  {
3529  for (i = 0; i < NLocBuffer; i++)
3530  {
3531  uint32 buf_state;
3532 
3533  bufHdr = GetLocalBufferDescriptor(i);
3534  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
3535  ((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
3536  (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3537  {
3538  ErrorContextCallback errcallback;
3539  Page localpage;
3540 
3541  localpage = (char *) LocalBufHdrGetBlock(bufHdr);
3542 
3543  /* Setup error traceback support for ereport() */
3545  errcallback.arg = (void *) bufHdr;
3546  errcallback.previous = error_context_stack;
3547  error_context_stack = &errcallback;
3548 
3549  PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
3550 
3552  BufTagGetForkNum(&bufHdr->tag),
3553  bufHdr->tag.blockNum,
3554  localpage,
3555  false);
3556 
3557  buf_state &= ~(BM_DIRTY | BM_JUST_DIRTIED);
3558  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
3559 
3560  /* Pop the error context stack */
3561  error_context_stack = errcallback.previous;
3562  }
3563  }
3564 
3565  return;
3566  }
3567 
3568  /* Make sure we can handle the pin inside the loop */
3570 
3571  for (i = 0; i < NBuffers; i++)
3572  {
3573  uint32 buf_state;
3574 
3575  bufHdr = GetBufferDescriptor(i);
3576 
3577  /*
3578  * As in DropRelationBuffers, an unlocked precheck should be
3579  * safe and saves some cycles.
3580  */
3581  if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator))
3582  continue;
3583 
3585 
3586  buf_state = LockBufHdr(bufHdr);
3587  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
3588  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3589  {
3590  PinBuffer_Locked(bufHdr);
3592  FlushBuffer(bufHdr, RelationGetSmgr(rel));
3594  UnpinBuffer(bufHdr, true);
3595  }
3596  else
3597  UnlockBufHdr(bufHdr, buf_state);
3598  }
3599 }
3600 
3601 /* ---------------------------------------------------------------------
3602  * FlushRelationsAllBuffers
3603  *
3604  * This function flushes out of the buffer pool all the pages of all
3605  * forks of the specified smgr relations. It's equivalent to calling
3606  * FlushRelationBuffers once per relation. The relations are assumed not
3607  * to use local buffers.
3608  * --------------------------------------------------------------------
3609  */
3610 void
3612 {
3613  int i;
3614  SMgrSortArray *srels;
3615  bool use_bsearch;
3616 
3617  if (nrels == 0)
3618  return;
3619 
3620  /* fill-in array for qsort */
3621  srels = palloc(sizeof(SMgrSortArray) * nrels);
3622 
3623  for (i = 0; i < nrels; i++)
3624  {
3625  Assert(!RelFileLocatorBackendIsTemp(smgrs[i]->smgr_rlocator));
3626 
3627  srels[i].rlocator = smgrs[i]->smgr_rlocator.locator;
3628  srels[i].srel = smgrs[i];
3629  }
3630 
3631  /*
3632  * Save the bsearch overhead for low number of relations to sync. See
3633  * DropRelationsAllBuffers for details.
3634  */
3635  use_bsearch = nrels > RELS_BSEARCH_THRESHOLD;
3636 
3637  /* sort the list of SMgrRelations if necessary */
3638  if (use_bsearch)
3639  pg_qsort(srels, nrels, sizeof(SMgrSortArray), rlocator_comparator);
3640 
3641  /* Make sure we can handle the pin inside the loop */
3643 
3644  for (i = 0; i < NBuffers; i++)
3645  {
3646  SMgrSortArray *srelent = NULL;
3647  BufferDesc *bufHdr = GetBufferDescriptor(i);
3648  uint32 buf_state;
3649 
3650  /*
3651  * As in DropRelationBuffers, an unlocked precheck should be
3652  * safe and saves some cycles.
3653  */
3654 
3655  if (!use_bsearch)
3656  {
3657  int j;
3658 
3659  for (j = 0; j < nrels; j++)
3660  {
3661  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srels[j].rlocator))
3662  {
3663  srelent = &srels[j];
3664  break;
3665  }
3666  }
3667  }
3668  else
3669  {
3670  RelFileLocator rlocator;
3671 
3672  rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
3673  srelent = bsearch((const void *) &(rlocator),
3674  srels, nrels, sizeof(SMgrSortArray),
3676  }
3677 
3678  /* buffer doesn't belong to any of the given relfilelocators; skip it */
3679  if (srelent == NULL)
3680  continue;
3681 
3683 
3684  buf_state = LockBufHdr(bufHdr);
3685  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srelent->rlocator) &&
3686  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3687  {
3688  PinBuffer_Locked(bufHdr);
3690  FlushBuffer(bufHdr, srelent->srel);
3692  UnpinBuffer(bufHdr, true);
3693  }
3694  else
3695  UnlockBufHdr(bufHdr, buf_state);
3696  }
3697 
3698  pfree(srels);
3699 }
3700 
3701 /* ---------------------------------------------------------------------
3702  * RelationCopyStorageUsingBuffer
3703  *
3704  * Copy fork's data using bufmgr. Same as RelationCopyStorage but instead
3705  * of using smgrread and smgrextend this will copy using bufmgr APIs.
3706  *
3707  * Refer comments atop CreateAndCopyRelationData() for details about
3708  * 'permanent' parameter.
3709  * --------------------------------------------------------------------
3710  */
3711 static void
3713  RelFileLocator dstlocator,
3714  ForkNumber forkNum, bool permanent)
3715 {
3716  Buffer srcBuf;
3717  Buffer dstBuf;
3718  Page srcPage;
3719  Page dstPage;
3720  bool use_wal;
3721  BlockNumber nblocks;
3722  BlockNumber blkno;
3724  BufferAccessStrategy bstrategy_src;
3725  BufferAccessStrategy bstrategy_dst;
3726 
3727  /*
3728  * In general, we want to write WAL whenever wal_level > 'minimal', but we
3729  * can skip it when copying any fork of an unlogged relation other than
3730  * the init fork.
3731  */
3732  use_wal = XLogIsNeeded() && (permanent || forkNum == INIT_FORKNUM);
3733 
3734  /* Get number of blocks in the source relation. */
3735  nblocks = smgrnblocks(smgropen(srclocator, InvalidBackendId),
3736  forkNum);
3737 
3738  /* Nothing to copy; just return. */
3739  if (nblocks == 0)
3740  return;
3741 
3742  /*
3743  * Bulk extend the destination relation of the same size as the source
3744  * relation before starting to copy block by block.
3745  */
3746  memset(buf.data, 0, BLCKSZ);
3747  smgrextend(smgropen(dstlocator, InvalidBackendId), forkNum, nblocks - 1,
3748  buf.data, true);
3749 
3750  /* This is a bulk operation, so use buffer access strategies. */
3751  bstrategy_src = GetAccessStrategy(BAS_BULKREAD);
3752  bstrategy_dst = GetAccessStrategy(BAS_BULKWRITE);
3753 
3754  /* Iterate over each block of the source relation file. */
3755  for (blkno = 0; blkno < nblocks; blkno++)
3756  {
3758 
3759  /* Read block from source relation. */
3760  srcBuf = ReadBufferWithoutRelcache(srclocator, forkNum, blkno,
3761  RBM_NORMAL, bstrategy_src,
3762  permanent);
3763  LockBuffer(srcBuf, BUFFER_LOCK_SHARE);
3764  srcPage = BufferGetPage(srcBuf);
3765 
3766  /* Use P_NEW to extend the destination relation. */
3767  dstBuf = ReadBufferWithoutRelcache(dstlocator, forkNum, blkno,
3768  RBM_NORMAL, bstrategy_dst,
3769  permanent);
3771  dstPage = BufferGetPage(dstBuf);
3772 
3774 
3775  /* Copy page data from the source to the destination. */
3776  memcpy(dstPage, srcPage, BLCKSZ);
3777  MarkBufferDirty(dstBuf);
3778 
3779  /* WAL-log the copied page. */
3780  if (use_wal)
3781  log_newpage_buffer(dstBuf, true);
3782 
3783  END_CRIT_SECTION();
3784 
3785  UnlockReleaseBuffer(dstBuf);
3786  UnlockReleaseBuffer(srcBuf);
3787  }
3788 }
3789 
3790 /* ---------------------------------------------------------------------
3791  * CreateAndCopyRelationData
3792  *
3793  * Create destination relation storage and copy all forks from the
3794  * source relation to the destination.
3795  *
3796  * Pass permanent as true for permanent relations and false for
3797  * unlogged relations. Currently this API is not supported for
3798  * temporary relations.
3799  * --------------------------------------------------------------------
3800  */
3801 void
3803  RelFileLocator dst_rlocator, bool permanent)
3804 {
3805  RelFileLocatorBackend rlocator;
3806  char relpersistence;
3807 
3808  /* Set the relpersistence. */
3809  relpersistence = permanent ?
3810  RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED;
3811 
3812  /*
3813  * Create and copy all forks of the relation. During create database we
3814  * have a separate cleanup mechanism which deletes complete database
3815  * directory. Therefore, each individual relation doesn't need to be
3816  * registered for cleanup.
3817  */
3818  RelationCreateStorage(dst_rlocator, relpersistence, false);
3819 
3820  /* copy main fork. */
3821  RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, MAIN_FORKNUM,
3822  permanent);
3823 
3824  /* copy those extra forks that exist */
3825  for (ForkNumber forkNum = MAIN_FORKNUM + 1;
3826  forkNum <= MAX_FORKNUM; forkNum++)
3827  {
3828  if (smgrexists(smgropen(src_rlocator, InvalidBackendId), forkNum))
3829  {
3830  smgrcreate(smgropen(dst_rlocator, InvalidBackendId), forkNum, false);
3831 
3832  /*
3833  * WAL log creation if the relation is persistent, or this is the
3834  * init fork of an unlogged relation.
3835  */
3836  if (permanent || forkNum == INIT_FORKNUM)
3837  log_smgrcreate(&dst_rlocator, forkNum);
3838 
3839  /* Copy a fork's data, block by block. */
3840  RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, forkNum,
3841  permanent);
3842  }
3843  }
3844 
3845  /* close source and destination smgr if exists. */
3846  rlocator.backend = InvalidBackendId;
3847 
3848  rlocator.locator = src_rlocator;
3849  smgrcloserellocator(rlocator);
3850 
3851  rlocator.locator = dst_rlocator;
3852  smgrcloserellocator(rlocator);
3853 }
3854 
3855 /* ---------------------------------------------------------------------
3856  * FlushDatabaseBuffers
3857  *
3858  * This function writes all dirty pages of a database out to disk
3859  * (or more accurately, out to kernel disk buffers), ensuring that the
3860  * kernel has an up-to-date view of the database.
3861  *
3862  * Generally, the caller should be holding an appropriate lock to ensure
3863  * no other backend is active in the target database; otherwise more
3864  * pages could get dirtied.
3865  *
3866  * Note we don't worry about flushing any pages of temporary relations.
3867  * It's assumed these wouldn't be interesting.
3868  * --------------------------------------------------------------------
3869  */
3870 void
3872 {
3873  int i;
3874  BufferDesc *bufHdr;
3875 
3876  /* Make sure we can handle the pin inside the loop */
3878 
3879  for (i = 0; i < NBuffers; i++)
3880  {
3881  uint32 buf_state;
3882 
3883  bufHdr = GetBufferDescriptor(i);
3884 
3885  /*
3886  * As in DropRelationBuffers, an unlocked precheck should be
3887  * safe and saves some cycles.
3888  */
3889  if (bufHdr->tag.dbOid != dbid)
3890  continue;
3891 
3893 
3894  buf_state = LockBufHdr(bufHdr);
3895  if (bufHdr->tag.dbOid == dbid &&
3896  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3897  {
3898  PinBuffer_Locked(bufHdr);
3900  FlushBuffer(bufHdr, NULL);
3902  UnpinBuffer(bufHdr, true);
3903  }
3904  else
3905  UnlockBufHdr(bufHdr, buf_state);
3906  }
3907 }
3908 
3909 /*
3910  * Flush a previously, shared or exclusively, locked and pinned buffer to the
3911  * OS.
3912  */
3913 void
3915 {
3916  BufferDesc *bufHdr;
3917 
3918  /* currently not needed, but no fundamental reason not to support */
3920 
3922 
3923  bufHdr = GetBufferDescriptor(buffer - 1);
3924 
3926 
3927  FlushBuffer(bufHdr, NULL);
3928 }
3929 
3930 /*
3931  * ReleaseBuffer -- release the pin on a buffer
3932  */
3933 void
3935 {
3936  if (!BufferIsValid(buffer))
3937  elog(ERROR, "bad buffer ID: %d", buffer);
3938 
3939  if (BufferIsLocal(buffer))
3940  {
3942 
3943  Assert(LocalRefCount[-buffer - 1] > 0);
3944  LocalRefCount[-buffer - 1]--;
3945  return;
3946  }
3947 
3949 }
3950 
3951 /*
3952  * UnlockReleaseBuffer -- release the content lock and pin on a buffer
3953  *
3954  * This is just a shorthand for a common combination.
3955  */
3956 void
3958 {
3961 }
3962 
3963 /*
3964  * IncrBufferRefCount
3965  * Increment the pin count on a buffer that we have *already* pinned
3966  * at least once.
3967  *
3968  * This function cannot be used on a buffer we do not have pinned,
3969  * because it doesn't change the shared buffer state.
3970  */
3971 void
3973 {
3976  if (BufferIsLocal(buffer))
3977  LocalRefCount[-buffer - 1]++;
3978  else
3979  {
3980  PrivateRefCountEntry *ref;
3981 
3982  ref = GetPrivateRefCountEntry(buffer, true);
3983  Assert(ref != NULL);
3984  ref->refcount++;
3985  }
3987 }
3988 
3989 /*
3990  * MarkBufferDirtyHint
3991  *
3992  * Mark a buffer dirty for non-critical changes.
3993  *
3994  * This is essentially the same as MarkBufferDirty, except:
3995  *
3996  * 1. The caller does not write WAL; so if checksums are enabled, we may need
3997  * to write an XLOG_FPI_FOR_HINT WAL record to protect against torn pages.
3998  * 2. The caller might have only share-lock instead of exclusive-lock on the
3999  * buffer's content lock.
4000  * 3. This function does not guarantee that the buffer is always marked dirty
4001  * (due to a race condition), so it cannot be used for important changes.
4002  */
4003 void
4005 {
4006  BufferDesc *bufHdr;
4007  Page page = BufferGetPage(buffer);
4008 
4009  if (!BufferIsValid(buffer))
4010  elog(ERROR, "bad buffer ID: %d", buffer);
4011 
4012  if (BufferIsLocal(buffer))
4013  {
4015  return;
4016  }
4017 
4018  bufHdr = GetBufferDescriptor(buffer - 1);
4019 
4021  /* here, either share or exclusive lock is OK */
4023 
4024  /*
4025  * This routine might get called many times on the same page, if we are
4026  * making the first scan after commit of an xact that added/deleted many
4027  * tuples. So, be as quick as we can if the buffer is already dirty. We
4028  * do this by not acquiring spinlock if it looks like the status bits are
4029  * already set. Since we make this test unlocked, there's a chance we
4030  * might fail to notice that the flags have just been cleared, and failed
4031  * to reset them, due to memory-ordering issues. But since this function
4032  * is only intended to be used in cases where failing to write out the
4033  * data would be harmless anyway, it doesn't really matter.
4034  */
4035  if ((pg_atomic_read_u32(&bufHdr->state) & (BM_DIRTY | BM_JUST_DIRTIED)) !=
4037  {
4039  bool dirtied = false;
4040  bool delayChkptFlags = false;
4041  uint32 buf_state;
4042 
4043  /*
4044  * If we need to protect hint bit updates from torn writes, WAL-log a
4045  * full page image of the page. This full page image is only necessary
4046  * if the hint bit update is the first change to the page since the
4047  * last checkpoint.
4048  *
4049  * We don't check full_page_writes here because that logic is included
4050  * when we call XLogInsert() since the value changes dynamically.
4051  */
4052  if (XLogHintBitIsNeeded() &&
4053  (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
4054  {
4055  /*
4056  * If we must not write WAL, due to a relfilelocator-specific
4057  * condition or being in recovery, don't dirty the page. We can
4058  * set the hint, just not dirty the page as a result so the hint
4059  * is lost when we evict the page or shutdown.
4060  *
4061  * See src/backend/storage/page/README for longer discussion.
4062  */
4063  if (RecoveryInProgress() ||
4065  return;
4066 
4067  /*
4068  * If the block is already dirty because we either made a change
4069  * or set a hint already, then we don't need to write a full page
4070  * image. Note that aggressive cleaning of blocks dirtied by hint
4071  * bit setting would increase the call rate. Bulk setting of hint
4072  * bits would reduce the call rate...
4073  *
4074  * We must issue the WAL record before we mark the buffer dirty.
4075  * Otherwise we might write the page before we write the WAL. That
4076  * causes a race condition, since a checkpoint might occur between
4077  * writing the WAL record and marking the buffer dirty. We solve
4078  * that with a kluge, but one that is already in use during
4079  * transaction commit to prevent race conditions. Basically, we
4080  * simply prevent the checkpoint WAL record from being written
4081  * until we have marked the buffer dirty. We don't start the
4082  * checkpoint flush until we have marked dirty, so our checkpoint
4083  * must flush the change to disk successfully or the checkpoint
4084  * never gets written, so crash recovery will fix.
4085  *
4086  * It's possible we may enter here without an xid, so it is
4087  * essential that CreateCheckPoint waits for virtual transactions
4088  * rather than full transactionids.
4089  */
4092  delayChkptFlags = true;
4093  lsn = XLogSaveBufferForHint(buffer, buffer_std);
4094  }
4095 
4096  buf_state = LockBufHdr(bufHdr);
4097 
4098  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
4099 
4100  if (!(buf_state & BM_DIRTY))
4101  {
4102  dirtied = true; /* Means "will be dirtied by this action" */
4103 
4104  /*
4105  * Set the page LSN if we wrote a backup block. We aren't supposed
4106  * to set this when only holding a share lock but as long as we
4107  * serialise it somehow we're OK. We choose to set LSN while
4108  * holding the buffer header lock, which causes any reader of an
4109  * LSN who holds only a share lock to also obtain a buffer header
4110  * lock before using PageGetLSN(), which is enforced in
4111  * BufferGetLSNAtomic().
4112  *
4113  * If checksums are enabled, you might think we should reset the
4114  * checksum here. That will happen when the page is written
4115  * sometime later in this checkpoint cycle.
4116  */
4117  if (!XLogRecPtrIsInvalid(lsn))
4118  PageSetLSN(page, lsn);
4119  }
4120 
4121  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
4122  UnlockBufHdr(bufHdr, buf_state);
4123 
4124  if (delayChkptFlags)
4126 
4127  if (dirtied)
4128  {
4129  VacuumPageDirty++;
4131  if (VacuumCostActive)
4133  }
4134  }
4135 }
4136 
4137 /*
4138  * Release buffer content locks for shared buffers.
4139  *
4140  * Used to clean up after errors.
4141  *
4142  * Currently, we can expect that lwlock.c's LWLockReleaseAll() took care
4143  * of releasing buffer content locks per se; the only thing we need to deal
4144  * with here is clearing any PIN_COUNT request that was in progress.
4145  */
4146 void
4148 {
4150 
4151  if (buf)
4152  {
4153  uint32 buf_state;
4154 
4155  buf_state = LockBufHdr(buf);
4156 
4157  /*
4158  * Don't complain if flag bit not set; it could have been reset but we
4159  * got a cancel/die interrupt before getting the signal.
4160  */
4161  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
4162  buf->wait_backend_pgprocno == MyProc->pgprocno)
4163  buf_state &= ~BM_PIN_COUNT_WAITER;
4164 
4165  UnlockBufHdr(buf, buf_state);
4166 
4167  PinCountWaitBuf = NULL;
4168  }
4169 }
4170 
4171 /*
4172  * Acquire or release the content_lock for the buffer.
4173  */
4174 void
4176 {
4177  BufferDesc *buf;
4178 
4180  if (BufferIsLocal(buffer))
4181  return; /* local buffers need no lock */
4182 
4184 
4185  if (mode == BUFFER_LOCK_UNLOCK)
4187  else if (mode == BUFFER_LOCK_SHARE)
4189  else if (mode == BUFFER_LOCK_EXCLUSIVE)
4191  else
4192  elog(ERROR, "unrecognized buffer lock mode: %d", mode);
4193 }
4194 
4195 /*
4196  * Acquire the content_lock for the buffer, but only if we don't have to wait.
4197  *
4198  * This assumes the caller wants BUFFER_LOCK_EXCLUSIVE mode.
4199  */
4200 bool
4202 {
4203  BufferDesc *buf;
4204 
4206  if (BufferIsLocal(buffer))
4207  return true; /* act as though we got it */
4208 
4210 
4212  LW_EXCLUSIVE);
4213 }
4214 
4215 /*
4216  * LockBufferForCleanup - lock a buffer in preparation for deleting items
4217  *
4218  * Items may be deleted from a disk page only when the caller (a) holds an
4219  * exclusive lock on the buffer and (b) has observed that no other backend
4220  * holds a pin on the buffer. If there is a pin, then the other backend
4221  * might have a pointer into the buffer (for example, a heapscan reference
4222  * to an item --- see README for more details). It's OK if a pin is added
4223  * after the cleanup starts, however; the newly-arrived backend will be
4224  * unable to look at the page until we release the exclusive lock.
4225  *
4226  * To implement this protocol, a would-be deleter must pin the buffer and
4227  * then call LockBufferForCleanup(). LockBufferForCleanup() is similar to
4228  * LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE), except that it loops until
4229  * it has successfully observed pin count = 1.
4230  */
4231 void
4233 {
4234  BufferDesc *bufHdr;
4235  char *new_status = NULL;
4236  TimestampTz waitStart = 0;
4237  bool logged_recovery_conflict = false;
4238 
4240  Assert(PinCountWaitBuf == NULL);
4241 
4242  if (BufferIsLocal(buffer))
4243  {
4244  /* There should be exactly one pin */
4245  if (LocalRefCount[-buffer - 1] != 1)
4246  elog(ERROR, "incorrect local pin count: %d",
4247  LocalRefCount[-buffer - 1]);
4248  /* Nobody else to wait for */
4249  return;
4250  }
4251 
4252  /* There should be exactly one local pin */
4253  if (GetPrivateRefCount(buffer) != 1)
4254  elog(ERROR, "incorrect local pin count: %d",
4256 
4257  bufHdr = GetBufferDescriptor(buffer - 1);
4258 
4259  for (;;)
4260  {
4261  uint32 buf_state;
4262 
4263  /* Try to acquire lock */
4265  buf_state = LockBufHdr(bufHdr);
4266 
4267  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
4268  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
4269  {
4270  /* Successfully acquired exclusive lock with pincount 1 */
4271  UnlockBufHdr(bufHdr, buf_state);
4272 
4273  /*
4274  * Emit the log message if recovery conflict on buffer pin was
4275  * resolved but the startup process waited longer than
4276  * deadlock_timeout for it.
4277  */
4278  if (logged_recovery_conflict)
4280  waitStart, GetCurrentTimestamp(),
4281  NULL, false);
4282 
4283  /* Report change to non-waiting status */
4284  if (new_status)
4285  {
4286  set_ps_display(new_status);
4287  pfree(new_status);
4288  }
4289  return;
4290  }
4291  /* Failed, so mark myself as waiting for pincount 1 */
4292  if (buf_state & BM_PIN_COUNT_WAITER)
4293  {
4294  UnlockBufHdr(bufHdr, buf_state);
4296  elog(ERROR, "multiple backends attempting to wait for pincount 1");
4297  }
4299  PinCountWaitBuf = bufHdr;
4300  buf_state |= BM_PIN_COUNT_WAITER;
4301  UnlockBufHdr(bufHdr, buf_state);
4303 
4304  /* Wait to be signaled by UnpinBuffer() */
4305  if (InHotStandby)
4306  {
4307  /* Report change to waiting status */
4308  if (update_process_title && new_status == NULL)
4309  {
4310  const char *old_status;
4311  int len;
4312 
4313  old_status = get_ps_display(&len);
4314  new_status = (char *) palloc(len + 8 + 1);
4315  memcpy(new_status, old_status, len);
4316  strcpy(new_status + len, " waiting");
4317  set_ps_display(new_status);
4318  new_status[len] = '\0'; /* truncate off " waiting" */
4319  }
4320 
4321  /*
4322  * Emit the log message if the startup process is waiting longer
4323  * than deadlock_timeout for recovery conflict on buffer pin.
4324  *
4325  * Skip this if first time through because the startup process has
4326  * not started waiting yet in this case. So, the wait start
4327  * timestamp is set after this logic.
4328  */
4329  if (waitStart != 0 && !logged_recovery_conflict)
4330  {
4332 
4333  if (TimestampDifferenceExceeds(waitStart, now,
4334  DeadlockTimeout))
4335  {
4337  waitStart, now, NULL, true);
4338  logged_recovery_conflict = true;
4339  }
4340  }
4341 
4342  /*
4343  * Set the wait start timestamp if logging is enabled and first
4344  * time through.
4345  */
4346  if (log_recovery_conflict_waits && waitStart == 0)
4347  waitStart = GetCurrentTimestamp();
4348 
4349  /* Publish the bufid that Startup process waits on */
4351  /* Set alarm and then wait to be signaled by UnpinBuffer() */
4353  /* Reset the published bufid */
4355  }
4356  else
4358 
4359  /*
4360  * Remove flag marking us as waiter. Normally this will not be set
4361  * anymore, but ProcWaitForSignal() can return for other signals as
4362  * well. We take care to only reset the flag if we're the waiter, as
4363  * theoretically another backend could have started waiting. That's
4364  * impossible with the current usages due to table level locking, but
4365  * better be safe.
4366  */
4367  buf_state = LockBufHdr(bufHdr);
4368  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
4370  buf_state &= ~BM_PIN_COUNT_WAITER;
4371  UnlockBufHdr(bufHdr, buf_state);
4372 
4373  PinCountWaitBuf = NULL;
4374  /* Loop back and try again */
4375  }
4376 }
4377 
4378 /*
4379  * Check called from RecoveryConflictInterrupt handler when Startup
4380  * process requests cancellation of all pin holders that are blocking it.
4381  */
4382 bool
4384 {
4385  int bufid = GetStartupBufferPinWaitBufId();
4386 
4387  /*
4388  * If we get woken slowly then it's possible that the Startup process was
4389  * already woken by other backends before we got here. Also possible that
4390  * we get here by multiple interrupts or interrupts at inappropriate
4391  * times, so make sure we do nothing if the bufid is not set.
4392  */
4393  if (bufid < 0)
4394  return false;
4395 
4396  if (GetPrivateRefCount(bufid + 1) > 0)
4397  return true;
4398 
4399  return false;
4400 }
4401 
4402 /*
4403  * ConditionalLockBufferForCleanup - as above, but don't wait to get the lock
4404  *
4405  * We won't loop, but just check once to see if the pin count is OK. If
4406  * not, return false with no lock held.
4407  */
4408 bool
4410 {
4411  BufferDesc *bufHdr;
4412  uint32 buf_state,
4413  refcount;
4414 
4416 
4417  if (BufferIsLocal(buffer))
4418  {
4419  refcount = LocalRefCount[-buffer - 1];
4420  /* There should be exactly one pin */
4421  Assert(refcount > 0);
4422  if (refcount != 1)
4423  return false;
4424  /* Nobody else to wait for */
4425  return true;
4426  }
4427 
4428  /* There should be exactly one local pin */
4430  Assert(refcount);
4431  if (refcount != 1)
4432  return false;
4433 
4434  /* Try to acquire lock */
4436  return false;
4437 
4438  bufHdr = GetBufferDescriptor(buffer - 1);
4439  buf_state = LockBufHdr(bufHdr);
4440  refcount = BUF_STATE_GET_REFCOUNT(buf_state);
4441 
4442  Assert(refcount > 0);
4443  if (refcount == 1)
4444  {
4445  /* Successfully acquired exclusive lock with pincount 1 */
4446  UnlockBufHdr(bufHdr, buf_state);
4447  return true;
4448  }
4449 
4450  /* Failed, so release the lock */
4451  UnlockBufHdr(bufHdr, buf_state);
4453  return false;
4454 }
4455 
4456 /*
4457  * IsBufferCleanupOK - as above, but we already have the lock
4458  *
4459  * Check whether it's OK to perform cleanup on a buffer we've already
4460  * locked. If we observe that the pin count is 1, our exclusive lock
4461  * happens to be a cleanup lock, and we can proceed with anything that
4462  * would have been allowable had we sought a cleanup lock originally.
4463  */
4464 bool
4466 {
4467  BufferDesc *bufHdr;
4468  uint32 buf_state;
4469 
4471 
4472  if (BufferIsLocal(buffer))
4473  {
4474  /* There should be exactly one pin */
4475  if (LocalRefCount[-buffer - 1] != 1)
4476  return false;
4477  /* Nobody else to wait for */
4478  return true;
4479  }
4480 
4481  /* There should be exactly one local pin */
4482  if (GetPrivateRefCount(buffer) != 1)
4483  return false;
4484 
4485  bufHdr = GetBufferDescriptor(buffer - 1);
4486 
4487  /* caller must hold exclusive lock on buffer */
4489  LW_EXCLUSIVE));
4490 
4491  buf_state = LockBufHdr(bufHdr);
4492 
4493  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
4494  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
4495  {
4496  /* pincount is OK. */
4497  UnlockBufHdr(bufHdr, buf_state);
4498  return true;
4499  }
4500 
4501  UnlockBufHdr(bufHdr, buf_state);
4502  return false;
4503 }
4504 
4505 
4506 /*
4507  * Functions for buffer I/O handling
4508  *
4509  * Note: We assume that nested buffer I/O never occurs.
4510  * i.e at most one BM_IO_IN_PROGRESS bit is set per proc.
4511  *
4512  * Also note that these are used only for shared buffers, not local ones.
4513  */
4514 
4515 /*
4516  * WaitIO -- Block until the IO_IN_PROGRESS flag on 'buf' is cleared.
4517  */
4518 static void
4520 {
4522 
4524  for (;;)
4525  {
4526  uint32 buf_state;
4527 
4528  /*
4529  * It may not be necessary to acquire the spinlock to check the flag
4530  * here, but since this test is essential for correctness, we'd better
4531  * play it safe.
4532  */
4533  buf_state = LockBufHdr(buf);
4534  UnlockBufHdr(buf, buf_state);
4535 
4536  if (!(buf_state & BM_IO_IN_PROGRESS))
4537  break;
4539  }
4541 }
4542 
4543 /*
4544  * StartBufferIO: begin I/O on this buffer
4545  * (Assumptions)
4546  * My process is executing no IO
4547  * The buffer is Pinned
4548  *
4549  * In some scenarios there are race conditions in which multiple backends
4550  * could attempt the same I/O operation concurrently. If someone else
4551  * has already started I/O on this buffer then we will block on the
4552  * I/O condition variable until he's done.
4553  *
4554  * Input operations are only attempted on buffers that are not BM_VALID,
4555  * and output operations only on buffers that are BM_VALID and BM_DIRTY,
4556  * so we can always tell if the work is already done.
4557  *
4558  * Returns true if we successfully marked the buffer as I/O busy,
4559  * false if someone else already did the work.
4560  */
4561 static bool
4562 StartBufferIO(BufferDesc *buf, bool forInput)
4563 {
4564  uint32 buf_state;
4565 
4567 
4568  for (;;)
4569  {
4570  buf_state = LockBufHdr(buf);
4571 
4572  if (!(buf_state & BM_IO_IN_PROGRESS))
4573  break;
4574  UnlockBufHdr(buf, buf_state);
4575  WaitIO(buf);
4576  }
4577 
4578  /* Once we get here, there is definitely no I/O active on this buffer */
4579 
4580  if (forInput ? (buf_state & BM_VALID) : !(buf_state & BM_DIRTY))
4581  {
4582  /* someone else already did the I/O */
4583  UnlockBufHdr(buf, buf_state);
4584  return false;
4585  }
4586 
4587  buf_state |= BM_IO_IN_PROGRESS;
4588  UnlockBufHdr(buf, buf_state);
4589 
4590  InProgressBuf = buf;
4591  IsForInput = forInput;
4592 
4593  return true;
4594 }
4595 
4596 /*
4597  * TerminateBufferIO: release a buffer we were doing I/O on
4598  * (Assumptions)
4599  * My process is executing IO for the buffer
4600  * BM_IO_IN_PROGRESS bit is set for the buffer
4601  * The buffer is Pinned
4602  *
4603  * If clear_dirty is true and BM_JUST_DIRTIED is not set, we clear the
4604  * buffer's BM_DIRTY flag. This is appropriate when terminating a
4605  * successful write. The check on BM_JUST_DIRTIED is necessary to avoid
4606  * marking the buffer clean if it was re-dirtied while we were writing.
4607  *
4608  * set_flag_bits gets ORed into the buffer's flags. It must include
4609  * BM_IO_ERROR in a failure case. For successful completion it could
4610  * be 0, or BM_VALID if we just finished reading in the page.
4611  */
4612 static void
4613 TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits)
4614 {
4615  uint32 buf_state;
4616 
4617  Assert(buf == InProgressBuf);
4618 
4619  buf_state = LockBufHdr(buf);
4620 
4621  Assert(buf_state & BM_IO_IN_PROGRESS);
4622 
4623  buf_state &= ~(BM_IO_IN_PROGRESS | BM_IO_ERROR);
4624  if (clear_dirty && !(buf_state & BM_JUST_DIRTIED))
4625  buf_state &= ~(BM_DIRTY | BM_CHECKPOINT_NEEDED);
4626 
4627  buf_state |= set_flag_bits;
4628  UnlockBufHdr(buf, buf_state);
4629 
4630  InProgressBuf = NULL;
4631 
4633 }
4634 
4635 /*
4636  * AbortBufferIO: Clean up any active buffer I/O after an error.
4637  *
4638  * All LWLocks we might have held have been released,
4639  * but we haven't yet released buffer pins, so the buffer is still pinned.
4640  *
4641  * If I/O was in progress, we always set BM_IO_ERROR, even though it's
4642  * possible the error condition wasn't related to the I/O.
4643  */
4644 void
4646 {
4648 
4649  if (buf)
4650  {
4651  uint32 buf_state;
4652 
4653  buf_state = LockBufHdr(buf);
4654  Assert(buf_state & BM_IO_IN_PROGRESS);
4655  if (IsForInput)
4656  {
4657  Assert(!(buf_state & BM_DIRTY));
4658 
4659  /* We'd better not think buffer is valid yet */
4660  Assert(!(buf_state & BM_VALID));
4661  UnlockBufHdr(buf, buf_state);
4662  }
4663  else
4664  {
4665  Assert(buf_state & BM_DIRTY);
4666  UnlockBufHdr(buf, buf_state);
4667  /* Issue notice if this is not the first failure... */
4668  if (buf_state & BM_IO_ERROR)
4669  {
4670  /* Buffer is pinned, so we can read tag without spinlock */
4671  char *path;
4672 
4673  path = relpathperm(BufTagGetRelFileLocator(&buf->tag),
4674  BufTagGetForkNum(&buf->tag));
4675  ereport(WARNING,
4676  (errcode(ERRCODE_IO_ERROR),
4677  errmsg("could not write block %u of %s",
4678  buf->tag.blockNum, path),
4679  errdetail("Multiple failures --- write error might be permanent.")));
4680  pfree(path);
4681  }
4682  }
4684  }
4685 }
4686 
4687 /*
4688  * Error context callback for errors occurring during shared buffer writes.
4689  */
4690 static void
4692 {
4693  BufferDesc *bufHdr = (BufferDesc *) arg;
4694 
4695  /* Buffer is pinned, so we can read the tag without locking the spinlock */
4696  if (bufHdr != NULL)
4697  {
4698  char *path = relpathperm(BufTagGetRelFileLocator(&bufHdr->tag),
4699  BufTagGetForkNum(&bufHdr->tag));
4700 
4701  errcontext("writing block %u of relation %s",
4702  bufHdr->tag.blockNum, path);
4703  pfree(path);
4704  }
4705 }
4706 
4707 /*
4708  * Error context callback for errors occurring during local buffer writes.
4709  */
4710 static void
4712 {
4713  BufferDesc *bufHdr = (BufferDesc *) arg;
4714 
4715  if (bufHdr != NULL)
4716  {
4717  char *path = relpathbackend(BufTagGetRelFileLocator(&bufHdr->tag),
4718  MyBackendId,
4719  BufTagGetForkNum(&bufHdr->tag));
4720 
4721  errcontext("writing block %u of relation %s",
4722  bufHdr->tag.blockNum, path);
4723  pfree(path);
4724  }
4725 }
4726 
4727 /*
4728  * RelFileLocator qsort/bsearch comparator; see RelFileLocatorEquals.
4729  */
4730 static int
4731 rlocator_comparator(const void *p1, const void *p2)
4732 {
4733  RelFileLocator n1 = *(const RelFileLocator *) p1;
4734  RelFileLocator n2 = *(const RelFileLocator *) p2;
4735 
4736  if (n1.relNumber < n2.relNumber)
4737  return -1;
4738  else if (n1.relNumber > n2.relNumber)
4739  return 1;
4740 
4741  if (n1.dbOid < n2.dbOid)
4742  return -1;
4743  else if (n1.dbOid > n2.dbOid)
4744  return 1;
4745 
4746  if (n1.spcOid < n2.spcOid)
4747  return -1;
4748  else if (n1.spcOid > n2.spcOid)
4749  return 1;
4750  else
4751  return 0;
4752 }
4753 
4754 /*
4755  * Lock buffer header - set BM_LOCKED in buffer state.
4756  */
4757 uint32
4759 {
4760  SpinDelayStatus delayStatus;
4761  uint32 old_buf_state;
4762 
4763  init_local_spin_delay(&delayStatus);
4764 
4765  while (true)
4766  {
4767  /* set BM_LOCKED flag */
4768  old_buf_state = pg_atomic_fetch_or_u32(&desc->state, BM_LOCKED);
4769  /* if it wasn't set before we're OK */
4770  if (!(old_buf_state & BM_LOCKED))
4771  break;
4772  perform_spin_delay(&delayStatus);
4773  }
4774  finish_spin_delay(&delayStatus);
4775  return old_buf_state | BM_LOCKED;
4776 }
4777 
4778 /*
4779  * Wait until the BM_LOCKED flag isn't set anymore and return the buffer's
4780  * state at that point.
4781  *
4782  * Obviously the buffer could be locked by the time the value is returned, so
4783  * this is primarily useful in CAS style loops.
4784  */
4785 static uint32
4787 {
4788  SpinDelayStatus delayStatus;
4789  uint32 buf_state;
4790 
4791  init_local_spin_delay(&delayStatus);
4792 
4793  buf_state = pg_atomic_read_u32(&buf->state);
4794 
4795  while (buf_state & BM_LOCKED)
4796  {
4797  perform_spin_delay(&delayStatus);
4798  buf_state = pg_atomic_read_u32(&buf->state);
4799  }
4800 
4801  finish_spin_delay(&delayStatus);
4802 
4803  return buf_state;
4804 }
4805 
4806 /*
4807  * BufferTag comparator.
4808  */
4809 static inline int
4811 {
4812  int ret;
4813  RelFileLocator rlocatora;
4814  RelFileLocator rlocatorb;
4815 
4816  rlocatora = BufTagGetRelFileLocator(ba);
4817  rlocatorb = BufTagGetRelFileLocator(bb);
4818 
4819  ret = rlocator_comparator(&rlocatora, &rlocatorb);
4820 
4821  if (ret != 0)
4822  return ret;
4823 
4824  if (BufTagGetForkNum(ba) < BufTagGetForkNum(bb))
4825  return -1;
4826  if (BufTagGetForkNum(ba) > BufTagGetForkNum(bb))
4827  return 1;
4828 
4829  if (ba->blockNum < bb->blockNum)
4830  return -1;
4831  if (ba->blockNum > bb->blockNum)
4832  return 1;
4833 
4834  return 0;
4835 }
4836 
4837 /*
4838  * Comparator determining the writeout order in a checkpoint.
4839  *
4840  * It is important that tablespaces are compared first, the logic balancing
4841  * writes between tablespaces relies on it.
4842  */
4843 static inline int
4845 {
4846  /* compare tablespace */
4847  if (a->tsId < b->tsId)
4848  return -1;
4849  else if (a->tsId > b->tsId)
4850  return 1;
4851  /* compare relation */
4852  if (a->relNumber < b->relNumber)
4853  return -1;
4854  else if (a->relNumber > b->relNumber)
4855  return 1;
4856  /* compare fork */
4857  else if (a->forkNum < b->forkNum)
4858  return -1;
4859  else if (a->forkNum > b->forkNum)
4860  return 1;
4861  /* compare block number */
4862  else if (a->blockNum < b->blockNum)
4863  return -1;
4864  else if (a->blockNum > b->blockNum)
4865  return 1;
4866  /* equal page IDs are unlikely, but not impossible */
4867  return 0;
4868 }
4869 
4870 /*
4871  * Comparator for a Min-Heap over the per-tablespace checkpoint completion
4872  * progress.
4873  */
4874 static int
4876 {
4877  CkptTsStatus *sa = (CkptTsStatus *) a;
4878  CkptTsStatus *sb = (CkptTsStatus *) b;
4879 
4880  /* we want a min-heap, so return 1 for the a < b */
4881  if (sa->progress < sb->progress)
4882  return 1;
4883  else if (sa->progress == sb->progress)
4884  return 0;
4885  else
4886  return -1;
4887 }
4888 
4889 /*
4890  * Initialize a writeback context, discarding potential previous state.
4891  *
4892  * *max_pending is a pointer instead of an immediate value, so the coalesce
4893  * limits can easily changed by the GUC mechanism, and so calling code does
4894  * not have to check the current configuration. A value of 0 means that no
4895  * writeback control will be performed.
4896  */
4897 void
4898 WritebackContextInit(WritebackContext *context, int *max_pending)
4899 {
4900  Assert(*max_pending <= WRITEBACK_MAX_PENDING_FLUSHES);
4901 
4902  context->max_pending = max_pending;
4903  context->nr_pending = 0;
4904 }
4905 
4906 /*
4907  * Add buffer to list of pending writeback requests.
4908  */
4909 void
4911 {
4912  PendingWriteback *pending;
4913 
4914  /*
4915  * Add buffer to the pending writeback array, unless writeback control is
4916  * disabled.
4917  */
4918  if (*context->max_pending > 0)
4919  {
4921 
4922  pending = &context->pending_writebacks[context->nr_pending++];
4923 
4924  pending->tag = *tag;
4925  }
4926 
4927  /*
4928  * Perform pending flushes if the writeback limit is exceeded. This
4929  * includes the case where previously an item has been added, but control
4930  * is now disabled.
4931  */
4932  if (context->nr_pending >= *context->max_pending)
4933  IssuePendingWritebacks(context);
4934 }
4935 
4936 #define ST_SORT sort_pending_writebacks
4937 #define ST_ELEMENT_TYPE PendingWriteback
4938 #define ST_COMPARE(a, b) buffertag_comparator(&a->tag, &b->tag)
4939 #define ST_SCOPE static
4940 #define ST_DEFINE
4941 #include <lib/sort_template.h>
4942 
4943 /*
4944  * Issue all pending writeback requests, previously scheduled with
4945  * ScheduleBufferTagForWriteback, to the OS.
4946  *
4947  * Because this is only used to improve the OSs IO scheduling we try to never
4948  * error out - it's just a hint.
4949  */
4950 void
4952 {
4953  int i;
4954 
4955  if (context->nr_pending == 0)
4956  return;
4957 
4958  /*
4959  * Executing the writes in-order can make them a lot faster, and allows to
4960  * merge writeback requests to consecutive blocks into larger writebacks.
4961  */
4962  sort_pending_writebacks(context->pending_writebacks, context->nr_pending);
4963 
4964  /*
4965  * Coalesce neighbouring writes, but nothing else. For that we iterate
4966  * through the, now sorted, array of pending flushes, and look forward to
4967  * find all neighbouring (or identical) writes.
4968  */
4969  for (i = 0; i < context->nr_pending; i++)
4970  {
4973  SMgrRelation reln;
4974  int ahead;
4975  BufferTag tag;
4976  RelFileLocator currlocator;
4977  Size nblocks = 1;
4978 
4979  cur = &context->pending_writebacks[i];
4980  tag = cur->tag;
4981  currlocator = BufTagGetRelFileLocator(&tag);
4982 
4983  /*
4984  * Peek ahead, into following writeback requests, to see if they can
4985  * be combined with the current one.
4986  */
4987  for (ahead = 0; i + ahead + 1 < context->nr_pending; ahead++)
4988  {
4989 
4990  next = &context->pending_writebacks[i + ahead + 1];
4991 
4992  /* different file, stop */
4993  if (!RelFileLocatorEquals(currlocator,
4994  BufTagGetRelFileLocator(&next->tag)) ||
4995  BufTagGetForkNum(&cur->tag) != BufTagGetForkNum(&next->tag))
4996  break;
4997 
4998  /* ok, block queued twice, skip */
4999  if (cur->tag.blockNum == next->tag.blockNum)
5000  continue;
5001 
5002  /* only merge consecutive writes */
5003  if (cur->tag.blockNum + 1 != next->tag.blockNum)
5004  break;
5005 
5006  nblocks++;
5007  cur = next;
5008  }
5009 
5010  i += ahead;
5011 
5012  /* and finally tell the kernel to write the data to storage */
5013  reln = smgropen(currlocator, InvalidBackendId);
5014  smgrwriteback(reln, BufTagGetForkNum(&tag), tag.blockNum, nblocks);
5015  }
5016 
5017  context->nr_pending = 0;
5018 }
5019 
5020 
5021 /*
5022  * Implement slower/larger portions of TestForOldSnapshot
5023  *
5024  * Smaller/faster portions are put inline, but the entire set of logic is too
5025  * big for that.
5026  */
5027 void
5029 {
5030  if (RelationAllowsEarlyPruning(relation)
5031  && (snapshot)->whenTaken < GetOldSnapshotThresholdTimestamp())
5032  ereport(ERROR,
5033  (errcode(ERRCODE_SNAPSHOT_TOO_OLD),
5034  errmsg("snapshot too old")));
5035 }
static bool pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
Definition: atomics.h:306
static uint32 pg_atomic_fetch_or_u32(volatile pg_atomic_uint32 *ptr, uint32 or_)
Definition: atomics.h:367
static void pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:272
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:236
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition: timestamp.c:1705
TimestampTz GetCurrentTimestamp(void)
Definition: timestamp.c:1574
Datum now(PG_FUNCTION_ARGS)
Definition: timestamp.c:1538
int BackendId
Definition: backendid.h:21
#define InvalidBackendId
Definition: backendid.h:23
int BgWriterDelay
Definition: bgwriter.c:61
void binaryheap_build(binaryheap *heap)
Definition: binaryheap.c:125
void binaryheap_add_unordered(binaryheap *heap, Datum d)
Definition: binaryheap.c:109
binaryheap * binaryheap_allocate(int capacity, binaryheap_comparator compare, void *arg)
Definition: binaryheap.c:32
Datum binaryheap_remove_first(binaryheap *heap)
Definition: binaryheap.c:173
void binaryheap_free(binaryheap *heap)
Definition: binaryheap.c:68
void binaryheap_replace_first(binaryheap *heap, Datum d)
Definition: binaryheap.c:207
Datum binaryheap_first(binaryheap *heap)
Definition: binaryheap.c:158
#define binaryheap_empty(h)
Definition: binaryheap.h:52
uint32 BlockNumber
Definition: block.h:31
#define InvalidBlockNumber
Definition: block.h:33
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition: block.h:71
static int32 next
Definition: blutils.c:219
int Buffer
Definition: buf.h:23
#define InvalidBuffer
Definition: buf.h:25
#define BufferIsLocal(buffer)
Definition: buf.h:37
CkptSortItem * CkptBufferIds
Definition: buf_init.c:25
WritebackContext BackendWritebackContext
Definition: buf_init.c:24
#define BM_MAX_USAGE_COUNT
Definition: buf_internals.h:76
static void InitBufferTag(BufferTag *tag, const RelFileLocator *rlocator, ForkNumber forkNum, BlockNumber blockNum)
#define BM_TAG_VALID
Definition: buf_internals.h:61
#define BM_PERMANENT
Definition: buf_internals.h:67
#define BUF_USAGECOUNT_MASK
Definition: buf_internals.h:43
static ForkNumber BufTagGetForkNum(const BufferTag *tag)
#define BUF_REFCOUNT_ONE
Definition: buf_internals.h:41
static BufferDesc * GetLocalBufferDescriptor(uint32 id)
static BufferDesc * GetBufferDescriptor(uint32 id)
static void UnlockBufHdr(BufferDesc *desc, uint32 buf_state)
static bool BufferTagsEqual(const BufferTag *tag1, const BufferTag *tag2)
static RelFileNumber BufTagGetRelNumber(const BufferTag *tag)
static bool BufTagMatchesRelFileLocator(const BufferTag *tag, const RelFileLocator *rlocator)
#define BUF_FLAG_MASK
Definition: buf_internals.h:46
static LWLock * BufMappingPartitionLock(uint32 hashcode)
#define BM_PIN_COUNT_WAITER
Definition: buf_internals.h:65
#define BM_DIRTY
Definition: buf_internals.h:59
#define BM_LOCKED
Definition: buf_internals.h:58
#define BM_JUST_DIRTIED
Definition: buf_internals.h:64
#define BUF_STATE_GET_USAGECOUNT(state)
Definition: buf_internals.h:50
#define BM_IO_IN_PROGRESS
Definition: buf_internals.h:62
static void ClearBufferTag(BufferTag *tag)
static ConditionVariable * BufferDescriptorGetIOCV(const BufferDesc *bdesc)
#define BUF_USAGECOUNT_ONE
Definition: buf_internals.h:44
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:49
static RelFileLocator BufTagGetRelFileLocator(const BufferTag *tag)
#define BM_VALID
Definition: buf_internals.h:60
#define BM_IO_ERROR
Definition: buf_internals.h:63
static Buffer BufferDescriptorGetBuffer(const BufferDesc *bdesc)
#define BM_CHECKPOINT_NEEDED
Definition: buf_internals.h:66
static LWLock * BufferDescriptorGetContentLock(const BufferDesc *bdesc)
void BufTableDelete(BufferTag *tagPtr, uint32 hashcode)
Definition: buf_table.c:149
int BufTableLookup(BufferTag *tagPtr, uint32 hashcode)
Definition: buf_table.c:91
uint32 BufTableHashCode(BufferTag *tagPtr)
Definition: buf_table.c:79
int BufTableInsert(BufferTag *tagPtr, uint32 hashcode, int buf_id)
Definition: buf_table.c:119
bool track_io_timing
Definition: bufmgr.c:137
void FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
Definition: bufmgr.c:3611
void IncrBufferRefCount(Buffer buffer)
Definition: bufmgr.c:3972
void DropDatabaseBuffers(Oid dbid)
Definition: bufmgr.c:3416
static int ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b)
Definition: bufmgr.c:4844
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2766
static PrivateRefCountEntry * NewPrivateRefCountEntry(Buffer buffer)
Definition: bufmgr.c:283
void DropRelationBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum, int nforks, BlockNumber *firstDelBlock)
Definition: bufmgr.c:3061
Buffer ReleaseAndReadBuffer(Buffer buffer, Relation relation, BlockNumber blockNum)
Definition: bufmgr.c:1646
PrefetchBufferResult PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
Definition: bufmgr.c:592
static uint32 PrivateRefCountClock
Definition: bufmgr.c:202
static bool PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy)
Definition: bufmgr.c:1704
bool zero_damaged_pages
Definition: bufmgr.c:134
#define BUF_DROP_FULL_SCAN_THRESHOLD
Definition: bufmgr.c:81
static void PinBuffer_Locked(BufferDesc *buf)
Definition: bufmgr.c:1807
void BufmgrCommit(void)
Definition: bufmgr.c:2752
static uint32 WaitBufHdrUnlocked(BufferDesc *buf)
Definition: bufmgr.c:4786
static int buffertag_comparator(const BufferTag *ba, const BufferTag *bb)
Definition: bufmgr.c:4810
#define LocalBufHdrGetBlock(bufHdr)
Definition: bufmgr.c:66
bool IsBufferCleanupOK(Buffer buffer)
Definition: bufmgr.c:4465
#define BufferGetLSN(bufHdr)
Definition: bufmgr.c:63
void AtEOXact_Buffers(bool isCommit)
Definition: bufmgr.c:2597
static void CheckForBufferLeaks(void)
Definition: bufmgr.c:2658
void CreateAndCopyRelationData(RelFileLocator src_rlocator, RelFileLocator dst_rlocator, bool permanent)
Definition: bufmgr.c:3802
void DropRelationsAllBuffers(SMgrRelation *smgr_reln, int nlocators)
Definition: bufmgr.c:3184
static int rlocator_comparator(const void *p1, const void *p2)
Definition: bufmgr.c:4731
void TestForOldSnapshot_impl(Snapshot snapshot, Relation relation)
Definition: bufmgr.c:5028
struct SMgrSortArray SMgrSortArray
static void AtProcExit_Buffers(int code, Datum arg)
Definition: bufmgr.c:2639
static int ts_ckpt_progress_comparator(Datum a, Datum b, void *arg)
Definition: bufmgr.c:4875
void BufferGetTag(Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum, BlockNumber *blknum)
Definition: bufmgr.c:2787
#define BufHdrGetBlock(bufHdr)
Definition: bufmgr.c:62
#define BUF_REUSABLE
Definition: bufmgr.c:71
void ScheduleBufferTagForWriteback(WritebackContext *context, BufferTag *tag)
Definition: bufmgr.c:4910
static void local_buffer_write_error_callback(void *arg)
Definition: bufmgr.c:4711
static void BufferSync(int flags)
Definition: bufmgr.c:1951
static BufferDesc * BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, BufferAccessStrategy strategy, bool *foundPtr)
Definition: bufmgr.c:1119
static BufferDesc * InProgressBuf
Definition: bufmgr.c:163
void CheckPointBuffers(int flags)
Definition: bufmgr.c:2742
bool BgBufferSync(WritebackContext *wb_context)
Definition: bufmgr.c:2227
bool BufferIsPermanent(Buffer buffer)
Definition: bufmgr.c:2985
#define REFCOUNT_ARRAY_ENTRIES
Definition: bufmgr.c:90
void UnlockBuffers(void)
Definition: bufmgr.c:4147
PrefetchBufferResult PrefetchSharedBuffer(SMgrRelation smgr_reln, ForkNumber forkNum, BlockNumber blockNum)
Definition: bufmgr.c:505
bool ConditionalLockBuffer(Buffer buffer)
Definition: bufmgr.c:4201
static bool StartBufferIO(BufferDesc *buf, bool forInput)
Definition: bufmgr.c:4562
BlockNumber RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
Definition: bufmgr.c:2953
int bgwriter_flush_after
Definition: bufmgr.c:159
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3934
static void FindAndDropRelationBuffers(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber nForkBlock, BlockNumber firstDelBlock)
Definition: bufmgr.c:3355
XLogRecPtr BufferGetLSNAtomic(Buffer buffer)
Definition: bufmgr.c:3015
bool HoldingBufferPinThatDelaysRecovery(void)
Definition: bufmgr.c:4383
int checkpoint_flush_after
Definition: bufmgr.c:158
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3957
static void shared_buffer_write_error_callback(void *arg)
Definition: bufmgr.c:4691
void WritebackContextInit(WritebackContext *context, int *max_pending)
Definition: bufmgr.c:4898
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1583
void InitBufferPoolAccess(void)
Definition: bufmgr.c:2614
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:450
double bgwriter_lru_multiplier
Definition: bufmgr.c:136
void AbortBufferIO(void)
Definition: bufmgr.c:4645
int backend_flush_after
Definition: bufmgr.c:160
static void ReservePrivateRefCountEntry(void)
Definition: bufmgr.c:217
static bool IsForInput
Definition: bufmgr.c:164
static BufferDesc * PinCountWaitBuf
Definition: bufmgr.c:167
void PrintBufferLeakWarning(Buffer buffer)
Definition: bufmgr.c:2698
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:389
void LockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:4232
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4175
static PrivateRefCountEntry * ReservedRefCountEntry
Definition: bufmgr.c:203
void MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
Definition: bufmgr.c:4004
void FlushRelationBuffers(Relation rel)
Definition: bufmgr.c:3522
static void ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref)
Definition: bufmgr.c:412
Buffer ReadBufferWithoutRelcache(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy, bool permanent)
Definition: bufmgr.c:799
bool ReadRecentBuffer(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockNum, Buffer recent_buffer)
Definition: bufmgr.c:623
#define RELS_BSEARCH_THRESHOLD
Definition: bufmgr.c:73
int maintenance_io_concurrency
Definition: bufmgr.c:152
static Buffer ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy, bool *hit)
Definition: bufmgr.c:819
void FlushDatabaseBuffers(Oid dbid)
Definition: bufmgr.c:3871
static void InvalidateBuffer(BufferDesc *buf)
Definition: bufmgr.c:1485
static void RelationCopyStorageUsingBuffer(RelFileLocator srclocator, RelFileLocator dstlocator, ForkNumber forkNum, bool permanent)
Definition: bufmgr.c:3712
int effective_io_concurrency
Definition: bufmgr.c:145
static PrivateRefCountEntry * GetPrivateRefCountEntry(Buffer buffer, bool do_move)
Definition: bufmgr.c:309
struct PrivateRefCountEntry PrivateRefCountEntry
static void FlushBuffer(BufferDesc *buf, SMgrRelation reln)
Definition: bufmgr.c:2826
struct CkptTsStatus CkptTsStatus
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:759
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4758
static struct PrivateRefCountEntry PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES]
Definition: bufmgr.c:199
static void TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits)
Definition: bufmgr.c:4613
void IssuePendingWritebacks(WritebackContext *context)
Definition: bufmgr.c:4951
static int SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
Definition: bufmgr.c:2530
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:712
static HTAB * PrivateRefCountHash
Definition: bufmgr.c:200
static int32 PrivateRefCountOverflowed
Definition: bufmgr.c:201
bool ConditionalLockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:4409
int bgwriter_lru_maxpages
Definition: bufmgr.c:135
static void WaitIO(BufferDesc *buf)
Definition: bufmgr.c:4519
static void UnpinBuffer(BufferDesc *buf, bool fixOwner)
Definition: bufmgr.c:1852
#define BUF_WRITTEN
Definition: bufmgr.c:70
void FlushOneBuffer(Buffer buffer)
Definition: bufmgr.c:3914
@ BAS_BULKREAD
Definition: bufmgr.h:30
@ BAS_BULKWRITE
Definition: bufmgr.h:32
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
#define P_NEW
Definition: bufmgr.h:91
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:271
void * Block
Definition: bufmgr.h:24
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
ReadBufferMode
Definition: bufmgr.h:38
@ RBM_ZERO_ON_ERROR
Definition: bufmgr.h:44
@ RBM_ZERO_AND_CLEANUP_LOCK
Definition: bufmgr.h:42
@ RBM_ZERO_AND_LOCK
Definition: bufmgr.h:40
@ RBM_NORMAL
Definition: bufmgr.h:39
@ RBM_NORMAL_NO_LOG
Definition: bufmgr.h:45
static bool BufferIsValid(Buffer bufnum)
Definition: bufmgr.h:219
bool PageIsVerifiedExtended(Page page, BlockNumber blkno, int flags)
Definition: bufpage.c:88
void PageSetChecksumInplace(Page page, BlockNumber blkno)
Definition: bufpage.c:1539
char * PageSetChecksumCopy(Page page, BlockNumber blkno)
Definition: bufpage.c:1510
Pointer Page
Definition: bufpage.h:78
#define PIV_LOG_WARNING
Definition: bufpage.h:465
static bool PageIsNew(Page page)
Definition: bufpage.h:230
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition: bufpage.h:388
static XLogRecPtr PageGetLSN(Page page)
Definition: bufpage.h:383
#define PIV_REPORT_STAT
Definition: bufpage.h:466
unsigned int uint32
Definition: c.h:442
signed int int32
Definition: c.h:430
double float8
Definition: c.h:566
#define MemSet(start, val, len)
Definition: c.h:998
size_t Size
Definition: c.h:541
void CheckpointWriteDelay(int flags, double progress)
Definition: checkpointer.c:697
void ConditionVariableBroadcast(ConditionVariable *cv)
void ConditionVariablePrepareToSleep(ConditionVariable *cv)
void ConditionVariableSleep(ConditionVariable *cv, uint32 wait_event_info)
void ConditionVariableCancelSleep(void)
int64 TimestampTz
Definition: timestamp.h:39
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:954
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:349
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1436
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1426
struct cursor * cur
Definition: ecpg.c:28
int errdetail(const char *fmt,...)
Definition: elog.c:1039
ErrorContextCallback * error_context_stack
Definition: elog.c:94
int errhint(const char *fmt,...)
Definition: elog.c:1153
int errcode(int sqlerrcode)
Definition: elog.c:695
int errmsg(const char *fmt,...)
Definition: elog.c:906
#define LOG
Definition: elog.h:27
#define errcontext
Definition: elog.h:192
#define WARNING
Definition: elog.h:32
#define DEBUG2
Definition: elog.h:25
#define DEBUG1
Definition: elog.h:26
#define ERROR
Definition: elog.h:35
#define ereport(elevel,...)
Definition: elog.h:145
int StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc)
Definition: freelist.c:394
BufferAccessStrategy GetAccessStrategy(BufferAccessStrategyType btype)
Definition: freelist.c:541
BufferDesc * StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state)
Definition: freelist.c:201
bool StrategyRejectBuffer(BufferAccessStrategy strategy, BufferDesc *buf)
Definition: freelist.c:685
void StrategyFreeBuffer(BufferDesc *buf)
Definition: freelist.c:363
volatile sig_atomic_t ProcSignalBarrierPending
Definition: globals.c:37
int64 VacuumPageHit
Definition: globals.c:148
int NBuffers
Definition: globals.c:136
int VacuumCostPageMiss
Definition: globals.c:143
int64 VacuumPageMiss
Definition: globals.c:149
bool VacuumCostActive
Definition: globals.c:153
int64 VacuumPageDirty
Definition: globals.c:150
int VacuumCostBalance
Definition: globals.c:152
BackendId MyBackendId
Definition: globals.c:85
int VacuumCostPageDirty
Definition: globals.c:144
int VacuumCostPageHit
Definition: globals.c:142
#define free(a)
Definition: header.h:65
@ HASH_FIND
Definition: hsearch.h:113
@ HASH_REMOVE
Definition: hsearch.h:115
@ HASH_ENTER
Definition: hsearch.h:114
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_BLOBS
Definition: hsearch.h:97
#define INSTR_TIME_SET_CURRENT(t)
Definition: instr_time.h:89
#define INSTR_TIME_ADD(x, y)
Definition: instr_time.h:91
#define INSTR_TIME_SUBTRACT(x, y)
Definition: instr_time.h:103
#define INSTR_TIME_GET_MICROSEC(t)
Definition: instr_time.h:138
struct timespec instr_time
Definition: instr_time.h:83
BufferUsage pgBufferUsage
Definition: instrument.c:20
void on_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:361
int b
Definition: isn.c:70
int a
Definition: isn.c:69
int j
Definition: isn.c:74
int i
Definition: isn.c:73
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:77
Assert(fmt[strlen(fmt) - 1] !='\n')
int32 * LocalRefCount
Definition: localbuf.c:45
BufferDesc * LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum, bool *foundPtr)
Definition: localbuf.c:109
void AtEOXact_LocalBuffers(bool isCommit)
Definition: localbuf.c:598
void DropRelationLocalBuffers(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber firstDelBlock)
Definition: localbuf.c:326
void AtProcExit_LocalBuffers(void)
Definition: localbuf.c:609
void MarkLocalBufferDirty(Buffer buffer)
Definition: localbuf.c:286
void DropRelationAllLocalBuffers(RelFileLocator rlocator)
Definition: localbuf.c:375
int NLocBuffer
Definition: localbuf.c:41
PrefetchBufferResult PrefetchLocalBuffer(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum)
Definition: localbuf.c:64
bool LWLockHeldByMe(LWLock *lock)
Definition: lwlock.c:1916
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1196
bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1960
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1800
bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1367
@ LW_SHARED
Definition: lwlock.h:105
@ LW_EXCLUSIVE
Definition: lwlock.h:104
void pfree(void *pointer)
Definition: mcxt.c:1252
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:1267
void * palloc(Size size)
Definition: mcxt.c:1145
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition: memdebug.h:26
#define VALGRIND_MAKE_MEM_NOACCESS(addr, size)
Definition: memdebug.h:27
#define START_CRIT_SECTION()
Definition: miscadmin.h:148
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:121
#define END_CRIT_SECTION()
Definition: miscadmin.h:150
void * arg
#define ERRCODE_DATA_CORRUPTED
Definition: pg_basebackup.c:41
static PgChecksumMode mode
Definition: pg_checksums.c:65
#define WRITEBACK_MAX_PENDING_FLUSHES
const void size_t len
static char * buf
Definition: pg_test_fsync.c:67
#define pgstat_count_buffer_read_time(n)
Definition: pgstat.h:468
#define pgstat_count_buffer_read(rel)
Definition: pgstat.h:550
#define pgstat_count_buffer_write_time(n)
Definition: pgstat.h:470
#define pgstat_count_buffer_hit(rel)
Definition: pgstat.h:555
PgStat_BgWriterStats PendingBgWriterStats
PgStat_CheckpointerStats PendingCheckpointerStats
void pg_qsort(void *base, size_t nel, size_t elsize, int(*cmp)(const void *, const void *))
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:670
uintptr_t Datum
Definition: postgres.h:412
static Pointer DatumGetPointer(Datum X)
Definition: postgres.h:660
#define InvalidOid
Definition: postgres_ext.h:36
unsigned int Oid
Definition: postgres_ext.h:31
#define DELAY_CHKPT_START
Definition: proc.h:119
void ProcessProcSignalBarrier(void)
Definition: procsignal.c:467
@ PROCSIG_RECOVERY_CONFLICT_BUFFERPIN
Definition: procsignal.h:44
const char * get_ps_display(int *displen)
Definition: ps_status.c:413
bool update_process_title
Definition: ps_status.c:33
void set_ps_display(const char *activity)
Definition: ps_status.c:341
static SMgrRelation RelationGetSmgr(Relation rel)
Definition: rel.h:569
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:636
#define RELATION_IS_OTHER_TEMP(relation)
Definition: rel.h:657
#define RelationIsValid(relation)
Definition: rel.h:474
#define RelFileLocatorBackendIsTemp(rlocator)
#define RelFileLocatorEquals(locator1, locator2)
ForkNumber
Definition: relpath.h:48
@ MAIN_FORKNUM
Definition: relpath.h:50
@ INIT_FORKNUM
Definition: relpath.h:53
#define MAX_FORKNUM
Definition: relpath.h:62
#define relpath(rlocator, forknum)
Definition: relpath.h:94
#define relpathbackend(rlocator, backend, forknum)
Definition: relpath.h:85
#define relpathperm(rlocator, forknum)
Definition: relpath.h:90
void ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
Definition: resowner.c:972
ResourceOwner CurrentResourceOwner
Definition: resowner.c:146
void ResourceOwnerEnlargeBuffers(ResourceOwner owner)
Definition: resowner.c:950
void ResourceOwnerRememberBuffer(ResourceOwner owner, Buffer buffer)
Definition: resowner.c:963
void perform_spin_delay(SpinDelayStatus *status)
Definition: s_lock.c:125
void finish_spin_delay(SpinDelayStatus *status)
Definition: s_lock.c:175
#define init_local_spin_delay(status)
Definition: s_lock.h:842
BlockNumber smgrnblocks(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:579
void smgrwriteback(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, BlockNumber nblocks)
Definition: smgr.c:567
void smgrwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, char *buffer, bool skipFsync)
Definition: smgr.c:554
void smgrextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, char *buffer, bool skipFsync)
Definition: smgr.c:493
bool smgrprefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum)
Definition: smgr.c:518
void smgrcloserellocator(RelFileLocatorBackend rlocator)
Definition: smgr.c:346
void smgrcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo)
Definition: smgr.c:369
BlockNumber smgrnblocks_cached(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:603
SMgrRelation smgropen(RelFileLocator rlocator, BackendId backend)
Definition: smgr.c:146
void smgrread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, char *buffer)
Definition: smgr.c:532
bool smgrexists(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:247
#define SmgrIsTemp(smgr)
Definition: smgr.h:77
TimestampTz GetOldSnapshotThresholdTimestamp(void)
Definition: snapmgr.c:1705
#define RelationAllowsEarlyPruning(rel)
Definition: snapmgr.h:38
PGPROC * MyProc
Definition: proc.c:68
void ProcSendSignal(int pgprocno)
Definition: proc.c:1885
int GetStartupBufferPinWaitBufId(void)
Definition: proc.c:645
int DeadlockTimeout
Definition: proc.c:60
void SetStartupBufferPinWaitBufId(int bufid)
Definition: proc.c:633
void ProcWaitForSignal(uint32 wait_event_info)
Definition: proc.c:1873
void ResolveRecoveryConflictWithBufferPin(void)
Definition: standby.c:755
bool log_recovery_conflict_waits
Definition: standby.c:43
void LogRecoveryConflict(ProcSignalReason reason, TimestampTz wait_start, TimestampTz now, VirtualTransactionId *wait_list, bool still_waiting)
Definition: standby.c:251
bool RelFileLocatorSkippingWAL(RelFileLocator rlocator)
Definition: storage.c:550
SMgrRelation RelationCreateStorage(RelFileLocator rlocator, char relpersistence, bool register_delete)
Definition: storage.c:120
void log_smgrcreate(const RelFileLocator *rlocator, ForkNumber forkNum)
Definition: storage.c:185
int wait_backend_pgprocno
BufferTag tag
pg_atomic_uint32 state
int64 shared_blks_dirtied
Definition: instrument.h:28
int64 local_blks_hit
Definition: instrument.h:30
int64 local_blks_written
Definition: instrument.h:33
instr_time blk_write_time
Definition: instrument.h:37
int64 shared_blks_read
Definition: instrument.h:27
int64 shared_blks_written
Definition: instrument.h:29
instr_time blk_read_time
Definition: instrument.h:36
int64 local_blks_read
Definition: instrument.h:31
int64 shared_blks_hit
Definition: instrument.h:26
int ckpt_bufs_written
Definition: xlog.h:163
ForkNumber forkNum
RelFileNumber relNumber
BlockNumber blockNum
float8 progress_slice
Definition: bufmgr.c:109
int index
Definition: bufmgr.c:117
int num_scanned
Definition: bufmgr.c:114
float8 progress
Definition: bufmgr.c:108
int num_to_scan
Definition: bufmgr.c:112
Oid tsId
Definition: bufmgr.c:99
struct ErrorContextCallback * previous
Definition: elog.h:234
void(* callback)(void *arg)
Definition: elog.h:235
Size keysize
Definition: hsearch.h:75
Size entrysize
Definition: hsearch.h:76