bufmgr.c
1 /*-------------------------------------------------------------------------
2  *
3  * bufmgr.c
4  * buffer manager interface routines
5  *
6  * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/storage/buffer/bufmgr.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 /*
16  * Principal entry points:
17  *
18  * ReadBuffer() -- find or create a buffer holding the requested page,
19  * and pin it so that no one can destroy it while this process
20  * is using it.
21  *
22  * ReleaseBuffer() -- unpin a buffer
23  *
24  * MarkBufferDirty() -- mark a pinned buffer's contents as "dirty".
25  * The disk write is delayed until buffer replacement or checkpoint.
26  *
27  * See also these files:
28  * freelist.c -- chooses victim for buffer replacement
29  * buf_table.c -- manages the buffer lookup table
30  */
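As a quick orientation, the usual caller-side sequence built from these entry points looks roughly like the sketch below (a minimal illustration, not code from this file; rel and blkno stand for a valid Relation and BlockNumber supplied by the caller):

    Buffer      buf;
    Page        page;

    buf = ReadBuffer(rel, blkno);             /* find or create the page, and pin it */
    LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);   /* content lock, required before modifying */
    page = BufferGetPage(buf);
    /* ... modify the page ... */
    MarkBufferDirty(buf);                     /* the actual disk write happens later */
    UnlockReleaseBuffer(buf);                 /* drop the content lock and the pin */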
31 #include "postgres.h"
32 
33 #include <sys/file.h>
34 #include <unistd.h>
35 
36 #include "access/tableam.h"
37 #include "access/xloginsert.h"
38 #include "access/xlogutils.h"
39 #include "catalog/catalog.h"
40 #include "catalog/storage.h"
41 #include "catalog/storage_xlog.h"
42 #include "executor/instrument.h"
43 #include "lib/binaryheap.h"
44 #include "miscadmin.h"
45 #include "pg_trace.h"
46 #include "pgstat.h"
47 #include "postmaster/bgwriter.h"
48 #include "storage/buf_internals.h"
49 #include "storage/bufmgr.h"
50 #include "storage/ipc.h"
51 #include "storage/proc.h"
52 #include "storage/smgr.h"
53 #include "storage/standby.h"
54 #include "utils/memdebug.h"
55 #include "utils/ps_status.h"
56 #include "utils/rel.h"
57 #include "utils/resowner_private.h"
58 #include "utils/timestamp.h"
59 
60 
61 /* Note: these two macros only work on shared buffers, not local ones! */
62 #define BufHdrGetBlock(bufHdr) ((Block) (BufferBlocks + ((Size) (bufHdr)->buf_id) * BLCKSZ))
63 #define BufferGetLSN(bufHdr) (PageGetLSN(BufHdrGetBlock(bufHdr)))
64 
65 /* Note: this macro only works on local buffers, not shared ones! */
66 #define LocalBufHdrGetBlock(bufHdr) \
67  LocalBufferBlockPointers[-((bufHdr)->buf_id + 2)]
68 
69 /* Bits in SyncOneBuffer's return value */
70 #define BUF_WRITTEN 0x01
71 #define BUF_REUSABLE 0x02
72 
73 #define RELS_BSEARCH_THRESHOLD 20
74 
75 /*
76  * This is the size (in the number of blocks) above which we scan the
77  * entire buffer pool to remove the buffers for all the pages of relation
78  * being dropped. For the relations with size below this threshold, we find
79  * the buffers by doing lookups in BufMapping table.
80  */
81 #define BUF_DROP_FULL_SCAN_THRESHOLD (uint64) (NBuffers / 32)
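For example, with the default shared_buffers of 128 MB (NBuffers = 16384 at the standard 8 kB block size), the threshold works out to 16384 / 32 = 512 blocks, so dropping more than 4 MB worth of a relation's pages is handled by a full buffer-pool scan rather than by per-block BufMapping lookups.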
82 
83 typedef struct PrivateRefCountEntry
84 {
85  Buffer buffer;
86  int32 refcount;
87 } PrivateRefCountEntry;
88
89 /* 64 bytes, about the size of a cache line on common systems */
90 #define REFCOUNT_ARRAY_ENTRIES 8
91 
92 /*
93  * Status of buffers to checkpoint for a particular tablespace, used
94  * internally in BufferSync.
95  */
96 typedef struct CkptTsStatus
97 {
98  /* oid of the tablespace */
99  Oid tsId;
100
101  /*
102  * Checkpoint progress for this tablespace. To make progress comparable
103  * between tablespaces the progress is, for each tablespace, measured as a
104  * number between 0 and the total number of to-be-checkpointed pages. Each
105  * page checkpointed in this tablespace increments this space's progress
106  * by progress_slice.
107  */
108  float8 progress;
109  float8 progress_slice;
110
111  /* number of to-be checkpointed pages in this tablespace */
112  int num_to_scan;
113  /* already processed pages in this tablespace */
114  int num_scanned;
115
116  /* current offset in CkptBufferIds for this tablespace */
117  int index;
118 } CkptTsStatus;
119
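As a worked example of that normalization (the numbers are hypothetical): if 1000 pages are to be checkpointed in total and this tablespace owns 200 of them, its progress_slice is 1000 / 200 = 5; after 100 of its pages have been written its progress is 500, which is directly comparable to any other tablespace that is likewise halfway through its own share.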
120 /*
121  * Type for array used to sort SMgrRelations
122  *
123  * FlushRelationsAllBuffers shares the same comparator function with
124  * DropRelationsAllBuffers. Pointer to this struct and RelFileLocator must be
125  * compatible.
126  */
127 typedef struct SMgrSortArray
128 {
129  RelFileLocator rlocator; /* This must be the first member */
130  SMgrRelation srel;
131 } SMgrSortArray;
132
133 /* GUC variables */
134 bool zero_damaged_pages = false;
135 int bgwriter_lru_maxpages = 100;
136 double bgwriter_lru_multiplier = 2.0;
137 bool track_io_timing = false;
138 
139 /*
140  * How many buffers PrefetchBuffer callers should try to stay ahead of their
141  * ReadBuffer calls by. Zero means "never prefetch". This value is only used
142  * for buffers not belonging to tablespaces that have their
143  * effective_io_concurrency parameter set.
144  */
145 int effective_io_concurrency = DEFAULT_EFFECTIVE_IO_CONCURRENCY;
146
147 /*
148  * Like effective_io_concurrency, but used by maintenance code paths that might
149  * benefit from a higher setting because they work on behalf of many sessions.
150  * Overridden by the tablespace setting of the same name.
151  */
152 int maintenance_io_concurrency = DEFAULT_MAINTENANCE_IO_CONCURRENCY;
153
154 /*
155  * GUC variables about triggering kernel writeback for buffers written; OS
156  * dependent defaults are set via the GUC mechanism.
157  */
161 
162 /* local state for StartBufferIO and related functions */
163 static BufferDesc *InProgressBuf = NULL;
164 static bool IsForInput;
165 
166 /* local state for LockBufferForCleanup */
167 static BufferDesc *PinCountWaitBuf = NULL;
168
169 /*
170  * Backend-Private refcount management:
171  *
172  * Each buffer also has a private refcount that keeps track of the number of
173  * times the buffer is pinned in the current process. This is so that the
174  * shared refcount needs to be modified only once if a buffer is pinned more
175  * than once by an individual backend. It's also used to check that no buffers
176  * are still pinned at the end of transactions and when exiting.
177  *
178  *
179  * To avoid - as we used to - requiring an array with NBuffers entries to keep
180  * track of local buffers, we use a small sequentially searched array
181  * (PrivateRefCountArray) and an overflow hash table (PrivateRefCountHash) to
182  * keep track of backend local pins.
183  *
184  * Until no more than REFCOUNT_ARRAY_ENTRIES buffers are pinned at once, all
185  * refcounts are kept track of in the array; after that, new array entries
186  * displace old ones into the hash table. That way a frequently used entry
187  * can't get "stuck" in the hashtable while infrequent ones clog the array.
188  *
189  * Note that in most scenarios the number of pinned buffers will not exceed
190  * REFCOUNT_ARRAY_ENTRIES.
191  *
192  *
193  * To enter a buffer into the refcount tracking mechanism first reserve a free
194  * entry using ReservePrivateRefCountEntry() and then later, if necessary,
195  * fill it with NewPrivateRefCountEntry(). That split lets us avoid doing
196  * memory allocations in NewPrivateRefCountEntry() which can be important
197  * because in some scenarios it's called with a spinlock held...
198  */
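A condensed sketch of that reserve-then-fill protocol, modeled on the pinning code later in this file (not a complete function; buf is assumed to be a BufferDesc pointer the caller already holds):

    uint32      buf_state;
    PrivateRefCountEntry *ref;

    ReservePrivateRefCountEntry();      /* may search/evict; no spinlock held yet */

    buf_state = LockBufHdr(buf);        /* header spinlock held from here ... */
    buf_state += BUF_REFCOUNT_ONE;
    UnlockBufHdr(buf, buf_state);       /* ... to here */

    /* safe now: guaranteed not to allocate or search */
    ref = NewPrivateRefCountEntry(BufferDescriptorGetBuffer(buf));
    ref->refcount++;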
199 static struct PrivateRefCountEntry PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES];
200 static HTAB *PrivateRefCountHash = NULL;
201 static int32 PrivateRefCountOverflowed = 0;
202 static uint32 PrivateRefCountClock = 0;
203 static PrivateRefCountEntry *ReservedRefCountEntry = NULL;
204
205 static void ReservePrivateRefCountEntry(void);
206 static PrivateRefCountEntry *NewPrivateRefCountEntry(Buffer buffer);
207 static PrivateRefCountEntry *GetPrivateRefCountEntry(Buffer buffer, bool do_move);
208 static inline int32 GetPrivateRefCount(Buffer buffer);
209 static void ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref);
210
211 /*
212  * Ensure that the PrivateRefCountArray has sufficient space to store one more
213  * entry. This has to be called before using NewPrivateRefCountEntry() to fill
214  * a new entry - but it's perfectly fine to not use a reserved entry.
215  */
216 static void
217 ReservePrivateRefCountEntry(void)
218 {
219  /* Already reserved (or freed), nothing to do */
220  if (ReservedRefCountEntry != NULL)
221  return;
222 
223  /*
224  * First search for a free entry in the array; that'll be sufficient in the
225  * majority of cases.
226  */
227  {
228  int i;
229 
230  for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
231  {
233 
235 
236  if (res->buffer == InvalidBuffer)
237  {
239  return;
240  }
241  }
242  }
243 
244  /*
245  * No luck. All array entries are full. Move one array entry into the hash
246  * table.
247  */
248  {
249  /*
250  * Move entry from the current clock position in the array into the
251  * hashtable. Use that slot.
252  */
253  PrivateRefCountEntry *hashent;
254  bool found;
255 
256  /* select victim slot */
259 
260  /* Better be used, otherwise we shouldn't get here. */
262 
263  /* enter victim array entry into hashtable */
266  HASH_ENTER,
267  &found);
268  Assert(!found);
270 
271  /* clear the now free array slot */
274 
276  }
277 }
278 
279 /*
280  * Fill a previously reserved refcount entry.
281  */
282 static PrivateRefCountEntry *
283 NewPrivateRefCountEntry(Buffer buffer)
284 {
286 
287  /* only allowed to be called when a reservation has been made */
288  Assert(ReservedRefCountEntry != NULL);
289 
290  /* use up the reserved entry */
292  ReservedRefCountEntry = NULL;
293 
294  /* and fill it */
295  res->buffer = buffer;
296  res->refcount = 0;
297 
298  return res;
299 }
300 
301 /*
302  * Return the PrivateRefCount entry for the passed buffer.
303  *
304  * Returns NULL if a buffer doesn't have a refcount entry. Otherwise, if
305  * do_move is true, and the entry resides in the hashtable the entry is
306  * optimized for frequent access by moving it to the array.
307  */
308 static PrivateRefCountEntry *
309 GetPrivateRefCountEntry(Buffer buffer, bool do_move)
310 {
312  int i;
313 
316 
317  /*
318  * First search for references in the array, that'll be sufficient in the
319  * majority of cases.
320  */
321  for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
322  {
324 
325  if (res->buffer == buffer)
326  return res;
327  }
328 
329  /*
330  * By here we know that the buffer, if already pinned, isn't residing in
331  * the array.
332  *
333  * Only look up the buffer in the hashtable if we've previously overflowed
334  * into it.
335  */
336  if (PrivateRefCountOverflowed == 0)
337  return NULL;
338 
340 
341  if (res == NULL)
342  return NULL;
343  else if (!do_move)
344  {
345  /* caller doesn't want us to move the hash entry into the array */
346  return res;
347  }
348  else
349  {
350  /* move buffer from hashtable into the free array slot */
351  bool found;
353 
354  /* Ensure there's a free array slot */
356 
357  /* Use up the reserved slot */
358  Assert(ReservedRefCountEntry != NULL);
360  ReservedRefCountEntry = NULL;
361  Assert(free->buffer == InvalidBuffer);
362 
363  /* and fill it */
364  free->buffer = buffer;
365  free->refcount = res->refcount;
366 
367  /* delete from hashtable */
369  Assert(found);
372 
373  return free;
374  }
375 }
376 
377 /*
378  * Returns how many times the passed buffer is pinned by this backend.
379  *
380  * Only works for shared memory buffers!
381  */
382 static inline int32
383 GetPrivateRefCount(Buffer buffer)
384 {
386 
389 
390  /*
391  * Not moving the entry - that's ok for the current users, but we might
392  * want to change this one day.
393  */
394  ref = GetPrivateRefCountEntry(buffer, false);
395 
396  if (ref == NULL)
397  return 0;
398  return ref->refcount;
399 }
400 
401 /*
402  * Release resources used to track the reference count of a buffer which we no
403  * longer have pinned and don't want to pin again immediately.
404  */
405 static void
406 ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref)
407 {
408  Assert(ref->refcount == 0);
409 
410  if (ref >= &PrivateRefCountArray[0] &&
412  {
413  ref->buffer = InvalidBuffer;
414 
415  /*
416  * Mark the just used entry as reserved - in many scenarios that
417  * allows us to avoid ever having to search the array/hash for free
418  * entries.
419  */
420  ReservedRefCountEntry = ref;
421  }
422  else
423  {
424  bool found;
425  Buffer buffer = ref->buffer;
426 
428  Assert(found);
431  }
432 }
433 
434 /*
435  * BufferIsPinned
436  * True iff the buffer is pinned (also checks for valid buffer number).
437  *
438  * NOTE: what we check here is that *this* backend holds a pin on
439  * the buffer. We do not care whether some other backend does.
440  */
441 #define BufferIsPinned(bufnum) \
442 ( \
443  !BufferIsValid(bufnum) ? \
444  false \
445  : \
446  BufferIsLocal(bufnum) ? \
447  (LocalRefCount[-(bufnum) - 1] > 0) \
448  : \
449  (GetPrivateRefCount(bufnum) > 0) \
450 )
451 
452 
453 static Buffer ReadBuffer_common(SMgrRelation smgr, char relpersistence,
454  ForkNumber forkNum, BlockNumber blockNum,
455  ReadBufferMode mode, BufferAccessStrategy strategy,
456  bool *hit);
457 static bool PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy);
458 static void PinBuffer_Locked(BufferDesc *buf);
459 static void UnpinBuffer(BufferDesc *buf);
460 static void BufferSync(int flags);
461 static uint32 WaitBufHdrUnlocked(BufferDesc *buf);
462 static int SyncOneBuffer(int buf_id, bool skip_recently_used,
463  WritebackContext *wb_context);
464 static void WaitIO(BufferDesc *buf);
465 static bool StartBufferIO(BufferDesc *buf, bool forInput);
466 static void TerminateBufferIO(BufferDesc *buf, bool clear_dirty,
467  uint32 set_flag_bits);
468 static void shared_buffer_write_error_callback(void *arg);
469 static void local_buffer_write_error_callback(void *arg);
470 static BufferDesc *BufferAlloc(SMgrRelation smgr,
471  char relpersistence,
472  ForkNumber forkNum,
473  BlockNumber blockNum,
474  BufferAccessStrategy strategy,
475  bool *foundPtr);
476 static void FlushBuffer(BufferDesc *buf, SMgrRelation reln);
477 static void FindAndDropRelationBuffers(RelFileLocator rlocator,
478  ForkNumber forkNum,
479  BlockNumber nForkBlock,
480  BlockNumber firstDelBlock);
481 static void RelationCopyStorageUsingBuffer(RelFileLocator srclocator,
482  RelFileLocator dstlocator,
483  ForkNumber forkNum, bool permanent);
484 static void AtProcExit_Buffers(int code, Datum arg);
485 static void CheckForBufferLeaks(void);
486 static int rlocator_comparator(const void *p1, const void *p2);
487 static inline int buffertag_comparator(const BufferTag *ba, const BufferTag *bb);
488 static inline int ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b);
489 static int ts_ckpt_progress_comparator(Datum a, Datum b, void *arg);
490 
491 
492 /*
493  * Implementation of PrefetchBuffer() for shared buffers.
494  */
495 PrefetchBufferResult
496 PrefetchSharedBuffer(SMgrRelation smgr_reln,
497  ForkNumber forkNum,
498  BlockNumber blockNum)
499 {
500  PrefetchBufferResult result = {InvalidBuffer, false};
501  BufferTag newTag; /* identity of requested block */
502  uint32 newHash; /* hash value for newTag */
503  LWLock *newPartitionLock; /* buffer partition lock for it */
504  int buf_id;
505 
506  Assert(BlockNumberIsValid(blockNum));
507 
508  /* create a tag so we can lookup the buffer */
509  InitBufferTag(&newTag, &smgr_reln->smgr_rlocator.locator,
510  forkNum, blockNum);
511 
512  /* determine its hash code and partition lock ID */
513  newHash = BufTableHashCode(&newTag);
514  newPartitionLock = BufMappingPartitionLock(newHash);
515 
516  /* see if the block is in the buffer pool already */
517  LWLockAcquire(newPartitionLock, LW_SHARED);
518  buf_id = BufTableLookup(&newTag, newHash);
519  LWLockRelease(newPartitionLock);
520 
521  /* If not in buffers, initiate prefetch */
522  if (buf_id < 0)
523  {
524 #ifdef USE_PREFETCH
525  /*
526  * Try to initiate an asynchronous read. This returns false in
527  * recovery if the relation file doesn't exist.
528  */
529  if (smgrprefetch(smgr_reln, forkNum, blockNum))
530  result.initiated_io = true;
531 #endif /* USE_PREFETCH */
532  }
533  else
534  {
535  /*
536  * Report the buffer it was in at that time. The caller may be able
537  * to avoid a buffer table lookup, but it's not pinned and it must be
538  * rechecked!
539  */
540  result.recent_buffer = buf_id + 1;
541  }
542 
543  /*
544  * If the block *is* in buffers, we do nothing. This is not really ideal:
545  * the block might be just about to be evicted, which would be stupid
546  * since we know we are going to need it soon. But the only easy answer
547  * is to bump the usage_count, which does not seem like a great solution:
548  * when the caller does ultimately touch the block, usage_count would get
549  * bumped again, resulting in too much favoritism for blocks that are
550  * involved in a prefetch sequence. A real fix would involve some
551  * additional per-buffer state, and it's not clear that there's enough of
552  * a problem to justify that.
553  */
554 
555  return result;
556 }
557 
558 /*
559  * PrefetchBuffer -- initiate asynchronous read of a block of a relation
560  *
561  * This is named by analogy to ReadBuffer but doesn't actually allocate a
562  * buffer. Instead it tries to ensure that a future ReadBuffer for the given
563  * block will not be delayed by the I/O. Prefetching is optional.
564  *
565  * There are three possible outcomes:
566  *
567  * 1. If the block is already cached, the result includes a valid buffer that
568  * could be used by the caller to avoid the need for a later buffer lookup, but
569  * it's not pinned, so the caller must recheck it.
570  *
571  * 2. If the kernel has been asked to initiate I/O, the initiated_io member is
572  * true. Currently there is no way to know if the data was already cached by
573  * the kernel and therefore didn't really initiate I/O, and no way to know when
574  * the I/O completes other than using synchronous ReadBuffer().
575  *
576  * 3. Otherwise, the buffer wasn't already cached by PostgreSQL, and either
577  * USE_PREFETCH is not defined (this build doesn't support prefetching due to
578  * lack of a kernel facility), or the underlying relation file wasn't found and
579  * we are in recovery. (If the relation file wasn't found and we are not in
580  * recovery, an error is raised).
581  */
582 PrefetchBufferResult
583 PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
584 {
585  Assert(RelationIsValid(reln));
586  Assert(BlockNumberIsValid(blockNum));
587 
588  if (RelationUsesLocalBuffers(reln))
589  {
590  /* see comments in ReadBufferExtended */
591  if (RELATION_IS_OTHER_TEMP(reln))
592  ereport(ERROR,
593  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
594  errmsg("cannot access temporary tables of other sessions")));
595 
596  /* pass it off to localbuf.c */
597  return PrefetchLocalBuffer(RelationGetSmgr(reln), forkNum, blockNum);
598  }
599  else
600  {
601  /* pass it to the shared buffer version */
602  return PrefetchSharedBuffer(RelationGetSmgr(reln), forkNum, blockNum);
603  }
604 }
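A hypothetical caller-side sketch of keeping a prefetch window ahead of the corresponding reads (the distance of 8, and rel, nblocks, blkno, are illustrative only; real callers size the window from effective_io_concurrency or the tablespace option):

    int         prefetch_distance = 8;  /* illustrative fixed window */
    Buffer      buf;

    for (BlockNumber blkno = 0; blkno < nblocks; blkno++)
    {
        if (blkno + prefetch_distance < nblocks)
            (void) PrefetchBuffer(rel, MAIN_FORKNUM, blkno + prefetch_distance);

        buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, NULL);
        /* ... process the page ... */
        ReleaseBuffer(buf);
    }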
605 
606 /*
607  * ReadRecentBuffer -- try to pin a block in a recently observed buffer
608  *
609  * Compared to ReadBuffer(), this avoids a buffer mapping lookup when it's
610  * successful. Return true if the buffer is valid and still has the expected
611  * tag. In that case, the buffer is pinned and the usage count is bumped.
612  */
613 bool
614 ReadRecentBuffer(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockNum,
615  Buffer recent_buffer)
616 {
617  BufferDesc *bufHdr;
618  BufferTag tag;
619  uint32 buf_state;
620  bool have_private_ref;
621 
622  Assert(BufferIsValid(recent_buffer));
623 
626  InitBufferTag(&tag, &rlocator, forkNum, blockNum);
627 
628  if (BufferIsLocal(recent_buffer))
629  {
630  int b = -recent_buffer - 1;
631 
632  bufHdr = GetLocalBufferDescriptor(b);
633  buf_state = pg_atomic_read_u32(&bufHdr->state);
634 
635  /* Is it still valid and holding the right tag? */
636  if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
637  {
638  /*
639  * Bump buffer's ref and usage counts. This is equivalent of
640  * PinBuffer for a shared buffer.
641  */
642  if (LocalRefCount[b] == 0)
643  {
645  {
646  buf_state += BUF_USAGECOUNT_ONE;
647  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
648  }
649  }
650  LocalRefCount[b]++;
652 
654 
655  return true;
656  }
657  }
658  else
659  {
660  bufHdr = GetBufferDescriptor(recent_buffer - 1);
661  have_private_ref = GetPrivateRefCount(recent_buffer) > 0;
662 
663  /*
664  * Do we already have this buffer pinned with a private reference? If
665  * so, it must be valid and it is safe to check the tag without
666  * locking. If not, we have to lock the header first and then check.
667  */
668  if (have_private_ref)
669  buf_state = pg_atomic_read_u32(&bufHdr->state);
670  else
671  buf_state = LockBufHdr(bufHdr);
672 
673  if ((buf_state & BM_VALID) && BufferTagsEqual(&tag, &bufHdr->tag))
674  {
675  /*
676  * It's now safe to pin the buffer. We can't pin first and ask
677  * questions later, because it might confuse code paths like
678  * InvalidateBuffer() if we pinned a random non-matching buffer.
679  */
680  if (have_private_ref)
681  PinBuffer(bufHdr, NULL); /* bump pin count */
682  else
683  PinBuffer_Locked(bufHdr); /* pin for first time */
684 
686 
687  return true;
688  }
689 
690  /* If we locked the header above, now unlock. */
691  if (!have_private_ref)
692  UnlockBufHdr(bufHdr, buf_state);
693  }
694 
695  return false;
696 }
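A hypothetical sketch of how a caller can combine the recent_buffer reported by PrefetchBuffer() with ReadRecentBuffer(), falling back to an ordinary read when the recheck fails (rel and blkno are assumed):

    Buffer      buf;
    PrefetchBufferResult pf;

    pf = PrefetchBuffer(rel, MAIN_FORKNUM, blkno);
    if (BufferIsValid(pf.recent_buffer) &&
        ReadRecentBuffer(rel->rd_locator, MAIN_FORKNUM, blkno, pf.recent_buffer))
        buf = pf.recent_buffer;     /* tag still matched: buffer is pinned now */
    else
        buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, NULL);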
697 
698 /*
699  * ReadBuffer -- a shorthand for ReadBufferExtended, for reading from main
700  * fork with RBM_NORMAL mode and default strategy.
701  */
702 Buffer
703 ReadBuffer(Relation reln, BlockNumber blockNum)
704 {
705  return ReadBufferExtended(reln, MAIN_FORKNUM, blockNum, RBM_NORMAL, NULL);
706 }
707 
708 /*
709  * ReadBufferExtended -- returns a buffer containing the requested
710  * block of the requested relation. If the blknum
711  * requested is P_NEW, extend the relation file and
712  * allocate a new block. (Caller is responsible for
713  * ensuring that only one backend tries to extend a
714  * relation at the same time!)
715  *
716  * Returns: the buffer number for the buffer containing
717  * the block read. The returned buffer has been pinned.
718  * Does not return on error --- elog's instead.
719  *
720  * Assume when this function is called, that reln has been opened already.
721  *
722  * In RBM_NORMAL mode, the page is read from disk, and the page header is
723  * validated. An error is thrown if the page header is not valid. (But
724  * note that an all-zero page is considered "valid"; see
725  * PageIsVerifiedExtended().)
726  *
727  * RBM_ZERO_ON_ERROR is like the normal mode, but if the page header is not
728  * valid, the page is zeroed instead of throwing an error. This is intended
729  * for non-critical data, where the caller is prepared to repair errors.
730  *
731  * In RBM_ZERO_AND_LOCK mode, if the page isn't in buffer cache already, it's
732  * filled with zeros instead of reading it from disk. Useful when the caller
733  * is going to fill the page from scratch, since this saves I/O and avoids
734  * unnecessary failure if the page-on-disk has corrupt page headers.
735  * The page is returned locked to ensure that the caller has a chance to
736  * initialize the page before it's made visible to others.
737  * Caution: do not use this mode to read a page that is beyond the relation's
738  * current physical EOF; that is likely to cause problems in md.c when
739  * the page is modified and written out. P_NEW is OK, though.
740  *
741  * RBM_ZERO_AND_CLEANUP_LOCK is the same as RBM_ZERO_AND_LOCK, but acquires
742  * a cleanup-strength lock on the page.
743  *
744  * RBM_NORMAL_NO_LOG mode is treated the same as RBM_NORMAL here.
745  *
746  * If strategy is not NULL, a nondefault buffer access strategy is used.
747  * See buffer/README for details.
748  */
749 Buffer
750 ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum,
751  ReadBufferMode mode, BufferAccessStrategy strategy)
752 {
753  bool hit;
754  Buffer buf;
755 
756  /*
757  * Reject attempts to read non-local temporary relations; we would be
758  * likely to get wrong data since we have no visibility into the owning
759  * session's local buffers.
760  */
761  if (RELATION_IS_OTHER_TEMP(reln))
762  ereport(ERROR,
763  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
764  errmsg("cannot access temporary tables of other sessions")));
765 
766  /*
767  * Read the buffer, and update pgstat counters to reflect a cache hit or
768  * miss.
769  */
771  buf = ReadBuffer_common(RelationGetSmgr(reln), reln->rd_rel->relpersistence,
772  forkNum, blockNum, mode, strategy, &hit);
773  if (hit)
775  return buf;
776 }
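Two illustrative calls showing the conventions documented above (a sketch only; rel, blkno and buf are assumed, and the extension-lock calls are the usual way callers serialize relation extension):

    /* extend the relation by one zero-filled block */
    LockRelationForExtension(rel, ExclusiveLock);
    buf = ReadBufferExtended(rel, MAIN_FORKNUM, P_NEW, RBM_NORMAL, NULL);
    UnlockRelationForExtension(rel, ExclusiveLock);

    /* read non-critical data, zeroing the page if its header fails validation */
    buf = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_ZERO_ON_ERROR, NULL);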
777 
778 
779 /*
780  * ReadBufferWithoutRelcache -- like ReadBufferExtended, but doesn't require
781  * a relcache entry for the relation.
782  *
783  * Pass permanent = true for a RELPERSISTENCE_PERMANENT relation, and
784  * permanent = false for a RELPERSISTENCE_UNLOGGED relation. This function
785  * cannot be used for temporary relations (and making that work might be
786  * difficult, unless we only want to read temporary relations for our own
787  * BackendId).
788  */
789 Buffer
790 ReadBufferWithoutRelcache(RelFileLocator rlocator, ForkNumber forkNum,
791  BlockNumber blockNum, ReadBufferMode mode,
792  BufferAccessStrategy strategy, bool permanent)
793 {
794  bool hit;
795 
796  SMgrRelation smgr = smgropen(rlocator, InvalidBackendId);
797 
798  return ReadBuffer_common(smgr, permanent ? RELPERSISTENCE_PERMANENT :
799  RELPERSISTENCE_UNLOGGED, forkNum, blockNum,
800  mode, strategy, &hit);
801 }
802 
803 
804 /*
805  * ReadBuffer_common -- common logic for all ReadBuffer variants
806  *
807  * *hit is set to true if the request was satisfied from shared buffer cache.
808  */
809 static Buffer
810 ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
811  BlockNumber blockNum, ReadBufferMode mode,
812  BufferAccessStrategy strategy, bool *hit)
813 {
814  BufferDesc *bufHdr;
815  Block bufBlock;
816  bool found;
817  bool isExtend;
818  bool isLocalBuf = SmgrIsTemp(smgr);
819 
820  *hit = false;
821 
822  /* Make sure we will have room to remember the buffer pin */
824 
825  isExtend = (blockNum == P_NEW);
826 
827  TRACE_POSTGRESQL_BUFFER_READ_START(forkNum, blockNum,
831  smgr->smgr_rlocator.backend,
832  isExtend);
833 
834  /* Substitute proper block number if caller asked for P_NEW */
835  if (isExtend)
836  {
837  blockNum = smgrnblocks(smgr, forkNum);
838  /* Fail if relation is already at maximum possible length */
839  if (blockNum == P_NEW)
840  ereport(ERROR,
841  (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
842  errmsg("cannot extend relation %s beyond %u blocks",
843  relpath(smgr->smgr_rlocator, forkNum),
844  P_NEW)));
845  }
846 
847  if (isLocalBuf)
848  {
849  bufHdr = LocalBufferAlloc(smgr, forkNum, blockNum, &found);
850  if (found)
852  else if (isExtend)
854  else if (mode == RBM_NORMAL || mode == RBM_NORMAL_NO_LOG ||
857  }
858  else
859  {
860  /*
861  * lookup the buffer. IO_IN_PROGRESS is set if the requested block is
862  * not currently in memory.
863  */
864  bufHdr = BufferAlloc(smgr, relpersistence, forkNum, blockNum,
865  strategy, &found);
866  if (found)
868  else if (isExtend)
870  else if (mode == RBM_NORMAL || mode == RBM_NORMAL_NO_LOG ||
873  }
874 
875  /* At this point we do NOT hold any locks. */
876 
877  /* if it was already in the buffer pool, we're done */
878  if (found)
879  {
880  if (!isExtend)
881  {
882  /* Just need to update stats before we exit */
883  *hit = true;
884  VacuumPageHit++;
885 
886  if (VacuumCostActive)
888 
889  TRACE_POSTGRESQL_BUFFER_READ_DONE(forkNum, blockNum,
893  smgr->smgr_rlocator.backend,
894  isExtend,
895  found);
896 
897  /*
898  * In RBM_ZERO_AND_LOCK mode the caller expects the page to be
899  * locked on return.
900  */
901  if (!isLocalBuf)
902  {
903  if (mode == RBM_ZERO_AND_LOCK)
905  LW_EXCLUSIVE);
906  else if (mode == RBM_ZERO_AND_CLEANUP_LOCK)
908  }
909 
910  return BufferDescriptorGetBuffer(bufHdr);
911  }
912 
913  /*
914  * We get here only in the corner case where we are trying to extend
915  * the relation but we found a pre-existing buffer marked BM_VALID.
916  * This can happen because mdread doesn't complain about reads beyond
917  * EOF (when zero_damaged_pages is ON) and so a previous attempt to
918  * read a block beyond EOF could have left a "valid" zero-filled
919  * buffer. Unfortunately, we have also seen this case occurring
920  * because of buggy Linux kernels that sometimes return an
921  * lseek(SEEK_END) result that doesn't account for a recent write. In
922  * that situation, the pre-existing buffer would contain valid data
923  * that we don't want to overwrite. Since the legitimate case should
924  * always have left a zero-filled buffer, complain if not PageIsNew.
925  */
926  bufBlock = isLocalBuf ? LocalBufHdrGetBlock(bufHdr) : BufHdrGetBlock(bufHdr);
927  if (!PageIsNew((Page) bufBlock))
928  ereport(ERROR,
929  (errmsg("unexpected data beyond EOF in block %u of relation %s",
930  blockNum, relpath(smgr->smgr_rlocator, forkNum)),
931  errhint("This has been seen to occur with buggy kernels; consider updating your system.")));
932 
933  /*
934  * We *must* do smgrextend before succeeding, else the page will not
935  * be reserved by the kernel, and the next P_NEW call will decide to
936  * return the same page. Clear the BM_VALID bit, do the StartBufferIO
937  * call that BufferAlloc didn't, and proceed.
938  */
939  if (isLocalBuf)
940  {
941  /* Only need to adjust flags */
942  uint32 buf_state = pg_atomic_read_u32(&bufHdr->state);
943 
944  Assert(buf_state & BM_VALID);
945  buf_state &= ~BM_VALID;
946  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
947  }
948  else
949  {
950  /*
951  * Loop to handle the very small possibility that someone re-sets
952  * BM_VALID between our clearing it and StartBufferIO inspecting
953  * it.
954  */
955  do
956  {
957  uint32 buf_state = LockBufHdr(bufHdr);
958 
959  Assert(buf_state & BM_VALID);
960  buf_state &= ~BM_VALID;
961  UnlockBufHdr(bufHdr, buf_state);
962  } while (!StartBufferIO(bufHdr, true));
963  }
964  }
965 
966  /*
967  * if we have gotten to this point, we have allocated a buffer for the
968  * page but its contents are not yet valid. IO_IN_PROGRESS is set for it,
969  * if it's a shared buffer.
970  *
971  * Note: if smgrextend fails, we will end up with a buffer that is
972  * allocated but not marked BM_VALID. P_NEW will still select the same
973  * block number (because the relation didn't get any longer on disk) and
974  * so future attempts to extend the relation will find the same buffer (if
975  * it's not been recycled) but come right back here to try smgrextend
976  * again.
977  */
978  Assert(!(pg_atomic_read_u32(&bufHdr->state) & BM_VALID)); /* spinlock not needed */
979 
980  bufBlock = isLocalBuf ? LocalBufHdrGetBlock(bufHdr) : BufHdrGetBlock(bufHdr);
981 
982  if (isExtend)
983  {
984  /* new buffers are zero-filled */
985  MemSet((char *) bufBlock, 0, BLCKSZ);
986  /* don't set checksum for all-zero page */
987  smgrextend(smgr, forkNum, blockNum, (char *) bufBlock, false);
988 
989  /*
990  * NB: we're *not* doing a ScheduleBufferTagForWriteback here,
991  * although we're essentially performing a write. At least on Linux,
992  * doing so defeats the 'delayed allocation' mechanism, leading to
993  * increased file fragmentation.
994  */
995  }
996  else
997  {
998  /*
999  * Read in the page, unless the caller intends to overwrite it and
1000  * just wants us to allocate a buffer.
1001  */
1003  MemSet((char *) bufBlock, 0, BLCKSZ);
1004  else
1005  {
1006  instr_time io_start,
1007  io_time;
1008 
1009  if (track_io_timing)
1010  INSTR_TIME_SET_CURRENT(io_start);
1011  else
1012  INSTR_TIME_SET_ZERO(io_start);
1013 
1014  smgrread(smgr, forkNum, blockNum, (char *) bufBlock);
1015 
1016  if (track_io_timing)
1017  {
1018  INSTR_TIME_SET_CURRENT(io_time);
1019  INSTR_TIME_SUBTRACT(io_time, io_start);
1022  }
1023 
1024  /* check for garbage data */
1025  if (!PageIsVerifiedExtended((Page) bufBlock, blockNum,
1027  {
1029  {
1030  ereport(WARNING,
1032  errmsg("invalid page in block %u of relation %s; zeroing out page",
1033  blockNum,
1034  relpath(smgr->smgr_rlocator, forkNum))));
1035  MemSet((char *) bufBlock, 0, BLCKSZ);
1036  }
1037  else
1038  ereport(ERROR,
1040  errmsg("invalid page in block %u of relation %s",
1041  blockNum,
1042  relpath(smgr->smgr_rlocator, forkNum))));
1043  }
1044  }
1045  }
1046 
1047  /*
1048  * In RBM_ZERO_AND_LOCK mode, grab the buffer content lock before marking
1049  * the page as valid, to make sure that no other backend sees the zeroed
1050  * page before the caller has had a chance to initialize it.
1051  *
1052  * Since no-one else can be looking at the page contents yet, there is no
1053  * difference between an exclusive lock and a cleanup-strength lock. (Note
1054  * that we cannot use LockBuffer() or LockBufferForCleanup() here, because
1055  * they assert that the buffer is already valid.)
1056  */
1058  !isLocalBuf)
1059  {
1061  }
1062 
1063  if (isLocalBuf)
1064  {
1065  /* Only need to adjust flags */
1066  uint32 buf_state = pg_atomic_read_u32(&bufHdr->state);
1067 
1068  buf_state |= BM_VALID;
1069  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
1070  }
1071  else
1072  {
1073  /* Set BM_VALID, terminate IO, and wake up any waiters */
1074  TerminateBufferIO(bufHdr, false, BM_VALID);
1075  }
1076 
1077  VacuumPageMiss++;
1078  if (VacuumCostActive)
1080 
1081  TRACE_POSTGRESQL_BUFFER_READ_DONE(forkNum, blockNum,
1083  smgr->smgr_rlocator.locator.dbOid,
1085  smgr->smgr_rlocator.backend,
1086  isExtend,
1087  found);
1088 
1089  return BufferDescriptorGetBuffer(bufHdr);
1090 }
1091 
1092 /*
1093  * BufferAlloc -- subroutine for ReadBuffer. Handles lookup of a shared
1094  * buffer. If no buffer exists already, selects a replacement
1095  * victim and evicts the old page, but does NOT read in new page.
1096  *
1097  * "strategy" can be a buffer replacement strategy object, or NULL for
1098  * the default strategy. The selected buffer's usage_count is advanced when
1099  * using the default strategy, but otherwise possibly not (see PinBuffer).
1100  *
1101  * The returned buffer is pinned and is already marked as holding the
1102  * desired page. If it already did have the desired page, *foundPtr is
1103  * set true. Otherwise, *foundPtr is set false and the buffer is marked
1104  * as IO_IN_PROGRESS; ReadBuffer will now need to do I/O to fill it.
1105  *
1106  * *foundPtr is actually redundant with the buffer's BM_VALID flag, but
1107  * we keep it for simplicity in ReadBuffer.
1108  *
1109  * No locks are held either at entry or exit.
1110  */
1111 static BufferDesc *
1112 BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
1113  BlockNumber blockNum,
1114  BufferAccessStrategy strategy,
1115  bool *foundPtr)
1116 {
1117  BufferTag newTag; /* identity of requested block */
1118  uint32 newHash; /* hash value for newTag */
1119  LWLock *newPartitionLock; /* buffer partition lock for it */
1120  BufferTag oldTag; /* previous identity of selected buffer */
1121  uint32 oldHash; /* hash value for oldTag */
1122  LWLock *oldPartitionLock; /* buffer partition lock for it */
1123  uint32 oldFlags;
1124  int buf_id;
1125  BufferDesc *buf;
1126  bool valid;
1127  uint32 buf_state;
1128 
1129  /* create a tag so we can lookup the buffer */
1130  InitBufferTag(&newTag, &smgr->smgr_rlocator.locator, forkNum, blockNum);
1131 
1132  /* determine its hash code and partition lock ID */
1133  newHash = BufTableHashCode(&newTag);
1134  newPartitionLock = BufMappingPartitionLock(newHash);
1135 
1136  /* see if the block is in the buffer pool already */
1137  LWLockAcquire(newPartitionLock, LW_SHARED);
1138  buf_id = BufTableLookup(&newTag, newHash);
1139  if (buf_id >= 0)
1140  {
1141  /*
1142  * Found it. Now, pin the buffer so no one can steal it from the
1143  * buffer pool, and check to see if the correct data has been loaded
1144  * into the buffer.
1145  */
1146  buf = GetBufferDescriptor(buf_id);
1147 
1148  valid = PinBuffer(buf, strategy);
1149 
1150  /* Can release the mapping lock as soon as we've pinned it */
1151  LWLockRelease(newPartitionLock);
1152 
1153  *foundPtr = true;
1154 
1155  if (!valid)
1156  {
1157  /*
1158  * We can only get here if (a) someone else is still reading in
1159  * the page, or (b) a previous read attempt failed. We have to
1160  * wait for any active read attempt to finish, and then set up our
1161  * own read attempt if the page is still not BM_VALID.
1162  * StartBufferIO does it all.
1163  */
1164  if (StartBufferIO(buf, true))
1165  {
1166  /*
1167  * If we get here, previous attempts to read the buffer must
1168  * have failed ... but we shall bravely try again.
1169  */
1170  *foundPtr = false;
1171  }
1172  }
1173 
1174  return buf;
1175  }
1176 
1177  /*
1178  * Didn't find it in the buffer pool. We'll have to initialize a new
1179  * buffer. Remember to unlock the mapping lock while doing the work.
1180  */
1181  LWLockRelease(newPartitionLock);
1182 
1183  /* Loop here in case we have to try another victim buffer */
1184  for (;;)
1185  {
1186  /*
1187  * Ensure, while the spinlock's not yet held, that there's a free
1188  * refcount entry.
1189  */
1191 
1192  /*
1193  * Select a victim buffer. The buffer is returned with its header
1194  * spinlock still held!
1195  */
1196  buf = StrategyGetBuffer(strategy, &buf_state);
1197 
1198  Assert(BUF_STATE_GET_REFCOUNT(buf_state) == 0);
1199 
1200  /* Must copy buffer flags while we still hold the spinlock */
1201  oldFlags = buf_state & BUF_FLAG_MASK;
1202 
1203  /* Pin the buffer and then release the buffer spinlock */
1205 
1206  /*
1207  * If the buffer was dirty, try to write it out. There is a race
1208  * condition here, in that someone might dirty it after we released it
1209  * above, or even while we are writing it out (since our share-lock
1210  * won't prevent hint-bit updates). We will recheck the dirty bit
1211  * after re-locking the buffer header.
1212  */
1213  if (oldFlags & BM_DIRTY)
1214  {
1215  /*
1216  * We need a share-lock on the buffer contents to write it out
1217  * (else we might write invalid data, eg because someone else is
1218  * compacting the page contents while we write). We must use a
1219  * conditional lock acquisition here to avoid deadlock. Even
1220  * though the buffer was not pinned (and therefore surely not
1221  * locked) when StrategyGetBuffer returned it, someone else could
1222  * have pinned and exclusive-locked it by the time we get here. If
1223  * we try to get the lock unconditionally, we'd block waiting for
1224  * them; if they later block waiting for us, deadlock ensues.
1225  * (This has been observed to happen when two backends are both
1226  * trying to split btree index pages, and the second one just
1227  * happens to be trying to split the page the first one got from
1228  * StrategyGetBuffer.)
1229  */
1231  LW_SHARED))
1232  {
1233  /*
1234  * If using a nondefault strategy, and writing the buffer
1235  * would require a WAL flush, let the strategy decide whether
1236  * to go ahead and write/reuse the buffer or to choose another
1237  * victim. We need lock to inspect the page LSN, so this
1238  * can't be done inside StrategyGetBuffer.
1239  */
1240  if (strategy != NULL)
1241  {
1242  XLogRecPtr lsn;
1243 
1244  /* Read the LSN while holding buffer header lock */
1245  buf_state = LockBufHdr(buf);
1246  lsn = BufferGetLSN(buf);
1247  UnlockBufHdr(buf, buf_state);
1248 
1249  if (XLogNeedsFlush(lsn) &&
1250  StrategyRejectBuffer(strategy, buf))
1251  {
1252  /* Drop lock/pin and loop around for another buffer */
1254  UnpinBuffer(buf);
1255  continue;
1256  }
1257  }
1258 
1259  /* OK, do the I/O */
1260  TRACE_POSTGRESQL_BUFFER_WRITE_DIRTY_START(forkNum, blockNum,
1262  smgr->smgr_rlocator.locator.dbOid,
1264 
1265  FlushBuffer(buf, NULL);
1267 
1269  &buf->tag);
1270 
1271  TRACE_POSTGRESQL_BUFFER_WRITE_DIRTY_DONE(forkNum, blockNum,
1273  smgr->smgr_rlocator.locator.dbOid,
1275  }
1276  else
1277  {
1278  /*
1279  * Someone else has locked the buffer, so give it up and loop
1280  * back to get another one.
1281  */
1282  UnpinBuffer(buf);
1283  continue;
1284  }
1285  }
1286 
1287  /*
1288  * To change the association of a valid buffer, we'll need to have
1289  * exclusive lock on both the old and new mapping partitions.
1290  */
1291  if (oldFlags & BM_TAG_VALID)
1292  {
1293  /*
1294  * Need to compute the old tag's hashcode and partition lock ID.
1295  * XXX is it worth storing the hashcode in BufferDesc so we need
1296  * not recompute it here? Probably not.
1297  */
1298  oldTag = buf->tag;
1299  oldHash = BufTableHashCode(&oldTag);
1300  oldPartitionLock = BufMappingPartitionLock(oldHash);
1301 
1302  /*
1303  * Must lock the lower-numbered partition first to avoid
1304  * deadlocks.
1305  */
1306  if (oldPartitionLock < newPartitionLock)
1307  {
1308  LWLockAcquire(oldPartitionLock, LW_EXCLUSIVE);
1309  LWLockAcquire(newPartitionLock, LW_EXCLUSIVE);
1310  }
1311  else if (oldPartitionLock > newPartitionLock)
1312  {
1313  LWLockAcquire(newPartitionLock, LW_EXCLUSIVE);
1314  LWLockAcquire(oldPartitionLock, LW_EXCLUSIVE);
1315  }
1316  else
1317  {
1318  /* only one partition, only one lock */
1319  LWLockAcquire(newPartitionLock, LW_EXCLUSIVE);
1320  }
1321  }
1322  else
1323  {
1324  /* if it wasn't valid, we need only the new partition */
1325  LWLockAcquire(newPartitionLock, LW_EXCLUSIVE);
1326  /* remember we have no old-partition lock or tag */
1327  oldPartitionLock = NULL;
1328  /* keep the compiler quiet about uninitialized variables */
1329  oldHash = 0;
1330  }
1331 
1332  /*
1333  * Try to make a hashtable entry for the buffer under its new tag.
1334  * This could fail because while we were writing someone else
1335  * allocated another buffer for the same block we want to read in.
1336  * Note that we have not yet removed the hashtable entry for the old
1337  * tag.
1338  */
1339  buf_id = BufTableInsert(&newTag, newHash, buf->buf_id);
1340 
1341  if (buf_id >= 0)
1342  {
1343  /*
1344  * Got a collision. Someone has already done what we were about to
1345  * do. We'll just handle this as if it were found in the buffer
1346  * pool in the first place. First, give up the buffer we were
1347  * planning to use.
1348  */
1349  UnpinBuffer(buf);
1350 
1351  /* Can give up that buffer's mapping partition lock now */
1352  if (oldPartitionLock != NULL &&
1353  oldPartitionLock != newPartitionLock)
1354  LWLockRelease(oldPartitionLock);
1355 
1356  /* remaining code should match code at top of routine */
1357 
1358  buf = GetBufferDescriptor(buf_id);
1359 
1360  valid = PinBuffer(buf, strategy);
1361 
1362  /* Can release the mapping lock as soon as we've pinned it */
1363  LWLockRelease(newPartitionLock);
1364 
1365  *foundPtr = true;
1366 
1367  if (!valid)
1368  {
1369  /*
1370  * We can only get here if (a) someone else is still reading
1371  * in the page, or (b) a previous read attempt failed. We
1372  * have to wait for any active read attempt to finish, and
1373  * then set up our own read attempt if the page is still not
1374  * BM_VALID. StartBufferIO does it all.
1375  */
1376  if (StartBufferIO(buf, true))
1377  {
1378  /*
1379  * If we get here, previous attempts to read the buffer
1380  * must have failed ... but we shall bravely try again.
1381  */
1382  *foundPtr = false;
1383  }
1384  }
1385 
1386  return buf;
1387  }
1388 
1389  /*
1390  * Need to lock the buffer header too in order to change its tag.
1391  */
1392  buf_state = LockBufHdr(buf);
1393 
1394  /*
1395  * Somebody could have pinned or re-dirtied the buffer while we were
1396  * doing the I/O and making the new hashtable entry. If so, we can't
1397  * recycle this buffer; we must undo everything we've done and start
1398  * over with a new victim buffer.
1399  */
1400  oldFlags = buf_state & BUF_FLAG_MASK;
1401  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1 && !(oldFlags & BM_DIRTY))
1402  break;
1403 
1404  UnlockBufHdr(buf, buf_state);
1405  BufTableDelete(&newTag, newHash);
1406  if (oldPartitionLock != NULL &&
1407  oldPartitionLock != newPartitionLock)
1408  LWLockRelease(oldPartitionLock);
1409  LWLockRelease(newPartitionLock);
1410  UnpinBuffer(buf);
1411  }
1412 
1413  /*
1414  * Okay, it's finally safe to rename the buffer.
1415  *
1416  * Clearing BM_VALID here is necessary, clearing the dirtybits is just
1417  * paranoia. We also reset the usage_count since any recency of use of
1418  * the old content is no longer relevant. (The usage_count starts out at
1419  * 1 so that the buffer can survive one clock-sweep pass.)
1420  *
1421  * Make sure BM_PERMANENT is set for buffers that must be written at every
1422  * checkpoint. Unlogged buffers only need to be written at shutdown
1423  * checkpoints, except for their "init" forks, which need to be treated
1424  * just like permanent relations.
1425  */
1426  buf->tag = newTag;
1427  buf_state &= ~(BM_VALID | BM_DIRTY | BM_JUST_DIRTIED |
1430  if (relpersistence == RELPERSISTENCE_PERMANENT || forkNum == INIT_FORKNUM)
1431  buf_state |= BM_TAG_VALID | BM_PERMANENT | BUF_USAGECOUNT_ONE;
1432  else
1433  buf_state |= BM_TAG_VALID | BUF_USAGECOUNT_ONE;
1434 
1435  UnlockBufHdr(buf, buf_state);
1436 
1437  if (oldPartitionLock != NULL)
1438  {
1439  BufTableDelete(&oldTag, oldHash);
1440  if (oldPartitionLock != newPartitionLock)
1441  LWLockRelease(oldPartitionLock);
1442  }
1443 
1444  LWLockRelease(newPartitionLock);
1445 
1446  /*
1447  * Buffer contents are currently invalid. Try to obtain the right to
1448  * start I/O. If StartBufferIO returns false, then someone else managed
1449  * to read it before we did, so there's nothing left for BufferAlloc() to
1450  * do.
1451  */
1452  if (StartBufferIO(buf, true))
1453  *foundPtr = false;
1454  else
1455  *foundPtr = true;
1456 
1457  return buf;
1458 }
1459 
1460 /*
1461  * InvalidateBuffer -- mark a shared buffer invalid and return it to the
1462  * freelist.
1463  *
1464  * The buffer header spinlock must be held at entry. We drop it before
1465  * returning. (This is sane because the caller must have locked the
1466  * buffer in order to be sure it should be dropped.)
1467  *
1468  * This is used only in contexts such as dropping a relation. We assume
1469  * that no other backend could possibly be interested in using the page,
1470  * so the only reason the buffer might be pinned is if someone else is
1471  * trying to write it out. We have to let them finish before we can
1472  * reclaim the buffer.
1473  *
1474  * The buffer could get reclaimed by someone else while we are waiting
1475  * to acquire the necessary locks; if so, don't mess it up.
1476  */
1477 static void
1478 InvalidateBuffer(BufferDesc *buf)
1479 {
1480  BufferTag oldTag;
1481  uint32 oldHash; /* hash value for oldTag */
1482  LWLock *oldPartitionLock; /* buffer partition lock for it */
1483  uint32 oldFlags;
1484  uint32 buf_state;
1485 
1486  /* Save the original buffer tag before dropping the spinlock */
1487  oldTag = buf->tag;
1488 
1489  buf_state = pg_atomic_read_u32(&buf->state);
1490  Assert(buf_state & BM_LOCKED);
1491  UnlockBufHdr(buf, buf_state);
1492 
1493  /*
1494  * Need to compute the old tag's hashcode and partition lock ID. XXX is it
1495  * worth storing the hashcode in BufferDesc so we need not recompute it
1496  * here? Probably not.
1497  */
1498  oldHash = BufTableHashCode(&oldTag);
1499  oldPartitionLock = BufMappingPartitionLock(oldHash);
1500 
1501 retry:
1502 
1503  /*
1504  * Acquire exclusive mapping lock in preparation for changing the buffer's
1505  * association.
1506  */
1507  LWLockAcquire(oldPartitionLock, LW_EXCLUSIVE);
1508 
1509  /* Re-lock the buffer header */
1510  buf_state = LockBufHdr(buf);
1511 
1512  /* If it's changed while we were waiting for lock, do nothing */
1513  if (!BufferTagsEqual(&buf->tag, &oldTag))
1514  {
1515  UnlockBufHdr(buf, buf_state);
1516  LWLockRelease(oldPartitionLock);
1517  return;
1518  }
1519 
1520  /*
1521  * We assume the only reason for it to be pinned is that someone else is
1522  * flushing the page out. Wait for them to finish. (This could be an
1523  * infinite loop if the refcount is messed up... it would be nice to time
1524  * out after a while, but there seems no way to be sure how many loops may
1525  * be needed. Note that if the other guy has pinned the buffer but not
1526  * yet done StartBufferIO, WaitIO will fall through and we'll effectively
1527  * be busy-looping here.)
1528  */
1529  if (BUF_STATE_GET_REFCOUNT(buf_state) != 0)
1530  {
1531  UnlockBufHdr(buf, buf_state);
1532  LWLockRelease(oldPartitionLock);
1533  /* safety check: should definitely not be our *own* pin */
1535  elog(ERROR, "buffer is pinned in InvalidateBuffer");
1536  WaitIO(buf);
1537  goto retry;
1538  }
1539 
1540  /*
1541  * Clear out the buffer's tag and flags. We must do this to ensure that
1542  * linear scans of the buffer array don't think the buffer is valid.
1543  */
1544  oldFlags = buf_state & BUF_FLAG_MASK;
1545  ClearBufferTag(&buf->tag);
1546  buf_state &= ~(BUF_FLAG_MASK | BUF_USAGECOUNT_MASK);
1547  UnlockBufHdr(buf, buf_state);
1548 
1549  /*
1550  * Remove the buffer from the lookup hashtable, if it was in there.
1551  */
1552  if (oldFlags & BM_TAG_VALID)
1553  BufTableDelete(&oldTag, oldHash);
1554 
1555  /*
1556  * Done with mapping lock.
1557  */
1558  LWLockRelease(oldPartitionLock);
1559 
1560  /*
1561  * Insert the buffer at the head of the list of free buffers.
1562  */
1564 }
1565 
1566 /*
1567  * MarkBufferDirty
1568  *
1569  * Marks buffer contents as dirty (actual write happens later).
1570  *
1571  * Buffer must be pinned and exclusive-locked. (If caller does not hold
1572  * exclusive lock, then somebody could be in process of writing the buffer,
1573  * leading to risk of bad data written to disk.)
1574  */
1575 void
1576 MarkBufferDirty(Buffer buffer)
1577 {
1578  BufferDesc *bufHdr;
1579  uint32 buf_state;
1580  uint32 old_buf_state;
1581 
1582  if (!BufferIsValid(buffer))
1583  elog(ERROR, "bad buffer ID: %d", buffer);
1584 
1585  if (BufferIsLocal(buffer))
1586  {
1588  return;
1589  }
1590 
1591  bufHdr = GetBufferDescriptor(buffer - 1);
1592 
1595  LW_EXCLUSIVE));
1596 
1597  old_buf_state = pg_atomic_read_u32(&bufHdr->state);
1598  for (;;)
1599  {
1600  if (old_buf_state & BM_LOCKED)
1601  old_buf_state = WaitBufHdrUnlocked(bufHdr);
1602 
1603  buf_state = old_buf_state;
1604 
1605  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
1606  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
1607 
1608  if (pg_atomic_compare_exchange_u32(&bufHdr->state, &old_buf_state,
1609  buf_state))
1610  break;
1611  }
1612 
1613  /*
1614  * If the buffer was not dirty already, do vacuum accounting.
1615  */
1616  if (!(old_buf_state & BM_DIRTY))
1617  {
1618  VacuumPageDirty++;
1620  if (VacuumCostActive)
1622  }
1623 }
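For a WAL-logged change, MarkBufferDirty() is normally embedded in the standard critical-section pattern sketched below (an illustration, not code from this file; buf, rel, and the rmid/info record identifiers are assumed to come from the caller):

    START_CRIT_SECTION();

    /* ... apply the change to the page ... */
    MarkBufferDirty(buf);

    if (RelationNeedsWAL(rel))
    {
        XLogRecPtr  recptr;

        XLogBeginInsert();
        XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
        recptr = XLogInsert(rmid, info);    /* caller-specific resource manager/record */
        PageSetLSN(BufferGetPage(buf), recptr);
    }

    END_CRIT_SECTION();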
1624 
1625 /*
1626  * ReleaseAndReadBuffer -- combine ReleaseBuffer() and ReadBuffer()
1627  *
1628  * Formerly, this saved one cycle of acquiring/releasing the BufMgrLock
1629  * compared to calling the two routines separately. Now it's mainly just
1630  * a convenience function. However, if the passed buffer is valid and
1631  * already contains the desired block, we just return it as-is; and that
1632  * does save considerable work compared to a full release and reacquire.
1633  *
1634  * Note: it is OK to pass buffer == InvalidBuffer, indicating that no old
1635  * buffer actually needs to be released. This case is the same as ReadBuffer,
1636  * but can save some tests in the caller.
1637  */
1638 Buffer
1639 ReleaseAndReadBuffer(Buffer buffer,
1640  Relation relation,
1641  BlockNumber blockNum)
1642 {
1643  ForkNumber forkNum = MAIN_FORKNUM;
1644  BufferDesc *bufHdr;
1645 
1646  if (BufferIsValid(buffer))
1647  {
1649  if (BufferIsLocal(buffer))
1650  {
1651  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
1652  if (bufHdr->tag.blockNum == blockNum &&
1653  BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
1654  BufTagGetForkNum(&bufHdr->tag) == forkNum)
1655  return buffer;
1657  LocalRefCount[-buffer - 1]--;
1658  }
1659  else
1660  {
1661  bufHdr = GetBufferDescriptor(buffer - 1);
1662  /* we have pin, so it's ok to examine tag without spinlock */
1663  if (bufHdr->tag.blockNum == blockNum &&
1664  BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
1665  BufTagGetForkNum(&bufHdr->tag) == forkNum)
1666  return buffer;
1667  UnpinBuffer(bufHdr);
1668  }
1669  }
1670 
1671  return ReadBuffer(relation, blockNum);
1672 }
1673 
1674 /*
1675  * PinBuffer -- make buffer unavailable for replacement.
1676  *
1677  * For the default access strategy, the buffer's usage_count is incremented
1678  * when we first pin it; for other strategies we just make sure the usage_count
1679  * isn't zero. (The idea of the latter is that we don't want synchronized
1680  * heap scans to inflate the count, but we need it to not be zero to discourage
1681  * other backends from stealing buffers from our ring. As long as we cycle
1682  * through the ring faster than the global clock-sweep cycles, buffers in
1683  * our ring won't be chosen as victims for replacement by other backends.)
1684  *
1685  * This should be applied only to shared buffers, never local ones.
1686  *
1687  * Since buffers are pinned/unpinned very frequently, pin buffers without
1688  * taking the buffer header lock; instead update the state variable in loop of
1689  * CAS operations. Hopefully it's just a single CAS.
1690  *
1691  * Note that ResourceOwnerEnlargeBuffers must have been done already.
1692  *
1693  * Returns true if buffer is BM_VALID, else false. This provision allows
1694  * some callers to avoid an extra spinlock cycle.
1695  */
1696 static bool
1697 PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy)
1698 {
1700  bool result;
1701  PrivateRefCountEntry *ref;
1702 
1703  ref = GetPrivateRefCountEntry(b, true);
1704 
1705  if (ref == NULL)
1706  {
1707  uint32 buf_state;
1708  uint32 old_buf_state;
1709 
1711  ref = NewPrivateRefCountEntry(b);
1712 
1713  old_buf_state = pg_atomic_read_u32(&buf->state);
1714  for (;;)
1715  {
1716  if (old_buf_state & BM_LOCKED)
1717  old_buf_state = WaitBufHdrUnlocked(buf);
1718 
1719  buf_state = old_buf_state;
1720 
1721  /* increase refcount */
1722  buf_state += BUF_REFCOUNT_ONE;
1723 
1724  if (strategy == NULL)
1725  {
1726  /* Default case: increase usagecount unless already max. */
1728  buf_state += BUF_USAGECOUNT_ONE;
1729  }
1730  else
1731  {
1732  /*
1733  * Ring buffers shouldn't evict others from pool. Thus we
1734  * don't make usagecount more than 1.
1735  */
1736  if (BUF_STATE_GET_USAGECOUNT(buf_state) == 0)
1737  buf_state += BUF_USAGECOUNT_ONE;
1738  }
1739 
1740  if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
1741  buf_state))
1742  {
1743  result = (buf_state & BM_VALID) != 0;
1744 
1745  /*
1746  * Assume that we acquired a buffer pin for the purposes of
1747  * Valgrind buffer client checks (even in !result case) to
1748  * keep things simple. Buffers that are unsafe to access are
1749  * not generally guaranteed to be marked undefined or
1750  * non-accessible in any case.
1751  */
1753  break;
1754  }
1755  }
1756  }
1757  else
1758  {
1759  /*
1760  * If we previously pinned the buffer, it must surely be valid.
1761  *
1762  * Note: We deliberately avoid a Valgrind client request here.
1763  * Individual access methods can optionally superimpose buffer page
1764  * client requests on top of our client requests to enforce that
1765  * buffers are only accessed while locked (and pinned). It's possible
1766  * that the buffer page is legitimately non-accessible here. We
1767  * cannot meddle with that.
1768  */
1769  result = true;
1770  }
1771 
1772  ref->refcount++;
1773  Assert(ref->refcount > 0);
1775  return result;
1776 }
1777 
1778 /*
1779  * PinBuffer_Locked -- as above, but caller already locked the buffer header.
1780  * The spinlock is released before return.
1781  *
1782  * As this function is called with the spinlock held, the caller has to
1783  * previously call ReservePrivateRefCountEntry().
1784  *
1785  * Currently, no callers of this function want to modify the buffer's
1786  * usage_count at all, so there's no need for a strategy parameter.
1787  * Also we don't bother with a BM_VALID test (the caller could check that for
1788  * itself).
1789  *
1790  * Also all callers only ever use this function when it's known that the
1791  * buffer can't have a preexisting pin by this backend. That allows us to skip
1792  * searching the private refcount array & hash, which is a boon, because the
1793  * spinlock is still held.
1794  *
1795  * Note: use of this routine is frequently mandatory, not just an optimization
1796  * to save a spin lock/unlock cycle, because we need to pin a buffer before
1797  * its state can change under us.
1798  */
1799 static void
1800 PinBuffer_Locked(BufferDesc *buf)
1801 {
1802  Buffer b;
1803  PrivateRefCountEntry *ref;
1804  uint32 buf_state;
1805 
1806  /*
1807  * As explained, we don't expect any preexisting pins. That allows us to
1808  * manipulate the PrivateRefCount after releasing the spinlock.
1809  */
1811 
1812  /*
1813  * Buffer can't have a preexisting pin, so mark its page as defined to
1814  * Valgrind (this is similar to the PinBuffer() case where the backend
1815  * doesn't already have a buffer pin)
1816  */
1818 
1819  /*
1820  * Since we hold the buffer spinlock, we can update the buffer state and
1821  * release the lock in one operation.
1822  */
1823  buf_state = pg_atomic_read_u32(&buf->state);
1824  Assert(buf_state & BM_LOCKED);
1825  buf_state += BUF_REFCOUNT_ONE;
1826  UnlockBufHdr(buf, buf_state);
1827 
1829 
1830  ref = NewPrivateRefCountEntry(b);
1831  ref->refcount++;
1832 
1834 }
1835 
1836 /*
1837  * UnpinBuffer -- make buffer available for replacement.
1838  *
1839  * This should be applied only to shared buffers, never local ones. This
1840  * always adjusts CurrentResourceOwner.
1841  */
1842 static void
1843 UnpinBuffer(BufferDesc *buf)
1844 {
1845  PrivateRefCountEntry *ref;
1847 
1848  /* not moving as we're likely deleting it soon anyway */
1849  ref = GetPrivateRefCountEntry(b, false);
1850  Assert(ref != NULL);
1851 
1853 
1854  Assert(ref->refcount > 0);
1855  ref->refcount--;
1856  if (ref->refcount == 0)
1857  {
1858  uint32 buf_state;
1859  uint32 old_buf_state;
1860 
1861  /*
1862  * Mark buffer non-accessible to Valgrind.
1863  *
1864  * Note that the buffer may have already been marked non-accessible
1865  * within access method code that enforces that buffers are only
1866  * accessed while a buffer lock is held.
1867  */
1869 
1870  /* I'd better not still hold the buffer content lock */
1872 
1873  /*
1874  * Decrement the shared reference count.
1875  *
1876  * Since buffer spinlock holder can update status using just write,
1877  * it's not safe to use atomic decrement here; thus use a CAS loop.
1878  */
1879  old_buf_state = pg_atomic_read_u32(&buf->state);
1880  for (;;)
1881  {
1882  if (old_buf_state & BM_LOCKED)
1883  old_buf_state = WaitBufHdrUnlocked(buf);
1884 
1885  buf_state = old_buf_state;
1886 
1887  buf_state -= BUF_REFCOUNT_ONE;
1888 
1889  if (pg_atomic_compare_exchange_u32(&buf->state, &old_buf_state,
1890  buf_state))
1891  break;
1892  }
1893 
1894  /* Support LockBufferForCleanup() */
1895  if (buf_state & BM_PIN_COUNT_WAITER)
1896  {
1897  /*
1898  * Acquire the buffer header lock, re-check that there's a waiter.
1899  * Another backend could have unpinned this buffer, and already
1900  * woken up the waiter. There's no danger of the buffer being
1901  * replaced after we unpinned it above, as it's pinned by the
1902  * waiter.
1903  */
1904  buf_state = LockBufHdr(buf);
1905 
1906  if ((buf_state & BM_PIN_COUNT_WAITER) &&
1907  BUF_STATE_GET_REFCOUNT(buf_state) == 1)
1908  {
1909  /* we just released the last pin other than the waiter's */
1910  int wait_backend_pgprocno = buf->wait_backend_pgprocno;
1911 
1912  buf_state &= ~BM_PIN_COUNT_WAITER;
1913  UnlockBufHdr(buf, buf_state);
1914  ProcSendSignal(wait_backend_pgprocno);
1915  }
1916  else
1917  UnlockBufHdr(buf, buf_state);
1918  }
1920  }
1921 }
1922 
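/*
 * Editor's note (not part of bufmgr.c): a minimal, standalone sketch of
 * the compare-and-swap retry loop used by UnpinBuffer() above.  Because a
 * buffer-header "spinlock" holder may update the state word with a plain
 * write, a bare atomic decrement is unsafe; the code instead waits for the
 * lock bit to clear and retries the CAS.  Names prefixed with sketch_ /
 * SKETCH_ are hypothetical stand-ins for BufferDesc.state, BM_LOCKED and
 * BUF_REFCOUNT_ONE.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_LOCKED       ((uint32_t) 1 << 31)    /* stand-in for BM_LOCKED */
#define SKETCH_REFCOUNT_ONE ((uint32_t) 1)          /* stand-in for BUF_REFCOUNT_ONE */

static uint32_t
sketch_unpin(_Atomic uint32_t *state)
{
    uint32_t    old = atomic_load(state);

    for (;;)
    {
        uint32_t    newval;

        /* never modify the word while the header lock bit is set */
        while (old & SKETCH_LOCKED)
            old = atomic_load(state);

        newval = old - SKETCH_REFCOUNT_ONE;

        /* on failure, 'old' is refreshed with the word's current value */
        if (atomic_compare_exchange_weak(state, &old, newval))
            return newval;
    }
}

int
main(void)
{
    _Atomic uint32_t state = 3;     /* pretend refcount is 3, lock bit clear */

    printf("state after unpin: %u\n", sketch_unpin(&state));    /* prints 2 */
    return 0;
}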
1923 #define ST_SORT sort_checkpoint_bufferids
1924 #define ST_ELEMENT_TYPE CkptSortItem
1925 #define ST_COMPARE(a, b) ckpt_buforder_comparator(a, b)
1926 #define ST_SCOPE static
1927 #define ST_DEFINE
1928 #include <lib/sort_template.h>
1929 
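/*
 * Editor's note (not part of bufmgr.c): the block of ST_* defines above
 * instantiates the generic sort template in lib/sort_template.h.  The
 * include expands to a type-specialized quicksort whose name comes from
 * ST_SORT, roughly
 *
 *     static void sort_checkpoint_bufferids(CkptSortItem *begin, size_t n);
 *
 * which BufferSync() below calls on the CkptBufferIds array, with
 * ckpt_buforder_comparator() deciding the order.  A comparable
 * instantiation for plain integers would look like this (illustrative
 * only, names are hypothetical):
 *
 *     #define ST_SORT sort_int_example
 *     #define ST_ELEMENT_TYPE int
 *     #define ST_COMPARE(a, b) (*(a) < *(b) ? -1 : (*(a) > *(b) ? 1 : 0))
 *     #define ST_SCOPE static
 *     #define ST_DEFINE
 *     #include "lib/sort_template.h"
 */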
1930 /*
1931  * BufferSync -- Write out all dirty buffers in the pool.
1932  *
1933  * This is called at checkpoint time to write out all dirty shared buffers.
1934  * The checkpoint request flags should be passed in. If CHECKPOINT_IMMEDIATE
1935  * is set, we disable delays between writes; if CHECKPOINT_IS_SHUTDOWN,
1936  * CHECKPOINT_END_OF_RECOVERY or CHECKPOINT_FLUSH_ALL is set, we write even
1937  * unlogged buffers, which are otherwise skipped. The remaining flags
1938  * currently have no effect here.
1939  */
1940 static void
1941 BufferSync(int flags)
1942 {
1943  uint32 buf_state;
1944  int buf_id;
1945  int num_to_scan;
1946  int num_spaces;
1947  int num_processed;
1948  int num_written;
1949  CkptTsStatus *per_ts_stat = NULL;
1950  Oid last_tsid;
1951  binaryheap *ts_heap;
1952  int i;
1953  int mask = BM_DIRTY;
1954  WritebackContext wb_context;
1955 
1956  /* Make sure we can handle the pin inside SyncOneBuffer */
1958 
1959  /*
1960  * Unless this is a shutdown checkpoint or we have been explicitly told,
1961  * we write only permanent, dirty buffers. But at shutdown or end of
1962  * recovery, we write all dirty buffers.
1963  */
1966  mask |= BM_PERMANENT;
1967 
1968  /*
1969  * Loop over all buffers, and mark the ones that need to be written with
1970  * BM_CHECKPOINT_NEEDED. Count them as we go (num_to_scan), so that we
1971  * can estimate how much work needs to be done.
1972  *
1973  * This allows us to write only those pages that were dirty when the
1974  * checkpoint began, and not those that get dirtied while it proceeds.
1975  * Whenever a page with BM_CHECKPOINT_NEEDED is written out, either by us
1976  * later in this function, or by normal backends or the bgwriter cleaning
1977  * scan, the flag is cleared. Any buffer dirtied after this point won't
1978  * have the flag set.
1979  *
1980  * Note that if we fail to write some buffer, we may leave buffers with
1981  * BM_CHECKPOINT_NEEDED still set. This is OK since any such buffer would
1982  * certainly need to be written for the next checkpoint attempt, too.
1983  */
1984  num_to_scan = 0;
1985  for (buf_id = 0; buf_id < NBuffers; buf_id++)
1986  {
1987  BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
1988 
1989  /*
1990  * Header spinlock is enough to examine BM_DIRTY, see comment in
1991  * SyncOneBuffer.
1992  */
1993  buf_state = LockBufHdr(bufHdr);
1994 
1995  if ((buf_state & mask) == mask)
1996  {
1997  CkptSortItem *item;
1998 
1999  buf_state |= BM_CHECKPOINT_NEEDED;
2000 
2001  item = &CkptBufferIds[num_to_scan++];
2002  item->buf_id = buf_id;
2003  item->tsId = bufHdr->tag.spcOid;
2004  item->relNumber = BufTagGetRelNumber(&bufHdr->tag);
2005  item->forkNum = BufTagGetForkNum(&bufHdr->tag);
2006  item->blockNum = bufHdr->tag.blockNum;
2007  }
2008 
2009  UnlockBufHdr(bufHdr, buf_state);
2010 
2011  /* Check for barrier events in case NBuffers is large. */
2014  }
2015 
2016  if (num_to_scan == 0)
2017  return; /* nothing to do */
2018 
2020 
2021  TRACE_POSTGRESQL_BUFFER_SYNC_START(NBuffers, num_to_scan);
2022 
2023  /*
2024  * Sort buffers that need to be written to reduce the likelihood of random
2025  * IO. The sorting is also important for the implementation of balancing
2026  * writes between tablespaces. Without balancing writes we'd potentially
2027  * end up writing to the tablespaces one-by-one; possibly overloading the
2028  * underlying system.
2029  */
2030  sort_checkpoint_bufferids(CkptBufferIds, num_to_scan);
2031 
2032  num_spaces = 0;
2033 
2034  /*
2035  * Allocate progress status for each tablespace with buffers that need to
2036  * be flushed. This requires the to-be-flushed array to be sorted.
2037  */
2038  last_tsid = InvalidOid;
2039  for (i = 0; i < num_to_scan; i++)
2040  {
2041  CkptTsStatus *s;
2042  Oid cur_tsid;
2043 
2044  cur_tsid = CkptBufferIds[i].tsId;
2045 
2046  /*
2047  * Grow array of per-tablespace status structs, every time a new
2048  * tablespace is found.
2049  */
2050  if (last_tsid == InvalidOid || last_tsid != cur_tsid)
2051  {
2052  Size sz;
2053 
2054  num_spaces++;
2055 
2056  /*
2057  * Not worth adding grow-by-power-of-2 logic here - even with a
2058  * few hundred tablespaces this should be fine.
2059  */
2060  sz = sizeof(CkptTsStatus) * num_spaces;
2061 
2062  if (per_ts_stat == NULL)
2063  per_ts_stat = (CkptTsStatus *) palloc(sz);
2064  else
2065  per_ts_stat = (CkptTsStatus *) repalloc(per_ts_stat, sz);
2066 
2067  s = &per_ts_stat[num_spaces - 1];
2068  memset(s, 0, sizeof(*s));
2069  s->tsId = cur_tsid;
2070 
2071  /*
2072  * The first buffer in this tablespace. As CkptBufferIds is sorted
2073  * by tablespace all (s->num_to_scan) buffers in this tablespace
2074  * will follow afterwards.
2075  */
2076  s->index = i;
2077 
2078  /*
2079  * progress_slice will be determined once we know how many buffers
2080  * are in each tablespace, i.e. after this loop.
2081  */
2082 
2083  last_tsid = cur_tsid;
2084  }
2085  else
2086  {
2087  s = &per_ts_stat[num_spaces - 1];
2088  }
2089 
2090  s->num_to_scan++;
2091 
2092  /* Check for barrier events. */
2095  }
2096 
2097  Assert(num_spaces > 0);
2098 
2099  /*
2100  * Build a min-heap over the write-progress in the individual tablespaces,
2101  * and compute how large a portion of the total progress a single
2102  * processed buffer is.
2103  */
2104  ts_heap = binaryheap_allocate(num_spaces,
2106  NULL);
2107 
2108  for (i = 0; i < num_spaces; i++)
2109  {
2110  CkptTsStatus *ts_stat = &per_ts_stat[i];
2111 
2112  ts_stat->progress_slice = (float8) num_to_scan / ts_stat->num_to_scan;
2113 
2114  binaryheap_add_unordered(ts_heap, PointerGetDatum(ts_stat));
2115  }
2116 
2117  binaryheap_build(ts_heap);
2118 
2119  /*
2120  * Iterate through to-be-checkpointed buffers and write the ones (still)
2121  * marked with BM_CHECKPOINT_NEEDED. The writes are balanced between
2122  * tablespaces; otherwise the sorting would lead to only one tablespace
2123  * receiving writes at a time, making inefficient use of the hardware.
2124  */
2125  num_processed = 0;
2126  num_written = 0;
2127  while (!binaryheap_empty(ts_heap))
2128  {
2129  BufferDesc *bufHdr = NULL;
2130  CkptTsStatus *ts_stat = (CkptTsStatus *)
2132 
2133  buf_id = CkptBufferIds[ts_stat->index].buf_id;
2134  Assert(buf_id != -1);
2135 
2136  bufHdr = GetBufferDescriptor(buf_id);
2137 
2138  num_processed++;
2139 
2140  /*
2141  * We don't need to acquire the lock here, because we're only looking
2142  * at a single bit. It's possible that someone else writes the buffer
2143  * and clears the flag right after we check, but that doesn't matter
2144  * since SyncOneBuffer will then do nothing. However, there is a
2145  * further race condition: it's conceivable that between the time we
2146  * examine the bit here and the time SyncOneBuffer acquires the lock,
2147  * someone else not only wrote the buffer but replaced it with another
2148  * page and dirtied it. In that improbable case, SyncOneBuffer will
2149  * write the buffer though we didn't need to. It doesn't seem worth
2150  * guarding against this, though.
2151  */
2153  {
2154  if (SyncOneBuffer(buf_id, false, &wb_context) & BUF_WRITTEN)
2155  {
2156  TRACE_POSTGRESQL_BUFFER_SYNC_WRITTEN(buf_id);
2158  num_written++;
2159  }
2160  }
2161 
2162  /*
2163  * Measure progress independently of actually having to flush the buffer
2164  * - otherwise writing becomes unbalanced.
2165  */
2166  ts_stat->progress += ts_stat->progress_slice;
2167  ts_stat->num_scanned++;
2168  ts_stat->index++;
2169 
2170  /* Have all the buffers from the tablespace been processed? */
2171  if (ts_stat->num_scanned == ts_stat->num_to_scan)
2172  {
2173  binaryheap_remove_first(ts_heap);
2174  }
2175  else
2176  {
2177  /* update heap with the new progress */
2178  binaryheap_replace_first(ts_heap, PointerGetDatum(ts_stat));
2179  }
2180 
2181  /*
2182  * Sleep to throttle our I/O rate.
2183  *
2184  * (This will check for barrier events even if it doesn't sleep.)
2185  */
2186  CheckpointWriteDelay(flags, (double) num_processed / num_to_scan);
2187  }
2188 
2189  /* issue all pending flushes */
2190  IssuePendingWritebacks(&wb_context);
2191 
2192  pfree(per_ts_stat);
2193  per_ts_stat = NULL;
2194  binaryheap_free(ts_heap);
2195 
2196  /*
2197  * Update checkpoint statistics. As noted above, this doesn't include
2198  * buffers written by other backends or bgwriter scan.
2199  */
2200  CheckpointStats.ckpt_bufs_written += num_written;
2201 
2202  TRACE_POSTGRESQL_BUFFER_SYNC_DONE(NBuffers, num_written, num_to_scan);
2203 }
2204 
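/*
 * Editor's note (not part of bufmgr.c): a standalone worked example of the
 * progress_slice arithmetic used in BufferSync() above.  Every tablespace's
 * progress runs on the same 0..num_to_scan scale, so the heap's "least
 * progressed" tablespace is always the one that has finished the smallest
 * fraction of its own buffers, which is what balances the writes.  All
 * numbers below are made up for illustration.
 */
#include <stdio.h>

int
main(void)
{
    int     num_to_scan = 1000;         /* total dirty buffers to checkpoint */
    int     per_ts[2] = {250, 750};     /* dirty buffers per tablespace */
    double  slice[2];
    int     i;

    for (i = 0; i < 2; i++)
        slice[i] = (double) num_to_scan / per_ts[i];    /* progress_slice */

    /* after each tablespace has written half of its own buffers ... */
    for (i = 0; i < 2; i++)
        printf("tablespace %d progress = %.1f of %d\n",
               i, slice[i] * (per_ts[i] / 2), num_to_scan);
    /* both lines report 500.0: equal shares of the total work are done */
    return 0;
}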
2205 /*
2206  * BgBufferSync -- Write out some dirty buffers in the pool.
2207  *
2208  * This is called periodically by the background writer process.
2209  *
2210  * Returns true if it's appropriate for the bgwriter process to go into
2211  * low-power hibernation mode. (This happens if the strategy clock sweep
2212  * has been "lapped" and no buffer allocations have occurred recently,
2213  * or if the bgwriter has been effectively disabled by setting
2214  * bgwriter_lru_maxpages to 0.)
2215  */
2216 bool
2217 BgBufferSync(WritebackContext *wb_context)
2218 {
2219  /* info obtained from freelist.c */
2220  int strategy_buf_id;
2221  uint32 strategy_passes;
2222  uint32 recent_alloc;
2223 
2224  /*
2225  * Information saved between calls so we can determine the strategy
2226  * point's advance rate and avoid scanning already-cleaned buffers.
2227  */
2228  static bool saved_info_valid = false;
2229  static int prev_strategy_buf_id;
2230  static uint32 prev_strategy_passes;
2231  static int next_to_clean;
2232  static uint32 next_passes;
2233 
2234  /* Moving averages of allocation rate and clean-buffer density */
2235  static float smoothed_alloc = 0;
2236  static float smoothed_density = 10.0;
2237 
2238  /* Potentially these could be tunables, but for now, not */
2239  float smoothing_samples = 16;
2240  float scan_whole_pool_milliseconds = 120000.0;
2241 
2242  /* Used to compute how far we scan ahead */
2243  long strategy_delta;
2244  int bufs_to_lap;
2245  int bufs_ahead;
2246  float scans_per_alloc;
2247  int reusable_buffers_est;
2248  int upcoming_alloc_est;
2249  int min_scan_buffers;
2250 
2251  /* Variables for the scanning loop proper */
2252  int num_to_scan;
2253  int num_written;
2254  int reusable_buffers;
2255 
2256  /* Variables for final smoothed_density update */
2257  long new_strategy_delta;
2258  uint32 new_recent_alloc;
2259 
2260  /*
2261  * Find out where the freelist clock sweep currently is, and how many
2262  * buffer allocations have happened since our last call.
2263  */
2264  strategy_buf_id = StrategySyncStart(&strategy_passes, &recent_alloc);
2265 
2266  /* Report buffer alloc counts to pgstat */
2267  PendingBgWriterStats.buf_alloc += recent_alloc;
2268 
2269  /*
2270  * If we're not running the LRU scan, just stop after doing the stats
2271  * stuff. We mark the saved state invalid so that we can recover sanely
2272  * if LRU scan is turned back on later.
2273  */
2274  if (bgwriter_lru_maxpages <= 0)
2275  {
2276  saved_info_valid = false;
2277  return true;
2278  }
2279 
2280  /*
2281  * Compute strategy_delta = how many buffers have been scanned by the
2282  * clock sweep since last time. If first time through, assume none. Then
2283  * see if we are still ahead of the clock sweep, and if so, how many
2284  * buffers we could scan before we'd catch up with it and "lap" it. Note:
2285  * the weird-looking coding of xxx_passes comparisons is to avoid bogus
2286  * behavior when the passes counts wrap around.
2287  */
2288  if (saved_info_valid)
2289  {
2290  int32 passes_delta = strategy_passes - prev_strategy_passes;
2291 
2292  strategy_delta = strategy_buf_id - prev_strategy_buf_id;
2293  strategy_delta += (long) passes_delta * NBuffers;
2294 
2295  Assert(strategy_delta >= 0);
2296 
2297  if ((int32) (next_passes - strategy_passes) > 0)
2298  {
2299  /* we're one pass ahead of the strategy point */
2300  bufs_to_lap = strategy_buf_id - next_to_clean;
2301 #ifdef BGW_DEBUG
2302  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
2303  next_passes, next_to_clean,
2304  strategy_passes, strategy_buf_id,
2305  strategy_delta, bufs_to_lap);
2306 #endif
2307  }
2308  else if (next_passes == strategy_passes &&
2309  next_to_clean >= strategy_buf_id)
2310  {
2311  /* on same pass, but ahead or at least not behind */
2312  bufs_to_lap = NBuffers - (next_to_clean - strategy_buf_id);
2313 #ifdef BGW_DEBUG
2314  elog(DEBUG2, "bgwriter ahead: bgw %u-%u strategy %u-%u delta=%ld lap=%d",
2315  next_passes, next_to_clean,
2316  strategy_passes, strategy_buf_id,
2317  strategy_delta, bufs_to_lap);
2318 #endif
2319  }
2320  else
2321  {
2322  /*
2323  * We're behind, so skip forward to the strategy point and start
2324  * cleaning from there.
2325  */
2326 #ifdef BGW_DEBUG
2327  elog(DEBUG2, "bgwriter behind: bgw %u-%u strategy %u-%u delta=%ld",
2328  next_passes, next_to_clean,
2329  strategy_passes, strategy_buf_id,
2330  strategy_delta);
2331 #endif
2332  next_to_clean = strategy_buf_id;
2333  next_passes = strategy_passes;
2334  bufs_to_lap = NBuffers;
2335  }
2336  }
2337  else
2338  {
2339  /*
2340  * Initializing at startup or after LRU scanning had been off. Always
2341  * start at the strategy point.
2342  */
2343 #ifdef BGW_DEBUG
2344  elog(DEBUG2, "bgwriter initializing: strategy %u-%u",
2345  strategy_passes, strategy_buf_id);
2346 #endif
2347  strategy_delta = 0;
2348  next_to_clean = strategy_buf_id;
2349  next_passes = strategy_passes;
2350  bufs_to_lap = NBuffers;
2351  }
2352 
2353  /* Update saved info for next time */
2354  prev_strategy_buf_id = strategy_buf_id;
2355  prev_strategy_passes = strategy_passes;
2356  saved_info_valid = true;
2357 
2358  /*
2359  * Compute how many buffers had to be scanned for each new allocation, ie,
2360  * 1/density of reusable buffers, and track a moving average of that.
2361  *
2362  * If the strategy point didn't move, we don't update the density estimate
2363  */
2364  if (strategy_delta > 0 && recent_alloc > 0)
2365  {
2366  scans_per_alloc = (float) strategy_delta / (float) recent_alloc;
2367  smoothed_density += (scans_per_alloc - smoothed_density) /
2368  smoothing_samples;
2369  }
2370 
2371  /*
2372  * Estimate how many reusable buffers there are between the current
2373  * strategy point and where we've scanned ahead to, based on the smoothed
2374  * density estimate.
2375  */
2376  bufs_ahead = NBuffers - bufs_to_lap;
2377  reusable_buffers_est = (float) bufs_ahead / smoothed_density;
2378 
2379  /*
2380  * Track a moving average of recent buffer allocations. Here, rather than
2381  * a true average we want a fast-attack, slow-decline behavior: we
2382  * immediately follow any increase.
2383  */
2384  if (smoothed_alloc <= (float) recent_alloc)
2385  smoothed_alloc = recent_alloc;
2386  else
2387  smoothed_alloc += ((float) recent_alloc - smoothed_alloc) /
2388  smoothing_samples;
2389 
2390  /* Scale the estimate by a GUC to allow more aggressive tuning. */
2391  upcoming_alloc_est = (int) (smoothed_alloc * bgwriter_lru_multiplier);
2392 
2393  /*
2394  * If recent_alloc remains at zero for many cycles, smoothed_alloc will
2395  * eventually underflow to zero, and the underflows produce annoying
2396  * kernel warnings on some platforms. Once upcoming_alloc_est has gone to
2397  * zero, there's no point in tracking smaller and smaller values of
2398  * smoothed_alloc, so just reset it to exactly zero to avoid this
2399  * syndrome. It will pop back up as soon as recent_alloc increases.
2400  */
2401  if (upcoming_alloc_est == 0)
2402  smoothed_alloc = 0;
2403 
2404  /*
2405  * Even in cases where there's been little or no buffer allocation
2406  * activity, we want to make a small amount of progress through the buffer
2407  * cache so that as many reusable buffers as possible are clean after an
2408  * idle period.
2409  *
2410  * (scan_whole_pool_milliseconds / BgWriterDelay) computes how many times
2411  * the BGW will be called during the scan_whole_pool time; slice the
2412  * buffer pool into that many sections.
2413  */
2414  min_scan_buffers = (int) (NBuffers / (scan_whole_pool_milliseconds / BgWriterDelay));
2415 
2416  if (upcoming_alloc_est < (min_scan_buffers + reusable_buffers_est))
2417  {
2418 #ifdef BGW_DEBUG
2419  elog(DEBUG2, "bgwriter: alloc_est=%d too small, using min=%d + reusable_est=%d",
2420  upcoming_alloc_est, min_scan_buffers, reusable_buffers_est);
2421 #endif
2422  upcoming_alloc_est = min_scan_buffers + reusable_buffers_est;
2423  }
2424 
2425  /*
2426  * Now write out dirty reusable buffers, working forward from the
2427  * next_to_clean point, until we have lapped the strategy scan, or cleaned
2428  * enough buffers to match our estimate of the next cycle's allocation
2429  * requirements, or hit the bgwriter_lru_maxpages limit.
2430  */
2431 
2432  /* Make sure we can handle the pin inside SyncOneBuffer */
2434 
2435  num_to_scan = bufs_to_lap;
2436  num_written = 0;
2437  reusable_buffers = reusable_buffers_est;
2438 
2439  /* Execute the LRU scan */
2440  while (num_to_scan > 0 && reusable_buffers < upcoming_alloc_est)
2441  {
2442  int sync_state = SyncOneBuffer(next_to_clean, true,
2443  wb_context);
2444 
2445  if (++next_to_clean >= NBuffers)
2446  {
2447  next_to_clean = 0;
2448  next_passes++;
2449  }
2450  num_to_scan--;
2451 
2452  if (sync_state & BUF_WRITTEN)
2453  {
2454  reusable_buffers++;
2455  if (++num_written >= bgwriter_lru_maxpages)
2456  {
2458  break;
2459  }
2460  }
2461  else if (sync_state & BUF_REUSABLE)
2462  reusable_buffers++;
2463  }
2464 
2465  PendingBgWriterStats.buf_written_clean += num_written;
2466 
2467 #ifdef BGW_DEBUG
2468  elog(DEBUG1, "bgwriter: recent_alloc=%u smoothed=%.2f delta=%ld ahead=%d density=%.2f reusable_est=%d upcoming_est=%d scanned=%d wrote=%d reusable=%d",
2469  recent_alloc, smoothed_alloc, strategy_delta, bufs_ahead,
2470  smoothed_density, reusable_buffers_est, upcoming_alloc_est,
2471  bufs_to_lap - num_to_scan,
2472  num_written,
2473  reusable_buffers - reusable_buffers_est);
2474 #endif
2475 
2476  /*
2477  * Consider the above scan as being like a new allocation scan.
2478  * Characterize its density and update the smoothed one based on it. This
2479  * effectively halves the moving average period in cases where both the
2480  * strategy and the background writer are doing some useful scanning,
2481  * which is helpful because a long memory isn't as desirable on the
2482  * density estimates.
2483  */
2484  new_strategy_delta = bufs_to_lap - num_to_scan;
2485  new_recent_alloc = reusable_buffers - reusable_buffers_est;
2486  if (new_strategy_delta > 0 && new_recent_alloc > 0)
2487  {
2488  scans_per_alloc = (float) new_strategy_delta / (float) new_recent_alloc;
2489  smoothed_density += (scans_per_alloc - smoothed_density) /
2490  smoothing_samples;
2491 
2492 #ifdef BGW_DEBUG
2493  elog(DEBUG2, "bgwriter: cleaner density alloc=%u scan=%ld density=%.2f new smoothed=%.2f",
2494  new_recent_alloc, new_strategy_delta,
2495  scans_per_alloc, smoothed_density);
2496 #endif
2497  }
2498 
2499  /* Return true if OK to hibernate */
2500  return (bufs_to_lap == 0 && recent_alloc == 0);
2501 }
2502 
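/*
 * Editor's note (not part of bufmgr.c): a standalone sketch of the
 * "fast attack, slow decline" allocation smoothing and the minimum-scan
 * floor computed in BgBufferSync() above.  Increases in recent_alloc are
 * followed immediately; decreases decay over a 16-sample moving average,
 * and the result is scaled by bgwriter_lru_multiplier.  All values below
 * are made up for illustration.
 */
#include <stdio.h>

int
main(void)
{
    float   smoothed_alloc = 0;
    float   smoothing_samples = 16;
    float   lru_multiplier = 2.0f;      /* plays the role of bgwriter_lru_multiplier */
    int     nbuffers = 16384;           /* e.g. 128MB of shared buffers */
    int     bgwriter_delay_ms = 200;    /* plays the role of BgWriterDelay */
    int     recent[] = {300, 0, 0, 0, 500};
    int     min_scan_buffers;
    int     i;

    for (i = 0; i < 5; i++)
    {
        if (smoothed_alloc <= (float) recent[i])
            smoothed_alloc = recent[i];                 /* jump up immediately */
        else
            smoothed_alloc += ((float) recent[i] - smoothed_alloc) /
                smoothing_samples;                      /* decay slowly */

        printf("recent=%d smoothed=%.1f upcoming_est=%d\n",
               recent[i], smoothed_alloc,
               (int) (smoothed_alloc * lru_multiplier));
    }

    /* floor: cover the whole pool once per ~120s even when idle */
    min_scan_buffers = (int) (nbuffers / (120000.0 / bgwriter_delay_ms));
    printf("min_scan_buffers=%d per cycle\n", min_scan_buffers);    /* 27 here */
    return 0;
}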
2503 /*
2504  * SyncOneBuffer -- process a single buffer during syncing.
2505  *
2506  * If skip_recently_used is true, we don't write currently-pinned buffers, nor
2507  * buffers marked recently used, as these are not replacement candidates.
2508  *
2509  * Returns a bitmask containing the following flag bits:
2510  * BUF_WRITTEN: we wrote the buffer.
2511  * BUF_REUSABLE: buffer is available for replacement, ie, it has
2512  * pin count 0 and usage count 0.
2513  *
2514  * (BUF_WRITTEN could be set in error if FlushBuffer finds the buffer clean
2515  * after locking it, but we don't care all that much.)
2516  *
2517  * Note: caller must have done ResourceOwnerEnlargeBuffers.
2518  */
2519 static int
2520 SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
2521 {
2522  BufferDesc *bufHdr = GetBufferDescriptor(buf_id);
2523  int result = 0;
2524  uint32 buf_state;
2525  BufferTag tag;
2526 
2528 
2529  /*
2530  * Check whether buffer needs writing.
2531  *
2532  * We can make this check without taking the buffer content lock so long
2533  * as we mark pages dirty in access methods *before* logging changes with
2534  * XLogInsert(): if someone marks the buffer dirty just after our check, we
2535  * don't worry, because our checkpoint.redo points before the log record for
2536  * the upcoming changes and so we are not required to write such a dirty buffer.
2537  */
2538  buf_state = LockBufHdr(bufHdr);
2539 
2540  if (BUF_STATE_GET_REFCOUNT(buf_state) == 0 &&
2541  BUF_STATE_GET_USAGECOUNT(buf_state) == 0)
2542  {
2543  result |= BUF_REUSABLE;
2544  }
2545  else if (skip_recently_used)
2546  {
2547  /* Caller told us not to write recently-used buffers */
2548  UnlockBufHdr(bufHdr, buf_state);
2549  return result;
2550  }
2551 
2552  if (!(buf_state & BM_VALID) || !(buf_state & BM_DIRTY))
2553  {
2554  /* It's clean, so nothing to do */
2555  UnlockBufHdr(bufHdr, buf_state);
2556  return result;
2557  }
2558 
2559  /*
2560  * Pin it, share-lock it, write it. (FlushBuffer will do nothing if the
2561  * buffer is clean by the time we've locked it.)
2562  */
2563  PinBuffer_Locked(bufHdr);
2565 
2566  FlushBuffer(bufHdr, NULL);
2567 
2569 
2570  tag = bufHdr->tag;
2571 
2572  UnpinBuffer(bufHdr);
2573 
2574  ScheduleBufferTagForWriteback(wb_context, &tag);
2575 
2576  return result | BUF_WRITTEN;
2577 }
2578 
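/*
 * Editor's note (not part of bufmgr.c): a standalone illustration of how
 * the BUF_WRITTEN and BUF_REUSABLE bits returned by SyncOneBuffer() can
 * combine.  A buffer may be reusable without needing a write (already
 * clean), written without being reusable (the checkpoint path also writes
 * pinned or recently used buffers), or both (the case the background
 * writer is after).  The SK_* values simply mirror the flag bits defined
 * near the top of this file.
 */
#include <stdio.h>

#define SK_BUF_WRITTEN  0x01
#define SK_BUF_REUSABLE 0x02

int
main(void)
{
    int     results[] = {0, SK_BUF_REUSABLE, SK_BUF_WRITTEN,
                         SK_BUF_WRITTEN | SK_BUF_REUSABLE};
    int     i;

    for (i = 0; i < 4; i++)
        printf("written=%d reusable=%d\n",
               (results[i] & SK_BUF_WRITTEN) != 0,
               (results[i] & SK_BUF_REUSABLE) != 0);
    return 0;
}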
2579 /*
2580  * AtEOXact_Buffers - clean up at end of transaction.
2581  *
2582  * As of PostgreSQL 8.0, buffer pins should get released by the
2583  * ResourceOwner mechanism. This routine is just a debugging
2584  * cross-check that no pins remain.
2585  */
2586 void
2587 AtEOXact_Buffers(bool isCommit)
2588 {
2590 
2591  AtEOXact_LocalBuffers(isCommit);
2592 
2594 }
2595 
2596 /*
2597  * Initialize access to shared buffer pool
2598  *
2599  * This is called during backend startup (whether standalone or under the
2600  * postmaster). It sets up for this backend's access to the already-existing
2601  * buffer pool.
2602  */
2603 void
2604 InitBufferPoolAccess(void)
2605 {
2606  HASHCTL hash_ctl;
2607 
2608  memset(&PrivateRefCountArray, 0, sizeof(PrivateRefCountArray));
2609 
2610  hash_ctl.keysize = sizeof(int32);
2611  hash_ctl.entrysize = sizeof(PrivateRefCountEntry);
2612 
2613  PrivateRefCountHash = hash_create("PrivateRefCount", 100, &hash_ctl,
2614  HASH_ELEM | HASH_BLOBS);
2615 
2616  /*
2617  * AtProcExit_Buffers needs LWLock access, and therefore has to be called at
2618  * the corresponding phase of backend shutdown.
2619  */
2620  Assert(MyProc != NULL);
2622 }
2623 
2624 /*
2625  * During backend exit, ensure that we released all shared-buffer locks and
2626  * assert that we have no remaining pins.
2627  */
2628 static void
2629 AtProcExit_Buffers(int code, Datum arg)
2630 {
2631  AbortBufferIO();
2632  UnlockBuffers();
2633 
2635 
2636  /* localbuf.c needs a chance too */
2638 }
2639 
2640 /*
2641  * CheckForBufferLeaks - ensure this backend holds no buffer pins
2642  *
2643  * As of PostgreSQL 8.0, buffer pins should get released by the
2644  * ResourceOwner mechanism. This routine is just a debugging
2645  * cross-check that no pins remain.
2646  */
2647 static void
2648 CheckForBufferLeaks(void)
2649 {
2650 #ifdef USE_ASSERT_CHECKING
2651  int RefCountErrors = 0;
2653  int i;
2654 
2655  /* check the array */
2656  for (i = 0; i < REFCOUNT_ARRAY_ENTRIES; i++)
2657  {
2659 
2660  if (res->buffer != InvalidBuffer)
2661  {
2662  PrintBufferLeakWarning(res->buffer);
2663  RefCountErrors++;
2664  }
2665  }
2666 
2667  /* if necessary search the hash */
2669  {
2670  HASH_SEQ_STATUS hstat;
2671 
2673  while ((res = (PrivateRefCountEntry *) hash_seq_search(&hstat)) != NULL)
2674  {
2675  PrintBufferLeakWarning(res->buffer);
2676  RefCountErrors++;
2677  }
2678  }
2679 
2680  Assert(RefCountErrors == 0);
2681 #endif
2682 }
2683 
2684 /*
2685  * Helper routine to issue warnings when a buffer is unexpectedly pinned
2686  */
2687 void
2688 PrintBufferLeakWarning(Buffer buffer)
2689 {
2690  BufferDesc *buf;
2691  int32 loccount;
2692  char *path;
2693  BackendId backend;
2694  uint32 buf_state;
2695 
2697  if (BufferIsLocal(buffer))
2698  {
2700  loccount = LocalRefCount[-buffer - 1];
2701  backend = MyBackendId;
2702  }
2703  else
2704  {
2706  loccount = GetPrivateRefCount(buffer);
2707  backend = InvalidBackendId;
2708  }
2709 
2710  /* theoretically we should lock the bufhdr here */
2711  path = relpathbackend(BufTagGetRelFileLocator(&buf->tag), backend,
2712  BufTagGetForkNum(&buf->tag));
2713  buf_state = pg_atomic_read_u32(&buf->state);
2714  elog(WARNING,
2715  "buffer refcount leak: [%03d] "
2716  "(rel=%s, blockNum=%u, flags=0x%x, refcount=%u %d)",
2717  buffer, path,
2718  buf->tag.blockNum, buf_state & BUF_FLAG_MASK,
2719  BUF_STATE_GET_REFCOUNT(buf_state), loccount);
2720  pfree(path);
2721 }
2722 
2723 /*
2724  * CheckPointBuffers
2725  *
2726  * Flush all dirty blocks in buffer pool to disk at checkpoint time.
2727  *
2728  * Note: temporary relations do not participate in checkpoints, so they don't
2729  * need to be flushed.
2730  */
2731 void
2732 CheckPointBuffers(int flags)
2733 {
2734  BufferSync(flags);
2735 }
2736 
2737 
2738 /*
2739  * Do whatever is needed to prepare for commit at the bufmgr and smgr levels
2740  */
2741 void
2742 BufmgrCommit(void)
2743 {
2744  /* Nothing to do in bufmgr anymore... */
2745 }
2746 
2747 /*
2748  * BufferGetBlockNumber
2749  * Returns the block number associated with a buffer.
2750  *
2751  * Note:
2752  * Assumes that the buffer is valid and pinned, else the
2753  * value may be obsolete immediately...
2754  */
2755 BlockNumber
2756 BufferGetBlockNumber(Buffer buffer)
2757 {
2758  BufferDesc *bufHdr;
2759 
2761 
2762  if (BufferIsLocal(buffer))
2763  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
2764  else
2765  bufHdr = GetBufferDescriptor(buffer - 1);
2766 
2767  /* pinned, so OK to read tag without spinlock */
2768  return bufHdr->tag.blockNum;
2769 }
2770 
2771 /*
2772  * BufferGetTag
2773  * Returns the relfilelocator, fork number and block number associated with
2774  * a buffer.
2775  */
2776 void
2777 BufferGetTag(Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum,
2778  BlockNumber *blknum)
2779 {
2780  BufferDesc *bufHdr;
2781 
2782  /* Do the same checks as BufferGetBlockNumber. */
2784 
2785  if (BufferIsLocal(buffer))
2786  bufHdr = GetLocalBufferDescriptor(-buffer - 1);
2787  else
2788  bufHdr = GetBufferDescriptor(buffer - 1);
2789 
2790  /* pinned, so OK to read tag without spinlock */
2791  *rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
2792  *forknum = BufTagGetForkNum(&bufHdr->tag);
2793  *blknum = bufHdr->tag.blockNum;
2794 }
2795 
2796 /*
2797  * FlushBuffer
2798  * Physically write out a shared buffer.
2799  *
2800  * NOTE: this actually just passes the buffer contents to the kernel; the
2801  * real write to disk won't happen until the kernel feels like it. This
2802  * is okay from our point of view since we can redo the changes from WAL.
2803  * However, we will need to force the changes to disk via fsync before
2804  * we can checkpoint WAL.
2805  *
2806  * The caller must hold a pin on the buffer and have share-locked the
2807  * buffer contents. (Note: a share-lock does not prevent updates of
2808  * hint bits in the buffer, so the page could change while the write
2809  * is in progress, but we assume that that will not invalidate the data
2810  * written.)
2811  *
2812  * If the caller has an smgr reference for the buffer's relation, pass it
2813  * as the second parameter. If not, pass NULL.
2814  */
2815 static void
2816 FlushBuffer(BufferDesc *buf, SMgrRelation reln)
2817 {
2818  XLogRecPtr recptr;
2819  ErrorContextCallback errcallback;
2820  instr_time io_start,
2821  io_time;
2822  Block bufBlock;
2823  char *bufToWrite;
2824  uint32 buf_state;
2825 
2826  /*
2827  * Try to start an I/O operation. If StartBufferIO returns false, then
2828  * someone else flushed the buffer before we could, so we need not do
2829  * anything.
2830  */
2831  if (!StartBufferIO(buf, false))
2832  return;
2833 
2834  /* Setup error traceback support for ereport() */
2836  errcallback.arg = (void *) buf;
2837  errcallback.previous = error_context_stack;
2838  error_context_stack = &errcallback;
2839 
2840  /* Find smgr relation for buffer */
2841  if (reln == NULL)
2843 
2844  TRACE_POSTGRESQL_BUFFER_FLUSH_START(BufTagGetForkNum(&buf->tag),
2845  buf->tag.blockNum,
2847  reln->smgr_rlocator.locator.dbOid,
2849 
2850  buf_state = LockBufHdr(buf);
2851 
2852  /*
2853  * Run PageGetLSN while holding header lock, since we don't have the
2854  * buffer locked exclusively in all cases.
2855  */
2856  recptr = BufferGetLSN(buf);
2857 
2858  /* To check if block content changes while flushing. - vadim 01/17/97 */
2859  buf_state &= ~BM_JUST_DIRTIED;
2860  UnlockBufHdr(buf, buf_state);
2861 
2862  /*
2863  * Force XLOG flush up to buffer's LSN. This implements the basic WAL
2864  * rule that log updates must hit disk before any of the data-file changes
2865  * they describe do.
2866  *
2867  * However, this rule does not apply to unlogged relations, which will be
2868  * lost after a crash anyway. Most unlogged relation pages do not bear
2869  * LSNs since we never emit WAL records for them, and therefore flushing
2870  * up through the buffer LSN would be useless, but harmless. However,
2871  * GiST indexes use LSNs internally to track page-splits, and therefore
2872  * unlogged GiST pages bear "fake" LSNs generated by
2873  * GetFakeLSNForUnloggedRel. It is unlikely but possible that the fake
2874  * LSN counter could advance past the WAL insertion point; and if it did
2875  * happen, attempting to flush WAL through that location would fail, with
2876  * disastrous system-wide consequences. To make sure that can't happen,
2877  * skip the flush if the buffer isn't permanent.
2878  */
2879  if (buf_state & BM_PERMANENT)
2880  XLogFlush(recptr);
2881 
2882  /*
2883  * Now it's safe to write buffer to disk. Note that no one else should
2884  * have been able to write it while we were busy with log flushing because
2885  * only one process at a time can set the BM_IO_IN_PROGRESS bit.
2886  */
2887  bufBlock = BufHdrGetBlock(buf);
2888 
2889  /*
2890  * Update page checksum if desired. Since we have only shared lock on the
2891  * buffer, other processes might be updating hint bits in it, so we must
2892  * copy the page to private storage if we do checksumming.
2893  */
2894  bufToWrite = PageSetChecksumCopy((Page) bufBlock, buf->tag.blockNum);
2895 
2896  if (track_io_timing)
2897  INSTR_TIME_SET_CURRENT(io_start);
2898  else
2899  INSTR_TIME_SET_ZERO(io_start);
2900 
2901  /*
2902  * bufToWrite is either the shared buffer or a copy, as appropriate.
2903  */
2904  smgrwrite(reln,
2905  BufTagGetForkNum(&buf->tag),
2906  buf->tag.blockNum,
2907  bufToWrite,
2908  false);
2909 
2910  if (track_io_timing)
2911  {
2912  INSTR_TIME_SET_CURRENT(io_time);
2913  INSTR_TIME_SUBTRACT(io_time, io_start);
2916  }
2917 
2919 
2920  /*
2921  * Mark the buffer as clean (unless BM_JUST_DIRTIED has become set) and
2922  * end the BM_IO_IN_PROGRESS state.
2923  */
2924  TerminateBufferIO(buf, true, 0);
2925 
2926  TRACE_POSTGRESQL_BUFFER_FLUSH_DONE(BufTagGetForkNum(&buf->tag),
2927  buf->tag.blockNum,
2929  reln->smgr_rlocator.locator.dbOid,
2931 
2932  /* Pop the error context stack */
2933  error_context_stack = errcallback.previous;
2934 }
2935 
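/*
 * Editor's note (not part of bufmgr.c): a simplified, standalone sketch of
 * the WAL-before-data rule that FlushBuffer() enforces with XLogFlush():
 * a permanent page carrying LSN L may be handed to the kernel only once
 * WAL is known durable at least up to L.  The types and helpers below are
 * hypothetical stand-ins, not PostgreSQL APIs.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t SketchLSN;

static SketchLSN wal_durable_upto = 0;

/* stand-in for XLogFlush(): make WAL durable through 'lsn' */
static void
sketch_xlog_flush(SketchLSN lsn)
{
    if (lsn > wal_durable_upto)
        wal_durable_upto = lsn;
}

/* the invariant that must hold before the page write may proceed */
static bool
sketch_page_write_allowed(SketchLSN page_lsn)
{
    return page_lsn <= wal_durable_upto;
}

int
main(void)
{
    SketchLSN   page_lsn = 0x1234;

    printf("before WAL flush: %d\n", sketch_page_write_allowed(page_lsn)); /* 0 */
    sketch_xlog_flush(page_lsn);
    printf("after WAL flush:  %d\n", sketch_page_write_allowed(page_lsn)); /* 1 */
    return 0;
}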
2936 /*
2937  * RelationGetNumberOfBlocksInFork
2938  * Determines the current number of pages in the specified relation fork.
2939  *
2940  * Note that the accuracy of the result will depend on the details of the
2941  * relation's storage. For builtin AMs it'll be accurate, but for external AMs
2942  * it might not be.
2943  */
2944 BlockNumber
2945 RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
2946 {
2947  if (RELKIND_HAS_TABLE_AM(relation->rd_rel->relkind))
2948  {
2949  /*
2950  * Not every table AM uses BLCKSZ-wide fixed-size blocks. The tableam API
2951  * therefore returns the size in bytes - but for the purpose of this
2952  * routine, we want the number of blocks, so divide, rounding
2953  * up.
2954  */
2955  uint64 szbytes;
2956 
2957  szbytes = table_relation_size(relation, forkNum);
2958 
2959  return (szbytes + (BLCKSZ - 1)) / BLCKSZ;
2960  }
2961  else if (RELKIND_HAS_STORAGE(relation->rd_rel->relkind))
2962  {
2963  return smgrnblocks(RelationGetSmgr(relation), forkNum);
2964  }
2965  else
2966  Assert(false);
2967 
2968  return 0; /* keep compiler quiet */
2969 }
2970 
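/*
 * Editor's note (not part of bufmgr.c): the round-up division used just
 * above to turn a table AM's byte size into a block count.  Standalone and
 * illustrative; MY_BLCKSZ is a stand-in for BLCKSZ.
 */
#include <stdint.h>
#include <stdio.h>

#define MY_BLCKSZ 8192

static uint64_t
sketch_bytes_to_blocks(uint64_t szbytes)
{
    return (szbytes + (MY_BLCKSZ - 1)) / MY_BLCKSZ;
}

int
main(void)
{
    printf("%llu\n", (unsigned long long) sketch_bytes_to_blocks(0));      /* 0 */
    printf("%llu\n", (unsigned long long) sketch_bytes_to_blocks(1));      /* 1 */
    printf("%llu\n", (unsigned long long) sketch_bytes_to_blocks(8192));   /* 1 */
    printf("%llu\n", (unsigned long long) sketch_bytes_to_blocks(8193));   /* 2 */
    return 0;
}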
2971 /*
2972  * BufferIsPermanent
2973  * Determines whether a buffer will potentially still be around after
2974  * a crash. Caller must hold a buffer pin.
2975  */
2976 bool
2977 BufferIsPermanent(Buffer buffer)
2978 {
2979  BufferDesc *bufHdr;
2980 
2981  /* Local buffers are used only for temp relations. */
2982  if (BufferIsLocal(buffer))
2983  return false;
2984 
2985  /* Make sure we've got a real buffer, and that we hold a pin on it. */
2988 
2989  /*
2990  * BM_PERMANENT can't be changed while we hold a pin on the buffer, so we
2991  * need not bother with the buffer header spinlock. Even if someone else
2992  * changes the buffer header state while we're doing this, the state is
2993  * changed atomically, so we'll read the old value or the new value, but
2994  * not random garbage.
2995  */
2996  bufHdr = GetBufferDescriptor(buffer - 1);
2997  return (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT) != 0;
2998 }
2999 
3000 /*
3001  * BufferGetLSNAtomic
3002  * Retrieves the LSN of the buffer atomically using a buffer header lock.
3003  * This is necessary for some callers who may not have an exclusive lock
3004  * on the buffer.
3005  */
3006 XLogRecPtr
3007 BufferGetLSNAtomic(Buffer buffer)
3008 {
3009  BufferDesc *bufHdr = GetBufferDescriptor(buffer - 1);
3010  char *page = BufferGetPage(buffer);
3011  XLogRecPtr lsn;
3012  uint32 buf_state;
3013 
3014  /*
3015  * If we don't need locking for correctness, fastpath out.
3016  */
3018  return PageGetLSN(page);
3019 
3020  /* Make sure we've got a real buffer, and that we hold a pin on it. */
3023 
3024  buf_state = LockBufHdr(bufHdr);
3025  lsn = PageGetLSN(page);
3026  UnlockBufHdr(bufHdr, buf_state);
3027 
3028  return lsn;
3029 }
3030 
3031 /* ---------------------------------------------------------------------
3032  * DropRelationBuffers
3033  *
3034  * This function removes from the buffer pool all the pages of the
3035  * specified relation forks that have block numbers >= firstDelBlock.
3036  * (In particular, with firstDelBlock = 0, all pages are removed.)
3037  * Dirty pages are simply dropped, without bothering to write them
3038  * out first. Therefore, this is NOT rollback-able, and so should be
3039  * used only with extreme caution!
3040  *
3041  * Currently, this is called only from smgr.c when the underlying file
3042  * is about to be deleted or truncated (firstDelBlock is needed for
3043  * the truncation case). The data in the affected pages would therefore
3044  * be deleted momentarily anyway, and there is no point in writing it.
3045  * It is the responsibility of higher-level code to ensure that the
3046  * deletion or truncation does not lose any data that could be needed
3047  * later. It is also the responsibility of higher-level code to ensure
3048  * that no other process could be trying to load more pages of the
3049  * relation into buffers.
3050  * --------------------------------------------------------------------
3051  */
3052 void
3053 DropRelationBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
3054  int nforks, BlockNumber *firstDelBlock)
3055 {
3056  int i;
3057  int j;
3058  RelFileLocatorBackend rlocator;
3059  BlockNumber nForkBlock[MAX_FORKNUM];
3060  uint64 nBlocksToInvalidate = 0;
3061 
3062  rlocator = smgr_reln->smgr_rlocator;
3063 
3064  /* If it's a local relation, it's localbuf.c's problem. */
3065  if (RelFileLocatorBackendIsTemp(rlocator))
3066  {
3067  if (rlocator.backend == MyBackendId)
3068  {
3069  for (j = 0; j < nforks; j++)
3070  DropRelationLocalBuffers(rlocator.locator, forkNum[j],
3071  firstDelBlock[j]);
3072  }
3073  return;
3074  }
3075 
3076  /*
3077  * To remove all the pages of the specified relation forks from the buffer
3078  * pool, we need to scan the entire buffer pool, but we can optimize this by
3079  * finding the buffers via the BufMapping table, provided we know the exact
3080  * size of each fork of the relation. The exact size is required to ensure
3081  * that we don't leave any buffer for the relation being dropped, as
3082  * otherwise the background writer or checkpointer could hit a PANIC
3083  * error while flushing buffers corresponding to files that don't exist.
3084  *
3085  * To know the exact size, we rely on the size cached for each fork by us
3086  * during recovery, which limits the optimization to recovery and to
3087  * standbys, but we can easily extend it once we have a shared cache for
3088  * relation sizes.
3089  *
3090  * In recovery, we cache the value returned by the first lseek(SEEK_END),
3091  * and subsequent writes keep the cached value up-to-date. See
3092  * smgrextend. It is possible that the value of the first lseek is smaller
3093  * than the actual number of existing blocks in the file due to buggy
3094  * Linux kernels that might not have accounted for the recent write. But
3095  * that should be fine because there must not be any buffers after that
3096  * file size.
3097  */
3098  for (i = 0; i < nforks; i++)
3099  {
3100  /* Get the number of blocks for a relation's fork */
3101  nForkBlock[i] = smgrnblocks_cached(smgr_reln, forkNum[i]);
3102 
3103  if (nForkBlock[i] == InvalidBlockNumber)
3104  {
3105  nBlocksToInvalidate = InvalidBlockNumber;
3106  break;
3107  }
3108 
3109  /* calculate the number of blocks to be invalidated */
3110  nBlocksToInvalidate += (nForkBlock[i] - firstDelBlock[i]);
3111  }
3112 
3113  /*
3114  * We apply the optimization iff the total number of blocks to invalidate
3115  * is below the BUF_DROP_FULL_SCAN_THRESHOLD.
3116  */
3117  if (BlockNumberIsValid(nBlocksToInvalidate) &&
3118  nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
3119  {
3120  for (j = 0; j < nforks; j++)
3121  FindAndDropRelationBuffers(rlocator.locator, forkNum[j],
3122  nForkBlock[j], firstDelBlock[j]);
3123  return;
3124  }
3125 
3126  for (i = 0; i < NBuffers; i++)
3127  {
3128  BufferDesc *bufHdr = GetBufferDescriptor(i);
3129  uint32 buf_state;
3130 
3131  /*
3132  * We can make this a tad faster by prechecking the buffer tag before
3133  * we attempt to lock the buffer; this saves a lot of lock
3134  * acquisitions in typical cases. It should be safe because the
3135  * caller must have AccessExclusiveLock on the relation, or some other
3136  * reason to be certain that no one is loading new pages of the rel
3137  * into the buffer pool. (Otherwise we might well miss such pages
3138  * entirely.) Therefore, while the tag might be changing while we
3139  * look at it, it can't be changing *to* a value we care about, only
3140  * *away* from such a value. So false negatives are impossible, and
3141  * false positives are safe because we'll recheck after getting the
3142  * buffer lock.
3143  *
3144  * We could check forkNum and blockNum as well as the rlocator, but
3145  * the incremental win from doing so seems small.
3146  */
3147  if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator))
3148  continue;
3149 
3150  buf_state = LockBufHdr(bufHdr);
3151 
3152  for (j = 0; j < nforks; j++)
3153  {
3154  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator) &&
3155  BufTagGetForkNum(&bufHdr->tag) == forkNum[j] &&
3156  bufHdr->tag.blockNum >= firstDelBlock[j])
3157  {
3158  InvalidateBuffer(bufHdr); /* releases spinlock */
3159  break;
3160  }
3161  }
3162  if (j >= nforks)
3163  UnlockBufHdr(bufHdr, buf_state);
3164  }
3165 }
3166 
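/*
 * Editor's note (not part of bufmgr.c): a standalone sketch of the decision
 * made in DropRelationBuffers() above between targeted BufMapping lookups
 * and a full buffer-pool scan.  With NBuffers = 16384 (128MB of shared
 * buffers) the threshold NBuffers/32 is 512 blocks, so a small relation
 * whose cached size is known is dropped via a handful of hash lookups
 * rather than by touching every buffer header.  The function name is
 * hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
sketch_use_targeted_drop(uint64_t nblocks_to_invalidate, int nbuffers,
                         bool all_fork_sizes_cached)
{
    uint64_t    threshold = (uint64_t) nbuffers / 32;   /* BUF_DROP_FULL_SCAN_THRESHOLD */

    return all_fork_sizes_cached && nblocks_to_invalidate < threshold;
}

int
main(void)
{
    printf("%d\n", sketch_use_targeted_drop(100, 16384, true));     /* 1: lookups */
    printf("%d\n", sketch_use_targeted_drop(100000, 16384, true));  /* 0: full scan */
    printf("%d\n", sketch_use_targeted_drop(100, 16384, false));    /* 0: size unknown */
    return 0;
}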
3167 /* ---------------------------------------------------------------------
3168  * DropRelationsAllBuffers
3169  *
3170  * This function removes from the buffer pool all the pages of all
3171  * forks of the specified relations. It's equivalent to calling
3172  * DropRelationBuffers once per fork per relation with firstDelBlock = 0.
3173  * --------------------------------------------------------------------
3174  */
3175 void
3176 DropRelationsAllBuffers(SMgrRelation *smgr_reln, int nlocators)
3177 {
3178  int i;
3179  int n = 0;
3180  SMgrRelation *rels;
3181  BlockNumber (*block)[MAX_FORKNUM + 1];
3182  uint64 nBlocksToInvalidate = 0;
3183  RelFileLocator *locators;
3184  bool cached = true;
3185  bool use_bsearch;
3186 
3187  if (nlocators == 0)
3188  return;
3189 
3190  rels = palloc(sizeof(SMgrRelation) * nlocators); /* non-local relations */
3191 
3192  /* If it's a local relation, it's localbuf.c's problem. */
3193  for (i = 0; i < nlocators; i++)
3194  {
3195  if (RelFileLocatorBackendIsTemp(smgr_reln[i]->smgr_rlocator))
3196  {
3197  if (smgr_reln[i]->smgr_rlocator.backend == MyBackendId)
3198  DropRelationAllLocalBuffers(smgr_reln[i]->smgr_rlocator.locator);
3199  }
3200  else
3201  rels[n++] = smgr_reln[i];
3202  }
3203 
3204  /*
3205  * If there are no non-local relations, then we're done. Release the
3206  * memory and return.
3207  */
3208  if (n == 0)
3209  {
3210  pfree(rels);
3211  return;
3212  }
3213 
3214  /*
3215  * This is used to remember the number of blocks for all forks of all the
3216  * relations.
3217  */
3218  block = (BlockNumber (*)[MAX_FORKNUM + 1])
3219  palloc(sizeof(BlockNumber) * n * (MAX_FORKNUM + 1));
3220 
3221  /*
3222  * We can avoid scanning the entire buffer pool if we know the exact size
3223  * of each of the given relation forks. See DropRelationBuffers.
3224  */
3225  for (i = 0; i < n && cached; i++)
3226  {
3227  for (int j = 0; j <= MAX_FORKNUM; j++)
3228  {
3229  /* Get the number of blocks for a relation's fork. */
3230  block[i][j] = smgrnblocks_cached(rels[i], j);
3231 
3232  /* We need to consider only the relation forks that exist. */
3233  if (block[i][j] == InvalidBlockNumber)
3234  {
3235  if (!smgrexists(rels[i], j))
3236  continue;
3237  cached = false;
3238  break;
3239  }
3240 
3241  /* calculate the total number of blocks to be invalidated */
3242  nBlocksToInvalidate += block[i][j];
3243  }
3244  }
3245 
3246  /*
3247  * We apply the optimization iff the total number of blocks to invalidate
3248  * is below the BUF_DROP_FULL_SCAN_THRESHOLD.
3249  */
3250  if (cached && nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
3251  {
3252  for (i = 0; i < n; i++)
3253  {
3254  for (int j = 0; j <= MAX_FORKNUM; j++)
3255  {
3256  /* ignore relation forks that don't exist */
3257  if (!BlockNumberIsValid(block[i][j]))
3258  continue;
3259 
3260  /* drop all the buffers for a particular relation fork */
3261  FindAndDropRelationBuffers(rels[i]->smgr_rlocator.locator,
3262  j, block[i][j], 0);
3263  }
3264  }
3265 
3266  pfree(block);
3267  pfree(rels);
3268  return;
3269  }
3270 
3271  pfree(block);
3272  locators = palloc(sizeof(RelFileLocator) * n); /* non-local relations */
3273  for (i = 0; i < n; i++)
3274  locators[i] = rels[i]->smgr_rlocator.locator;
3275 
3276  /*
3277  * For a low number of relations to drop, just use a simple walk-through to
3278  * save the bsearch overhead. The threshold to use is more a guess than
3279  * an exactly determined value, as it depends on many factors (CPU and RAM
3280  * speeds, amount of shared buffers etc.).
3281  */
3282  use_bsearch = n > RELS_BSEARCH_THRESHOLD;
3283 
3284  /* sort the list of rlocators if necessary */
3285  if (use_bsearch)
3286  pg_qsort(locators, n, sizeof(RelFileLocator), rlocator_comparator);
3287 
3288  for (i = 0; i < NBuffers; i++)
3289  {
3290  RelFileLocator *rlocator = NULL;
3291  BufferDesc *bufHdr = GetBufferDescriptor(i);
3292  uint32 buf_state;
3293 
3294  /*
3295  * As in DropRelationBuffers, an unlocked precheck should be safe and
3296  * saves some cycles.
3297  */
3298 
3299  if (!use_bsearch)
3300  {
3301  int j;
3302 
3303  for (j = 0; j < n; j++)
3304  {
3305  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &locators[j]))
3306  {
3307  rlocator = &locators[j];
3308  break;
3309  }
3310  }
3311  }
3312  else
3313  {
3314  RelFileLocator locator;
3315 
3316  locator = BufTagGetRelFileLocator(&bufHdr->tag);
3317  rlocator = bsearch((const void *) &(locator),
3318  locators, n, sizeof(RelFileLocator),
3320  }
3321 
3322  /* buffer doesn't belong to any of the given relfilelocators; skip it */
3323  if (rlocator == NULL)
3324  continue;
3325 
3326  buf_state = LockBufHdr(bufHdr);
3327  if (BufTagMatchesRelFileLocator(&bufHdr->tag, rlocator))
3328  InvalidateBuffer(bufHdr); /* releases spinlock */
3329  else
3330  UnlockBufHdr(bufHdr, buf_state);
3331  }
3332 
3333  pfree(locators);
3334  pfree(rels);
3335 }
3336 
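/*
 * Editor's note (not part of bufmgr.c): a standalone sketch of the
 * sort-then-bsearch pattern used in DropRelationsAllBuffers() above.  Below
 * RELS_BSEARCH_THRESHOLD (20) relations a plain walk over the array is
 * cheaper; above it, the list is sorted once and each buffer header's tag
 * is probed with bsearch.  LocatorKey and key_cmp are simplified stand-ins
 * for RelFileLocator and rlocator_comparator.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct
{
    unsigned    spcOid;
    unsigned    dbOid;
    unsigned    relNumber;
} LocatorKey;

static int
key_cmp(const void *a, const void *b)
{
    const LocatorKey *x = a;
    const LocatorKey *y = b;

    if (x->relNumber != y->relNumber)
        return x->relNumber < y->relNumber ? -1 : 1;
    if (x->dbOid != y->dbOid)
        return x->dbOid < y->dbOid ? -1 : 1;
    if (x->spcOid != y->spcOid)
        return x->spcOid < y->spcOid ? -1 : 1;
    return 0;
}

int
main(void)
{
    LocatorKey  drop[] = {{1663, 5, 16388}, {1663, 5, 16384}, {1663, 5, 16401}};
    int         n = 3;
    LocatorKey  probe = {1663, 5, 16401};   /* pretend: one buffer's tag */
    LocatorKey *hit;

    qsort(drop, n, sizeof(LocatorKey), key_cmp);            /* sort once */
    hit = bsearch(&probe, drop, n, sizeof(LocatorKey), key_cmp);
    printf("buffer %s to a dropped relation\n",
           hit ? "belongs" : "does not belong");
    return 0;
}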
3337 /* ---------------------------------------------------------------------
3338  * FindAndDropRelationBuffers
3339  *
3340  * This function performs lookups in the BufMapping table and removes from
3341  * the buffer pool all the pages of the specified relation fork that have
3342  * block numbers >= firstDelBlock. (In particular, with firstDelBlock = 0, all
3343  * pages are removed.)
3344  * --------------------------------------------------------------------
3345  */
3346 static void
3347 FindAndDropRelationBuffers(RelFileLocator rlocator, ForkNumber forkNum,
3348  BlockNumber nForkBlock,
3349  BlockNumber firstDelBlock)
3350 {
3351  BlockNumber curBlock;
3352 
3353  for (curBlock = firstDelBlock; curBlock < nForkBlock; curBlock++)
3354  {
3355  uint32 bufHash; /* hash value for tag */
3356  BufferTag bufTag; /* identity of requested block */
3357  LWLock *bufPartitionLock; /* buffer partition lock for it */
3358  int buf_id;
3359  BufferDesc *bufHdr;
3360  uint32 buf_state;
3361 
3362  /* create a tag so we can lookup the buffer */
3363  InitBufferTag(&bufTag, &rlocator, forkNum, curBlock);
3364 
3365  /* determine its hash code and partition lock ID */
3366  bufHash = BufTableHashCode(&bufTag);
3367  bufPartitionLock = BufMappingPartitionLock(bufHash);
3368 
3369  /* Check that it is in the buffer pool. If not, do nothing. */
3370  LWLockAcquire(bufPartitionLock, LW_SHARED);
3371  buf_id = BufTableLookup(&bufTag, bufHash);
3372  LWLockRelease(bufPartitionLock);
3373 
3374  if (buf_id < 0)
3375  continue;
3376 
3377  bufHdr = GetBufferDescriptor(buf_id);
3378 
3379  /*
3380  * We need to lock the buffer header and recheck if the buffer is
3381  * still associated with the same block because the buffer could be
3382  * evicted by some other backend loading blocks for a different
3383  * relation after we release lock on the BufMapping table.
3384  */
3385  buf_state = LockBufHdr(bufHdr);
3386 
3387  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator) &&
3388  BufTagGetForkNum(&bufHdr->tag) == forkNum &&
3389  bufHdr->tag.blockNum >= firstDelBlock)
3390  InvalidateBuffer(bufHdr); /* releases spinlock */
3391  else
3392  UnlockBufHdr(bufHdr, buf_state);
3393  }
3394 }
3395 
3396 /* ---------------------------------------------------------------------
3397  * DropDatabaseBuffers
3398  *
3399  * This function removes all the buffers in the buffer cache for a
3400  * particular database. Dirty pages are simply dropped, without
3401  * bothering to write them out first. This is used when we destroy a
3402  * database, to avoid trying to flush data to disk when the directory
3403  * tree no longer exists. Implementation is pretty similar to
3404  * DropRelationBuffers() which is for destroying just one relation.
3405  * --------------------------------------------------------------------
3406  */
3407 void
3408 DropDatabaseBuffers(Oid dbid)
3409 {
3410  int i;
3411 
3412  /*
3413  * We needn't consider local buffers, since by assumption the target
3414  * database isn't our own.
3415  */
3416 
3417  for (i = 0; i < NBuffers; i++)
3418  {
3419  BufferDesc *bufHdr = GetBufferDescriptor(i);
3420  uint32 buf_state;
3421 
3422  /*
3423  * As in DropRelationBuffers, an unlocked precheck should be safe and
3424  * saves some cycles.
3425  */
3426  if (bufHdr->tag.dbOid != dbid)
3427  continue;
3428 
3429  buf_state = LockBufHdr(bufHdr);
3430  if (bufHdr->tag.dbOid == dbid)
3431  InvalidateBuffer(bufHdr); /* releases spinlock */
3432  else
3433  UnlockBufHdr(bufHdr, buf_state);
3434  }
3435 }
3436 
3437 /* -----------------------------------------------------------------
3438  * PrintBufferDescs
3439  *
3440  * this function prints all the buffer descriptors, for debugging
3441  * use only.
3442  * -----------------------------------------------------------------
3443  */
3444 #ifdef NOT_USED
3445 void
3446 PrintBufferDescs(void)
3447 {
3448  int i;
3449 
3450  for (i = 0; i < NBuffers; ++i)
3451  {
3454 
3455  /* theoretically we should lock the bufhdr here */
3456  elog(LOG,
3457  "[%02d] (freeNext=%d, rel=%s, "
3458  "blockNum=%u, flags=0x%x, refcount=%u %d)",
3459  i, buf->freeNext,
3462  buf->tag.blockNum, buf->flags,
3463  buf->refcount, GetPrivateRefCount(b));
3464  }
3465 }
3466 #endif
3467 
3468 #ifdef NOT_USED
3469 void
3470 PrintPinnedBufs(void)
3471 {
3472  int i;
3473 
3474  for (i = 0; i < NBuffers; ++i)
3475  {
3478 
3479  if (GetPrivateRefCount(b) > 0)
3480  {
3481  /* theoretically we should lock the bufhdr here */
3482  elog(LOG,
3483  "[%02d] (freeNext=%d, rel=%s, "
3484  "blockNum=%u, flags=0x%x, refcount=%u %d)",
3485  i, buf->freeNext,
3487  BufTagGetForkNum(&buf->tag)),
3488  buf->tag.blockNum, buf->flags,
3489  buf->refcount, GetPrivateRefCount(b));
3490  }
3491  }
3492 }
3493 #endif
3494 
3495 /* ---------------------------------------------------------------------
3496  * FlushRelationBuffers
3497  *
3498  * This function writes all dirty pages of a relation out to disk
3499  * (or more accurately, out to kernel disk buffers), ensuring that the
3500  * kernel has an up-to-date view of the relation.
3501  *
3502  * Generally, the caller should be holding AccessExclusiveLock on the
3503  * target relation to ensure that no other backend is busy dirtying
3504  * more blocks of the relation; the effects can't be expected to last
3505  * after the lock is released.
3506  *
3507  * XXX currently it sequentially searches the buffer pool, should be
3508  * changed to more clever ways of searching. This routine is not
3509  * used in any performance-critical code paths, so it's not worth
3510  * adding additional overhead to normal paths to make it go faster.
3511  * --------------------------------------------------------------------
3512  */
3513 void
3514 FlushRelationBuffers(Relation rel)
3515 {
3516  int i;
3517  BufferDesc *bufHdr;
3518 
3519  if (RelationUsesLocalBuffers(rel))
3520  {
3521  for (i = 0; i < NLocBuffer; i++)
3522  {
3523  uint32 buf_state;
3524 
3525  bufHdr = GetLocalBufferDescriptor(i);
3526  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
3527  ((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
3528  (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3529  {
3530  ErrorContextCallback errcallback;
3531  Page localpage;
3532 
3533  localpage = (char *) LocalBufHdrGetBlock(bufHdr);
3534 
3535  /* Setup error traceback support for ereport() */
3537  errcallback.arg = (void *) bufHdr;
3538  errcallback.previous = error_context_stack;
3539  error_context_stack = &errcallback;
3540 
3541  PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
3542 
3544  BufTagGetForkNum(&bufHdr->tag),
3545  bufHdr->tag.blockNum,
3546  localpage,
3547  false);
3548 
3549  buf_state &= ~(BM_DIRTY | BM_JUST_DIRTIED);
3550  pg_atomic_unlocked_write_u32(&bufHdr->state, buf_state);
3551 
3552  /* Pop the error context stack */
3553  error_context_stack = errcallback.previous;
3554  }
3555  }
3556 
3557  return;
3558  }
3559 
3560  /* Make sure we can handle the pin inside the loop */
3562 
3563  for (i = 0; i < NBuffers; i++)
3564  {
3565  uint32 buf_state;
3566 
3567  bufHdr = GetBufferDescriptor(i);
3568 
3569  /*
3570  * As in DropRelationBuffers, an unlocked precheck should be safe and
3571  * saves some cycles.
3572  */
3573  if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator))
3574  continue;
3575 
3577 
3578  buf_state = LockBufHdr(bufHdr);
3579  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
3580  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3581  {
3582  PinBuffer_Locked(bufHdr);
3584  FlushBuffer(bufHdr, RelationGetSmgr(rel));
3586  UnpinBuffer(bufHdr);
3587  }
3588  else
3589  UnlockBufHdr(bufHdr, buf_state);
3590  }
3591 }
3592 
3593 /* ---------------------------------------------------------------------
3594  * FlushRelationsAllBuffers
3595  *
3596  * This function flushes out of the buffer pool all the pages of all
3597  * forks of the specified smgr relations. It's equivalent to calling
3598  * FlushRelationBuffers once per relation. The relations are assumed not
3599  * to use local buffers.
3600  * --------------------------------------------------------------------
3601  */
3602 void
3603 FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
3604 {
3605  int i;
3606  SMgrSortArray *srels;
3607  bool use_bsearch;
3608 
3609  if (nrels == 0)
3610  return;
3611 
3612  /* fill-in array for qsort */
3613  srels = palloc(sizeof(SMgrSortArray) * nrels);
3614 
3615  for (i = 0; i < nrels; i++)
3616  {
3617  Assert(!RelFileLocatorBackendIsTemp(smgrs[i]->smgr_rlocator));
3618 
3619  srels[i].rlocator = smgrs[i]->smgr_rlocator.locator;
3620  srels[i].srel = smgrs[i];
3621  }
3622 
3623  /*
3624  * Save the bsearch overhead for a low number of relations to sync. See
3625  * DropRelationsAllBuffers for details.
3626  */
3627  use_bsearch = nrels > RELS_BSEARCH_THRESHOLD;
3628 
3629  /* sort the list of SMgrRelations if necessary */
3630  if (use_bsearch)
3631  pg_qsort(srels, nrels, sizeof(SMgrSortArray), rlocator_comparator);
3632 
3633  /* Make sure we can handle the pin inside the loop */
3635 
3636  for (i = 0; i < NBuffers; i++)
3637  {
3638  SMgrSortArray *srelent = NULL;
3639  BufferDesc *bufHdr = GetBufferDescriptor(i);
3640  uint32 buf_state;
3641 
3642  /*
3643  * As in DropRelationBuffers, an unlocked precheck should be safe and
3644  * saves some cycles.
3645  */
3646 
3647  if (!use_bsearch)
3648  {
3649  int j;
3650 
3651  for (j = 0; j < nrels; j++)
3652  {
3653  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srels[j].rlocator))
3654  {
3655  srelent = &srels[j];
3656  break;
3657  }
3658  }
3659  }
3660  else
3661  {
3662  RelFileLocator rlocator;
3663 
3664  rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
3665  srelent = bsearch((const void *) &(rlocator),
3666  srels, nrels, sizeof(SMgrSortArray),
3668  }
3669 
3670  /* buffer doesn't belong to any of the given relfilelocators; skip it */
3671  if (srelent == NULL)
3672  continue;
3673 
3675 
3676  buf_state = LockBufHdr(bufHdr);
3677  if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srelent->rlocator) &&
3678  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3679  {
3680  PinBuffer_Locked(bufHdr);
3682  FlushBuffer(bufHdr, srelent->srel);
3684  UnpinBuffer(bufHdr);
3685  }
3686  else
3687  UnlockBufHdr(bufHdr, buf_state);
3688  }
3689 
3690  pfree(srels);
3691 }
3692 
3693 /* ---------------------------------------------------------------------
3694  * RelationCopyStorageUsingBuffer
3695  *
3696  * Copy a fork's data using the buffer manager. Same as RelationCopyStorage,
3697  * but instead of using smgrread and smgrextend, this copies using bufmgr APIs.
3698  *
3699  * Refer to the comments atop CreateAndCopyRelationData() for details about
3700  * 'permanent' parameter.
3701  * --------------------------------------------------------------------
3702  */
3703 static void
3704 RelationCopyStorageUsingBuffer(RelFileLocator srclocator,
3705  RelFileLocator dstlocator,
3706  ForkNumber forkNum, bool permanent)
3707 {
3708  Buffer srcBuf;
3709  Buffer dstBuf;
3710  Page srcPage;
3711  Page dstPage;
3712  bool use_wal;
3713  BlockNumber nblocks;
3714  BlockNumber blkno;
3716  BufferAccessStrategy bstrategy_src;
3717  BufferAccessStrategy bstrategy_dst;
3718 
3719  /*
3720  * In general, we want to write WAL whenever wal_level > 'minimal', but we
3721  * can skip it when copying any fork of an unlogged relation other than
3722  * the init fork.
3723  */
3724  use_wal = XLogIsNeeded() && (permanent || forkNum == INIT_FORKNUM);
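 /*
  * Editor's note (illustration, assuming the rule computed above): with
  * wal_level = replica or higher, XLogIsNeeded() is true, so copying the
  * init fork of an unlogged relation (permanent = false, forkNum =
  * INIT_FORKNUM) is still WAL-logged, while copying its main fork is not;
  * with wal_level = minimal nothing is WAL-logged here.
  */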
3725 
3726  /* Get number of blocks in the source relation. */
3727  nblocks = smgrnblocks(smgropen(srclocator, InvalidBackendId),
3728  forkNum);
3729 
3730  /* Nothing to copy; just return. */
3731  if (nblocks == 0)
3732  return;
3733 
3734  /*
3735  * Bulk-extend the destination relation to the same size as the source
3736  * relation before starting to copy, block by block.
3737  */
3738  memset(buf.data, 0, BLCKSZ);
3739  smgrextend(smgropen(dstlocator, InvalidBackendId), forkNum, nblocks - 1,
3740  buf.data, true);
3741 
3742  /* This is a bulk operation, so use buffer access strategies. */
3743  bstrategy_src = GetAccessStrategy(BAS_BULKREAD);
3744  bstrategy_dst = GetAccessStrategy(BAS_BULKWRITE);
3745 
3746  /* Iterate over each block of the source relation file. */
3747  for (blkno = 0; blkno < nblocks; blkno++)
3748  {
3749  CHECK_FOR_INTERRUPTS();
3750 
3751  /* Read block from source relation. */
3752  srcBuf = ReadBufferWithoutRelcache(srclocator, forkNum, blkno,
3753  RBM_NORMAL, bstrategy_src,
3754  permanent);
3755  LockBuffer(srcBuf, BUFFER_LOCK_SHARE);
3756  srcPage = BufferGetPage(srcBuf);
3757 
3758  /* Read the corresponding block of the (already bulk-extended) destination. */
3759  dstBuf = ReadBufferWithoutRelcache(dstlocator, forkNum, blkno,
3760  RBM_NORMAL, bstrategy_dst,
3761  permanent);
3762  LockBuffer(dstBuf, BUFFER_LOCK_EXCLUSIVE);
3763  dstPage = BufferGetPage(dstBuf);
3764 
3765  START_CRIT_SECTION();
3766 
3767  /* Copy page data from the source to the destination. */
3768  memcpy(dstPage, srcPage, BLCKSZ);
3769  MarkBufferDirty(dstBuf);
3770 
3771  /* WAL-log the copied page. */
3772  if (use_wal)
3773  log_newpage_buffer(dstBuf, true);
3774 
3775  END_CRIT_SECTION();
3776 
3777  UnlockReleaseBuffer(dstBuf);
3778  UnlockReleaseBuffer(srcBuf);
3779  }
3780 }
3781 
3782 /* ---------------------------------------------------------------------
3783  * CreateAndCopyRelationData
3784  *
3785  * Create destination relation storage and copy all forks from the
3786  * source relation to the destination.
3787  *
3788  * Pass permanent as true for permanent relations and false for
3789  * unlogged relations. Currently this API is not supported for
3790  * temporary relations.
3791  * --------------------------------------------------------------------
3792  */
3793 void
3794 CreateAndCopyRelationData(RelFileLocator src_rlocator,
3795  RelFileLocator dst_rlocator, bool permanent)
3796 {
3797  RelFileLocatorBackend rlocator;
3798  char relpersistence;
3799 
3800  /* Set the relpersistence. */
3801  relpersistence = permanent ?
3802  RELPERSISTENCE_PERMANENT : RELPERSISTENCE_UNLOGGED;
3803 
3804  /*
3805  * Create and copy all forks of the relation. During create database we
3806  * have a separate cleanup mechanism which deletes the complete database
3807  * directory. Therefore, each individual relation doesn't need to be
3808  * registered for cleanup.
3809  */
3810  RelationCreateStorage(dst_rlocator, relpersistence, false);
3811 
3812  /* copy main fork. */
3813  RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, MAIN_FORKNUM,
3814  permanent);
3815 
3816  /* copy those extra forks that exist */
3817  for (ForkNumber forkNum = MAIN_FORKNUM + 1;
3818  forkNum <= MAX_FORKNUM; forkNum++)
3819  {
3820  if (smgrexists(smgropen(src_rlocator, InvalidBackendId), forkNum))
3821  {
3822  smgrcreate(smgropen(dst_rlocator, InvalidBackendId), forkNum, false);
3823 
3824  /*
3825  * WAL log creation if the relation is persistent, or this is the
3826  * init fork of an unlogged relation.
3827  */
3828  if (permanent || forkNum == INIT_FORKNUM)
3829  log_smgrcreate(&dst_rlocator, forkNum);
3830 
3831  /* Copy a fork's data, block by block. */
3832  RelationCopyStorageUsingBuffer(src_rlocator, dst_rlocator, forkNum,
3833  permanent);
3834  }
3835  }
3836 
3837  /* close source and destination smgr if exists. */
3838  rlocator.backend = InvalidBackendId;
3839 
3840  rlocator.locator = src_rlocator;
3841  smgrcloserellocator(rlocator);
3842 
3843  rlocator.locator = dst_rlocator;
3844  smgrcloserellocator(rlocator);
3845 }
3846 
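#ifdef NOT_USED
/*
 * Editor's illustration (hypothetical sketch, not part of bufmgr.c): copying
 * one permanent relation's storage to a freshly chosen relfilelocator.  As
 * the comment above notes, callers that rely on a coarser cleanup mechanism
 * (such as removing a whole database directory) need no per-relation cleanup
 * registration.  The example_* function name is invented.
 */
static void
example_copy_relation(RelFileLocator src, RelFileLocator dst)
{
	CreateAndCopyRelationData(src, dst, true);	/* permanent, so WAL-logged */
}
#endif
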
3847 /* ---------------------------------------------------------------------
3848  * FlushDatabaseBuffers
3849  *
3850  * This function writes all dirty pages of a database out to disk
3851  * (or more accurately, out to kernel disk buffers), ensuring that the
3852  * kernel has an up-to-date view of the database.
3853  *
3854  * Generally, the caller should be holding an appropriate lock to ensure
3855  * no other backend is active in the target database; otherwise more
3856  * pages could get dirtied.
3857  *
3858  * Note we don't worry about flushing any pages of temporary relations.
3859  * It's assumed these wouldn't be interesting.
3860  * --------------------------------------------------------------------
3861  */
3862 void
3863 FlushDatabaseBuffers(Oid dbid)
3864 {
3865  int i;
3866  BufferDesc *bufHdr;
3867 
3868  /* Make sure we can handle the pin inside the loop */
3869  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
3870 
3871  for (i = 0; i < NBuffers; i++)
3872  {
3873  uint32 buf_state;
3874 
3875  bufHdr = GetBufferDescriptor(i);
3876 
3877  /*
3878  * As in DropRelationBuffers, an unlocked precheck should be safe and
3879  * saves some cycles.
3880  */
3881  if (bufHdr->tag.dbOid != dbid)
3882  continue;
3883 
3884  ReservePrivateRefCountEntry();
3885 
3886  buf_state = LockBufHdr(bufHdr);
3887  if (bufHdr->tag.dbOid == dbid &&
3888  (buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
3889  {
3890  PinBuffer_Locked(bufHdr);
3891  LWLockAcquire(BufferDescriptorGetContentLock(bufHdr), LW_SHARED);
3892  FlushBuffer(bufHdr, NULL);
3893  LWLockRelease(BufferDescriptorGetContentLock(bufHdr));
3894  UnpinBuffer(bufHdr);
3895  }
3896  else
3897  UnlockBufHdr(bufHdr, buf_state);
3898  }
3899 }
3900 
3901 /*
3902  * Flush a previously pinned buffer, locked in either share or exclusive
3903  * mode, out to the OS.
3904  */
3905 void
3906 FlushOneBuffer(Buffer buffer)
3907 {
3908  BufferDesc *bufHdr;
3909 
3910  /* currently not needed, but no fundamental reason not to support */
3911  Assert(!BufferIsLocal(buffer));
3912 
3913  Assert(BufferIsPinned(buffer));
3914 
3915  bufHdr = GetBufferDescriptor(buffer - 1);
3916 
3918 
3919  FlushBuffer(bufHdr, NULL);
3920 }
3921 
3922 /*
3923  * ReleaseBuffer -- release the pin on a buffer
3924  */
3925 void
3926 ReleaseBuffer(Buffer buffer)
3927 {
3928  if (!BufferIsValid(buffer))
3929  elog(ERROR, "bad buffer ID: %d", buffer);
3930 
3931  if (BufferIsLocal(buffer))
3932  {
3933  ResourceOwnerForgetBuffer(CurrentResourceOwner, buffer);
3934 
3935  Assert(LocalRefCount[-buffer - 1] > 0);
3936  LocalRefCount[-buffer - 1]--;
3937  return;
3938  }
3939 
3940  UnpinBuffer(GetBufferDescriptor(buffer - 1));
3941 }
3942 
3943 /*
3944  * UnlockReleaseBuffer -- release the content lock and pin on a buffer
3945  *
3946  * This is just a shorthand for a common combination.
3947  */
3948 void
3949 UnlockReleaseBuffer(Buffer buffer)
3950 {
3951  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3952  ReleaseBuffer(buffer);
3953 }
3954 
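#ifdef NOT_USED
/*
 * Editor's illustration (hypothetical sketch, not part of bufmgr.c): the
 * common read / lock / examine / unlock-and-unpin pattern this shorthand
 * serves.  The example_* function name is invented.
 */
static void
example_examine_block(Relation rel, BlockNumber blkno)
{
	Buffer		buf = ReadBuffer(rel, blkno);

	LockBuffer(buf, BUFFER_LOCK_SHARE);
	/* ... inspect BufferGetPage(buf) while the content lock is held ... */
	UnlockReleaseBuffer(buf);
}
#endif
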
3955 /*
3956  * IncrBufferRefCount
3957  * Increment the pin count on a buffer that we have *already* pinned
3958  * at least once.
3959  *
3960  * This function cannot be used on a buffer we do not have pinned,
3961  * because it doesn't change the shared buffer state.
3962  */
3963 void
3964 IncrBufferRefCount(Buffer buffer)
3965 {
3966  Assert(BufferIsPinned(buffer));
3967  ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
3968  if (BufferIsLocal(buffer))
3969  LocalRefCount[-buffer - 1]++;
3970  else
3971  {
3972  PrivateRefCountEntry *ref;
3973 
3974  ref = GetPrivateRefCountEntry(buffer, true);
3975  Assert(ref != NULL);
3976  ref->refcount++;
3977  }
3978  ResourceOwnerRememberBuffer(CurrentResourceOwner, buffer);
3979 }
3980 
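#ifdef NOT_USED
/*
 * Editor's illustration (hypothetical sketch, not part of bufmgr.c): taking
 * an extra pin on a buffer we already hold, so that two code paths can each
 * call ReleaseBuffer() independently.  The example_* name is invented.
 */
static Buffer
example_duplicate_pin(Buffer buf)
{
	IncrBufferRefCount(buf);
	return buf;					/* caller must ReleaseBuffer() this reference too */
}
#endif
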
3981 /*
3982  * MarkBufferDirtyHint
3983  *
3984  * Mark a buffer dirty for non-critical changes.
3985  *
3986  * This is essentially the same as MarkBufferDirty, except:
3987  *
3988  * 1. The caller does not write WAL; so if checksums are enabled, we may need
3989  * to write an XLOG_FPI_FOR_HINT WAL record to protect against torn pages.
3990  * 2. The caller might have only share-lock instead of exclusive-lock on the
3991  * buffer's content lock.
3992  * 3. This function does not guarantee that the buffer is always marked dirty
3993  * (due to a race condition), so it cannot be used for important changes.
3994  */
3995 void
3996 MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
3997 {
3998  BufferDesc *bufHdr;
3999  Page page = BufferGetPage(buffer);
4000 
4001  if (!BufferIsValid(buffer))
4002  elog(ERROR, "bad buffer ID: %d", buffer);
4003 
4004  if (BufferIsLocal(buffer))
4005  {
4006  MarkLocalBufferDirty(buffer);
4007  return;
4008  }
4009 
4010  bufHdr = GetBufferDescriptor(buffer - 1);
4011 
4012  Assert(GetPrivateRefCount(buffer) > 0);
4013  /* here, either share or exclusive lock is OK */
4014  Assert(LWLockHeldByMe(BufferDescriptorGetContentLock(bufHdr)));
4015 
4016  /*
4017  * This routine might get called many times on the same page, if we are
4018  * making the first scan after commit of an xact that added/deleted many
4019  * tuples. So, be as quick as we can if the buffer is already dirty. We
4020  * do this by not acquiring spinlock if it looks like the status bits are
4021  * already set. Since we make this test unlocked, there's a chance we
4022  might fail to notice that the flags have just been cleared, and fail
4023  * to reset them, due to memory-ordering issues. But since this function
4024  * is only intended to be used in cases where failing to write out the
4025  * data would be harmless anyway, it doesn't really matter.
4026  */
4027  if ((pg_atomic_read_u32(&bufHdr->state) & (BM_DIRTY | BM_JUST_DIRTIED)) !=
4028  (BM_DIRTY | BM_JUST_DIRTIED))
4029  {
4030  XLogRecPtr lsn = InvalidXLogRecPtr;
4031  bool dirtied = false;
4032  bool delayChkptFlags = false;
4033  uint32 buf_state;
4034 
4035  /*
4036  * If we need to protect hint bit updates from torn writes, WAL-log a
4037  * full page image of the page. This full page image is only necessary
4038  * if the hint bit update is the first change to the page since the
4039  * last checkpoint.
4040  *
4041  * We don't check full_page_writes here because that logic is included
4042  * when we call XLogInsert() since the value changes dynamically.
4043  */
4044  if (XLogHintBitIsNeeded() &&
4045  (pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
4046  {
4047  /*
4048  * If we must not write WAL, due to a relfilelocator-specific
4049  * condition or being in recovery, don't dirty the page. We can
4050  * set the hint, just not dirty the page as a result so the hint
4051  * is lost when we evict the page or shutdown.
4052  *
4053  * See src/backend/storage/page/README for longer discussion.
4054  */
4055  if (RecoveryInProgress() ||
4056  RelFileLocatorSkippingWAL(BufTagGetRelFileLocator(&bufHdr->tag)))
4057  return;
4058 
4059  /*
4060  * If the block is already dirty because we either made a change
4061  * or set a hint already, then we don't need to write a full page
4062  * image. Note that aggressive cleaning of blocks dirtied by hint
4063  * bit setting would increase the call rate. Bulk setting of hint
4064  * bits would reduce the call rate...
4065  *
4066  * We must issue the WAL record before we mark the buffer dirty.
4067  * Otherwise we might write the page before we write the WAL. That
4068  * causes a race condition, since a checkpoint might occur between
4069  * writing the WAL record and marking the buffer dirty. We solve
4070  * that with a kluge, but one that is already in use during
4071  * transaction commit to prevent race conditions. Basically, we
4072  * simply prevent the checkpoint WAL record from being written
4073  * until we have marked the buffer dirty. We don't start the
4074  * checkpoint flush until we have marked dirty, so our checkpoint
4075  * must flush the change to disk successfully or the checkpoint
4076  * never gets written, so crash recovery will fix.
4077  *
4078  * It's possible we may enter here without an xid, so it is
4079  * essential that CreateCheckPoint waits for virtual transactions
4080  * rather than full transactionids.
4081  */
4082  Assert((MyProc->delayChkptFlags & DELAY_CHKPT_START) == 0);
4083  MyProc->delayChkptFlags |= DELAY_CHKPT_START;
4084  delayChkptFlags = true;
4085  lsn = XLogSaveBufferForHint(buffer, buffer_std);
4086  }
4087 
4088  buf_state = LockBufHdr(bufHdr);
4089 
4090  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
4091 
4092  if (!(buf_state & BM_DIRTY))
4093  {
4094  dirtied = true; /* Means "will be dirtied by this action" */
4095 
4096  /*
4097  * Set the page LSN if we wrote a backup block. We aren't supposed
4098  * to set this when only holding a share lock but as long as we
4099  * serialise it somehow we're OK. We choose to set LSN while
4100  * holding the buffer header lock, which causes any reader of an
4101  * LSN who holds only a share lock to also obtain a buffer header
4102  * lock before using PageGetLSN(), which is enforced in
4103  * BufferGetLSNAtomic().
4104  *
4105  * If checksums are enabled, you might think we should reset the
4106  * checksum here. That will happen when the page is written
4107  * sometime later in this checkpoint cycle.
4108  */
4109  if (!XLogRecPtrIsInvalid(lsn))
4110  PageSetLSN(page, lsn);
4111  }
4112 
4113  buf_state |= BM_DIRTY | BM_JUST_DIRTIED;
4114  UnlockBufHdr(bufHdr, buf_state);
4115 
4116  if (delayChkptFlags)
4117  MyProc->delayChkptFlags &= ~DELAY_CHKPT_START;
4118 
4119  if (dirtied)
4120  {
4121  VacuumPageDirty++;
4122  pgBufferUsage.shared_blks_dirtied++;
4123  if (VacuumCostActive)
4124  VacuumCostBalance += VacuumCostPageDirty;
4125  }
4126  }
4127 }
4128 
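#ifdef NOT_USED
/*
 * Editor's illustration (hypothetical sketch, not part of bufmgr.c):
 * applying a hint-style change under only a share lock.  The change must be
 * one that can safely be lost, since the buffer is not guaranteed to end up
 * dirty.  The example_* function name is invented.
 */
static void
example_set_page_full_hint(Buffer buf)
{
	Page		page = BufferGetPage(buf);

	PageSetFull(page);			/* a losable hint bit in the page header */
	MarkBufferDirtyHint(buf, true); /* true: standard page layout */
}
#endif
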
4129 /*
4130  * Release buffer content locks for shared buffers.
4131  *
4132  * Used to clean up after errors.
4133  *
4134  * Currently, we can expect that lwlock.c's LWLockReleaseAll() took care
4135  * of releasing buffer content locks per se; the only thing we need to deal
4136  * with here is clearing any PIN_COUNT request that was in progress.
4137  */
4138 void
4139 UnlockBuffers(void)
4140 {
4141  BufferDesc *buf = PinCountWaitBuf;
4142 
4143  if (buf)
4144  {
4145  uint32 buf_state;
4146 
4147  buf_state = LockBufHdr(buf);
4148 
4149  /*
4150  * Don't complain if flag bit not set; it could have been reset but we
4151  * got a cancel/die interrupt before getting the signal.
4152  */
4153  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
4154  buf->wait_backend_pgprocno == MyProc->pgprocno)
4155  buf_state &= ~BM_PIN_COUNT_WAITER;
4156 
4157  UnlockBufHdr(buf, buf_state);
4158 
4159  PinCountWaitBuf = NULL;
4160  }
4161 }
4162 
4163 /*
4164  * Acquire or release the content_lock for the buffer.
4165  */
4166 void
4167 LockBuffer(Buffer buffer, int mode)
4168 {
4169  BufferDesc *buf;
4170 
4172  if (BufferIsLocal(buffer))
4173  return; /* local buffers need no lock */
4174 
4175  buf = GetBufferDescriptor(buffer - 1);
4176 
4177  if (mode == BUFFER_LOCK_UNLOCK)
4178  LWLockRelease(BufferDescriptorGetContentLock(buf));
4179  else if (mode == BUFFER_LOCK_SHARE)
4180  LWLockAcquire(BufferDescriptorGetContentLock(buf), LW_SHARED);
4181  else if (mode == BUFFER_LOCK_EXCLUSIVE)
4182  LWLockAcquire(BufferDescriptorGetContentLock(buf), LW_EXCLUSIVE);
4183  else
4184  elog(ERROR, "unrecognized buffer lock mode: %d", mode);
4185 }
4186 
4187 /*
4188  * Acquire the content_lock for the buffer, but only if we don't have to wait.
4189  *
4190  * This assumes the caller wants BUFFER_LOCK_EXCLUSIVE mode.
4191  */
4192 bool
4193 ConditionalLockBuffer(Buffer buffer)
4194 {
4195  BufferDesc *buf;
4196 
4198  if (BufferIsLocal(buffer))
4199  return true; /* act as though we got it */
4200 
4201  buf = GetBufferDescriptor(buffer - 1);
4202 
4203  return LWLockConditionalAcquire(BufferDescriptorGetContentLock(buf),
4204  LW_EXCLUSIVE);
4205 }
4206 
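#ifdef NOT_USED
/*
 * Editor's illustration (hypothetical sketch, not part of bufmgr.c):
 * opportunistic exclusive locking - skip the page rather than block if the
 * content lock is currently contended.  The example_* name is invented.
 */
static bool
example_try_exclusive(Buffer buf)
{
	if (!ConditionalLockBuffer(buf))
		return false;			/* lock not available; caller retries later */

	/* ... modify the page, MarkBufferDirty(buf), emit WAL ... */
	LockBuffer(buf, BUFFER_LOCK_UNLOCK);
	return true;
}
#endif
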
4207 /*
4208  * LockBufferForCleanup - lock a buffer in preparation for deleting items
4209  *
4210  * Items may be deleted from a disk page only when the caller (a) holds an
4211  * exclusive lock on the buffer and (b) has observed that no other backend
4212  * holds a pin on the buffer. If there is a pin, then the other backend
4213  * might have a pointer into the buffer (for example, a heapscan reference
4214  * to an item --- see README for more details). It's OK if a pin is added
4215  * after the cleanup starts, however; the newly-arrived backend will be
4216  * unable to look at the page until we release the exclusive lock.
4217  *
4218  * To implement this protocol, a would-be deleter must pin the buffer and
4219  * then call LockBufferForCleanup(). LockBufferForCleanup() is similar to
4220  * LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE), except that it loops until
4221  * it has successfully observed pin count = 1.
4222  */
4223 void
4224 LockBufferForCleanup(Buffer buffer)
4225 {
4226  BufferDesc *bufHdr;
4227  char *new_status = NULL;
4228  TimestampTz waitStart = 0;
4229  bool logged_recovery_conflict = false;
4230 
4232  Assert(PinCountWaitBuf == NULL);
4233 
4234  if (BufferIsLocal(buffer))
4235  {
4236  /* There should be exactly one pin */
4237  if (LocalRefCount[-buffer - 1] != 1)
4238  elog(ERROR, "incorrect local pin count: %d",
4239  LocalRefCount[-buffer - 1]);
4240  /* Nobody else to wait for */
4241  return;
4242  }
4243 
4244  /* There should be exactly one local pin */
4245  if (GetPrivateRefCount(buffer) != 1)
4246  elog(ERROR, "incorrect local pin count: %d",
4247  GetPrivateRefCount(buffer));
4248 
4249  bufHdr = GetBufferDescriptor(buffer - 1);
4250 
4251  for (;;)
4252  {
4253  uint32 buf_state;
4254 
4255  /* Try to acquire lock */
4256  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
4257  buf_state = LockBufHdr(bufHdr);
4258 
4259  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
4260  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
4261  {
4262  /* Successfully acquired exclusive lock with pincount 1 */
4263  UnlockBufHdr(bufHdr, buf_state);
4264 
4265  /*
4266  * Emit the log message if recovery conflict on buffer pin was
4267  * resolved but the startup process waited longer than
4268  * deadlock_timeout for it.
4269  */
4270  if (logged_recovery_conflict)
4271  LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN,
4272  waitStart, GetCurrentTimestamp(),
4273  NULL, false);
4274 
4275  /* Report change to non-waiting status */
4276  if (new_status)
4277  {
4278  set_ps_display(new_status);
4279  pfree(new_status);
4280  }
4281  return;
4282  }
4283  /* Failed, so mark myself as waiting for pincount 1 */
4284  if (buf_state & BM_PIN_COUNT_WAITER)
4285  {
4286  UnlockBufHdr(bufHdr, buf_state);
4287  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4288  elog(ERROR, "multiple backends attempting to wait for pincount 1");
4289  }
4290  bufHdr->wait_backend_pgprocno = MyProc->pgprocno;
4291  PinCountWaitBuf = bufHdr;
4292  buf_state |= BM_PIN_COUNT_WAITER;
4293  UnlockBufHdr(bufHdr, buf_state);
4294  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4295 
4296  /* Wait to be signaled by UnpinBuffer() */
4297  if (InHotStandby)
4298  {
4299  /* Report change to waiting status */
4300  if (update_process_title && new_status == NULL)
4301  {
4302  const char *old_status;
4303  int len;
4304 
4305  old_status = get_ps_display(&len);
4306  new_status = (char *) palloc(len + 8 + 1);
4307  memcpy(new_status, old_status, len);
4308  strcpy(new_status + len, " waiting");
4309  set_ps_display(new_status);
4310  new_status[len] = '\0'; /* truncate off " waiting" */
4311  }
4312 
4313  /*
4314  * Emit the log message if the startup process is waiting longer
4315  * than deadlock_timeout for recovery conflict on buffer pin.
4316  *
4317  * Skip this if first time through because the startup process has
4318  * not started waiting yet in this case. So, the wait start
4319  * timestamp is set after this logic.
4320  */
4321  if (waitStart != 0 && !logged_recovery_conflict)
4322  {
4323  TimestampTz now = GetCurrentTimestamp();
4324 
4325  if (TimestampDifferenceExceeds(waitStart, now,
4326  DeadlockTimeout))
4327  {
4328  LogRecoveryConflict(PROCSIG_RECOVERY_CONFLICT_BUFFERPIN,
4329  waitStart, now, NULL, true);
4330  logged_recovery_conflict = true;
4331  }
4332  }
4333 
4334  /*
4335  * Set the wait start timestamp if logging is enabled and first
4336  * time through.
4337  */
4338  if (log_recovery_conflict_waits && waitStart == 0)
4339  waitStart = GetCurrentTimestamp();
4340 
4341  /* Publish the bufid that Startup process waits on */
4342  SetStartupBufferPinWaitBufId(buffer - 1);
4343  /* Set alarm and then wait to be signaled by UnpinBuffer() */
4344  ResolveRecoveryConflictWithBufferPin();
4345  /* Reset the published bufid */
4346  SetStartupBufferPinWaitBufId(-1);
4347  }
4348  else
4350 
4351  /*
4352  * Remove flag marking us as waiter. Normally this will not be set
4353  * anymore, but ProcWaitForSignal() can return for other signals as
4354  * well. We take care to only reset the flag if we're the waiter, as
4355  * theoretically another backend could have started waiting. That's
4356  * impossible with the current usages due to table level locking, but
4357  * better be safe.
4358  */
4359  buf_state = LockBufHdr(bufHdr);
4360  if ((buf_state & BM_PIN_COUNT_WAITER) != 0 &&
4361  bufHdr->wait_backend_pgprocno == MyProc->pgprocno)
4362  buf_state &= ~BM_PIN_COUNT_WAITER;
4363  UnlockBufHdr(bufHdr, buf_state);
4364 
4365  PinCountWaitBuf = NULL;
4366  /* Loop back and try again */
4367  }
4368 }
4369 
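#ifdef NOT_USED
/*
 * Editor's illustration (hypothetical sketch, not part of bufmgr.c): the
 * protocol described above - pin the buffer first, then wait until we hold
 * an exclusive lock with pin count 1.  The example_* name is invented.
 */
static void
example_cleanup_block(Relation rel, BlockNumber blkno)
{
	Buffer		buf = ReadBuffer(rel, blkno);

	LockBufferForCleanup(buf);
	/* ... pin count is now 1; items on the page may be removed safely ... */
	UnlockReleaseBuffer(buf);
}
#endif
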
4370 /*
4371  * Check called from RecoveryConflictInterrupt handler when Startup
4372  * process requests cancellation of all pin holders that are blocking it.
4373  */
4374 bool
4375 HoldingBufferPinThatDelaysRecovery(void)
4376 {
4377  int bufid = GetStartupBufferPinWaitBufId();
4378 
4379  /*
4380  * If we get woken slowly then it's possible that the Startup process was
4381  * already woken by other backends before we got here. Also possible that
4382  * we get here by multiple interrupts or interrupts at inappropriate
4383  * times, so make sure we do nothing if the bufid is not set.
4384  */
4385  if (bufid < 0)
4386  return false;
4387 
4388  if (GetPrivateRefCount(bufid + 1) > 0)
4389  return true;
4390 
4391  return false;
4392 }
4393 
4394 /*
4395  * ConditionalLockBufferForCleanup - as above, but don't wait to get the lock
4396  *
4397  * We won't loop, but just check once to see if the pin count is OK. If
4398  * not, return false with no lock held.
4399  */
4400 bool
4401 ConditionalLockBufferForCleanup(Buffer buffer)
4402 {
4403  BufferDesc *bufHdr;
4404  uint32 buf_state,
4405  refcount;
4406 
4408 
4409  if (BufferIsLocal(buffer))
4410  {
4411  refcount = LocalRefCount[-buffer - 1];
4412  /* There should be exactly one pin */
4413  Assert(refcount > 0);
4414  if (refcount != 1)
4415  return false;
4416  /* Nobody else to wait for */
4417  return true;
4418  }
4419 
4420  /* There should be exactly one local pin */
4421  refcount = GetPrivateRefCount(buffer);
4422  Assert(refcount);
4423  if (refcount != 1)
4424  return false;
4425 
4426  /* Try to acquire lock */
4427  if (!ConditionalLockBuffer(buffer))
4428  return false;
4429 
4430  bufHdr = GetBufferDescriptor(buffer - 1);
4431  buf_state = LockBufHdr(bufHdr);
4432  refcount = BUF_STATE_GET_REFCOUNT(buf_state);
4433 
4434  Assert(refcount > 0);
4435  if (refcount == 1)
4436  {
4437  /* Successfully acquired exclusive lock with pincount 1 */
4438  UnlockBufHdr(bufHdr, buf_state);
4439  return true;
4440  }
4441 
4442  /* Failed, so release the lock */
4443  UnlockBufHdr(bufHdr, buf_state);
4444  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
4445  return false;
4446 }
4447 
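#ifdef NOT_USED
/*
 * Editor's illustration (hypothetical sketch, not part of bufmgr.c):
 * vacuum-style opportunistic cleanup - if the cleanup lock is not
 * immediately available, just drop the pin and move on.  The example_*
 * function name is invented.
 */
static void
example_try_cleanup(Buffer buf)
{
	if (ConditionalLockBufferForCleanup(buf))
	{
		/* ... exclusive lock with pin count 1: safe to prune the page ... */
		UnlockReleaseBuffer(buf);
	}
	else
		ReleaseBuffer(buf);
}
#endif
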
4448 /*
4449  * IsBufferCleanupOK - as above, but we already have the lock
4450  *
4451  * Check whether it's OK to perform cleanup on a buffer we've already
4452  * locked. If we observe that the pin count is 1, our exclusive lock
4453  * happens to be a cleanup lock, and we can proceed with anything that
4454  * would have been allowable had we sought a cleanup lock originally.
4455  */
4456 bool
4457 IsBufferCleanupOK(Buffer buffer)
4458 {
4459  BufferDesc *bufHdr;
4460  uint32 buf_state;
4461 
4463 
4464  if (BufferIsLocal(buffer))
4465  {
4466  /* There should be exactly one pin */
4467  if (LocalRefCount[-buffer - 1] != 1)
4468  return false;
4469  /* Nobody else to wait for */
4470  return true;
4471  }
4472 
4473  /* There should be exactly one local pin */
4474  if (GetPrivateRefCount(buffer) != 1)
4475  return false;
4476 
4477  bufHdr = GetBufferDescriptor(buffer - 1);
4478 
4479  /* caller must hold exclusive lock on buffer */
4480  Assert(LWLockHeldByMeInMode(BufferDescriptorGetContentLock(bufHdr),
4481  LW_EXCLUSIVE));
4482 
4483  buf_state = LockBufHdr(bufHdr);
4484 
4485  Assert(BUF_STATE_GET_REFCOUNT(buf_state) > 0);
4486  if (BUF_STATE_GET_REFCOUNT(buf_state) == 1)
4487  {
4488  /* pincount is OK. */
4489  UnlockBufHdr(bufHdr, buf_state);
4490  return true;
4491  }
4492 
4493  UnlockBufHdr(bufHdr, buf_state);
4494  return false;
4495 }
4496 
4497 
4498 /*
4499  * Functions for buffer I/O handling
4500  *
4501  * Note: We assume that nested buffer I/O never occurs.
4502  * i.e. at most one BM_IO_IN_PROGRESS bit is set per proc.
4503  *
4504  * Also note that these are used only for shared buffers, not local ones.
4505  */
4506 
4507 /*
4508  * WaitIO -- Block until the IO_IN_PROGRESS flag on 'buf' is cleared.
4509  */
4510 static void
4511 WaitIO(BufferDesc *buf)
4512 {
4513  ConditionVariable *cv = BufferDescriptorGetIOCV(buf);
4514 
4515  ConditionVariablePrepareToSleep(cv);
4516  for (;;)
4517  {
4518  uint32 buf_state;
4519 
4520  /*
4521  * It may not be necessary to acquire the spinlock to check the flag
4522  * here, but since this test is essential for correctness, we'd better
4523  * play it safe.
4524  */
4525  buf_state = LockBufHdr(buf);
4526  UnlockBufHdr(buf, buf_state);
4527 
4528  if (!(buf_state & BM_IO_IN_PROGRESS))
4529  break;
4530  ConditionVariableSleep(cv, WAIT_EVENT_BUFFER_IO);
4531  }
4532  ConditionVariableCancelSleep();
4533 }
4534 
4535 /*
4536  * StartBufferIO: begin I/O on this buffer
4537  * (Assumptions)
4538  * My process is executing no IO
4539  * The buffer is Pinned
4540  *
4541  * In some scenarios there are race conditions in which multiple backends
4542  * could attempt the same I/O operation concurrently. If someone else
4543  * has already started I/O on this buffer then we will block on the
4544  * I/O condition variable until it's done.
4545  *
4546  * Input operations are only attempted on buffers that are not BM_VALID,
4547  * and output operations only on buffers that are BM_VALID and BM_DIRTY,
4548  * so we can always tell if the work is already done.
4549  *
4550  * Returns true if we successfully marked the buffer as I/O busy,
4551  * false if someone else already did the work.
4552  */
4553 static bool
4554 StartBufferIO(BufferDesc *buf, bool forInput)
4555 {
4556  uint32 buf_state;
4557 
4558  Assert(!InProgressBuf);
4559 
4560  for (;;)
4561  {
4562  buf_state = LockBufHdr(buf);
4563 
4564  if (!(buf_state & BM_IO_IN_PROGRESS))
4565  break;
4566  UnlockBufHdr(buf, buf_state);
4567  WaitIO(buf);
4568  }
4569 
4570  /* Once we get here, there is definitely no I/O active on this buffer */
4571 
4572  if (forInput ? (buf_state & BM_VALID) : !(buf_state & BM_DIRTY))
4573  {
4574  /* someone else already did the I/O */
4575  UnlockBufHdr(buf, buf_state);
4576  return false;
4577  }
4578 
4579  buf_state |= BM_IO_IN_PROGRESS;
4580  UnlockBufHdr(buf, buf_state);
4581 
4582  InProgressBuf = buf;
4583  IsForInput = forInput;
4584 
4585  return true;
4586 }
4587 
4588 /*
4589  * TerminateBufferIO: release a buffer we were doing I/O on
4590  * (Assumptions)
4591  * My process is executing IO for the buffer
4592  * BM_IO_IN_PROGRESS bit is set for the buffer
4593  * The buffer is Pinned
4594  *
4595  * If clear_dirty is true and BM_JUST_DIRTIED is not set, we clear the
4596  * buffer's BM_DIRTY flag. This is appropriate when terminating a
4597  * successful write. The check on BM_JUST_DIRTIED is necessary to avoid
4598  * marking the buffer clean if it was re-dirtied while we were writing.
4599  *
4600  * set_flag_bits gets ORed into the buffer's flags. It must include
4601  * BM_IO_ERROR in a failure case. For successful completion it could
4602  * be 0, or BM_VALID if we just finished reading in the page.
4603  */
4604 static void
4605 TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits)
4606 {
4607  uint32 buf_state;
4608 
4609  Assert(buf == InProgressBuf);
4610 
4611  buf_state = LockBufHdr(buf);
4612 
4613  Assert(buf_state & BM_IO_IN_PROGRESS);
4614 
4615  buf_state &= ~(BM_IO_IN_PROGRESS | BM_IO_ERROR);
4616  if (clear_dirty && !(buf_state & BM_JUST_DIRTIED))
4617  buf_state &= ~(BM_DIRTY | BM_CHECKPOINT_NEEDED);
4618 
4619  buf_state |= set_flag_bits;
4620  UnlockBufHdr(buf, buf_state);
4621 
4622  InProgressBuf = NULL;
4623 
4623 
4624  ConditionVariableBroadcast(BufferDescriptorGetIOCV(buf));
4625 }
4626 
4627 /*
4628  * AbortBufferIO: Clean up any active buffer I/O after an error.
4629  *
4630  * All LWLocks we might have held have been released,
4631  * but we haven't yet released buffer pins, so the buffer is still pinned.
4632  *
4633  * If I/O was in progress, we always set BM_IO_ERROR, even though it's
4634  * possible the error condition wasn't related to the I/O.
4635  */
4636 void
4637 AbortBufferIO(void)
4638 {
4639  BufferDesc *buf = InProgressBuf;
4640 
4641  if (buf)
4642  {
4643  uint32 buf_state;
4644 
4645  buf_state = LockBufHdr(buf);
4646  Assert(buf_state & BM_IO_IN_PROGRESS);
4647  if (IsForInput)
4648  {
4649  Assert(!(buf_state & BM_DIRTY));
4650 
4651  /* We'd better not think buffer is valid yet */
4652  Assert(!(buf_state & BM_VALID));
4653  UnlockBufHdr(buf, buf_state);
4654  }
4655  else
4656  {
4657  Assert(buf_state & BM_DIRTY);
4658  UnlockBufHdr(buf, buf_state);
4659  /* Issue notice if this is not the first failure... */
4660  if (buf_state & BM_IO_ERROR)
4661  {
4662  /* Buffer is pinned, so we can read tag without spinlock */
4663  char *path;
4664 
4665  path = relpathperm(BufTagGetRelFileLocator(&buf->tag),
4666  BufTagGetForkNum(&buf->tag));
4667  ereport(WARNING,
4668  (errcode(ERRCODE_IO_ERROR),
4669  errmsg("could not write block %u of %s",
4670  buf->tag.blockNum, path),
4671  errdetail("Multiple failures --- write error might be permanent.")));
4672  pfree(path);
4673  }
4674  }
4675  TerminateBufferIO(buf, false, BM_IO_ERROR);
4676  }
4677 }
4678 
4679 /*
4680  * Error context callback for errors occurring during shared buffer writes.
4681  */
4682 static void
4683 shared_buffer_write_error_callback(void *arg)
4684 {
4685  BufferDesc *bufHdr = (BufferDesc *) arg;
4686 
4687  /* Buffer is pinned, so we can read the tag without locking the spinlock */
4688  if (bufHdr != NULL)
4689  {
4690  char *path = relpathperm(BufTagGetRelFileLocator(&bufHdr->tag),
4691  BufTagGetForkNum(&bufHdr->tag));
4692 
4693  errcontext("writing block %u of relation %s",
4694  bufHdr->tag.blockNum, path);
4695  pfree(path);
4696  }
4697 }
4698 
4699 /*
4700  * Error context callback for errors occurring during local buffer writes.
4701  */
4702 static void
4703 local_buffer_write_error_callback(void *arg)
4704 {
4705  BufferDesc *bufHdr = (BufferDesc *) arg;
4706 
4707  if (bufHdr != NULL)
4708  {
4709  char *path = relpathbackend(BufTagGetRelFileLocator(&bufHdr->tag),
4710  MyBackendId,
4711  BufTagGetForkNum(&bufHdr->tag));
4712 
4713  errcontext("writing block %u of relation %s",
4714  bufHdr->tag.blockNum, path);
4715  pfree(path);
4716  }
4717 }
4718 
4719 /*
4720  * RelFileLocator qsort/bsearch comparator; see RelFileLocatorEquals.
4721  */
4722 static int
4723 rlocator_comparator(const void *p1, const void *p2)
4724 {
4725  RelFileLocator n1 = *(const RelFileLocator *) p1;
4726  RelFileLocator n2 = *(const RelFileLocator *) p2;
4727 
4728  if (n1.relNumber < n2.relNumber)
4729  return -1;
4730  else if (n1.relNumber > n2.relNumber)
4731  return 1;
4732 
4733  if (n1.dbOid < n2.dbOid)
4734  return -1;
4735  else if (n1.dbOid > n2.dbOid)
4736  return 1;
4737 
4738  if (n1.spcOid < n2.spcOid)
4739  return -1;
4740  else if (n1.spcOid > n2.spcOid)
4741  return 1;
4742  else
4743  return 0;
4744 }
4745 
4746 /*
4747  * Lock buffer header - set BM_LOCKED in buffer state.
4748  */
4749 uint32
4750 LockBufHdr(BufferDesc *desc)
4751 {
4752  SpinDelayStatus delayStatus;
4753  uint32 old_buf_state;
4754 
4755  init_local_spin_delay(&delayStatus);
4756 
4757  while (true)
4758  {
4759  /* set BM_LOCKED flag */
4760  old_buf_state = pg_atomic_fetch_or_u32(&desc->state, BM_LOCKED);
4761  /* if it wasn't set before we're OK */
4762  if (!(old_buf_state & BM_LOCKED))
4763  break;
4764  perform_spin_delay(&delayStatus);
4765  }
4766  finish_spin_delay(&delayStatus);
4767  return old_buf_state | BM_LOCKED;
4768 }
4769 
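#ifdef NOT_USED
/*
 * Editor's illustration (hypothetical sketch, not part of bufmgr.c): the
 * usual pattern for reading buffer-header state consistently - take the
 * header spinlock, read, then release by writing the state word back.
 * The example_* function name is invented.
 */
static bool
example_header_is_dirty(BufferDesc *desc)
{
	uint32		buf_state = LockBufHdr(desc);
	bool		dirty = (buf_state & BM_DIRTY) != 0;

	UnlockBufHdr(desc, buf_state);
	return dirty;
}
#endif
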
4770 /*
4771  * Wait until the BM_LOCKED flag isn't set anymore and return the buffer's
4772  * state at that point.
4773  *
4774  * Obviously the buffer could be locked by the time the value is returned, so
4775  * this is primarily useful in CAS style loops.
4776  */
4777 static uint32
4778 WaitBufHdrUnlocked(BufferDesc *buf)
4779 {
4780  SpinDelayStatus delayStatus;
4781  uint32 buf_state;
4782 
4783  init_local_spin_delay(&delayStatus);
4784 
4785  buf_state = pg_atomic_read_u32(&buf->state);
4786 
4787  while (buf_state & BM_LOCKED)
4788  {
4789  perform_spin_delay(&delayStatus);
4790  buf_state = pg_atomic_read_u32(&buf->state);
4791  }
4792 
4793  finish_spin_delay(&delayStatus);
4794 
4795  return buf_state;
4796 }
4797 
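#ifdef NOT_USED
/*
 * Editor's illustration (hypothetical sketch, not part of bufmgr.c): a
 * CAS-style loop of the kind mentioned above, setting a flag bit without
 * taking the header spinlock.  If the header is locked, wait for it to be
 * released before retrying the compare-and-exchange.  The example_* name
 * is invented.
 */
static void
example_set_flag_without_spinlock(BufferDesc *desc, uint32 flag)
{
	uint32		old_buf_state = pg_atomic_read_u32(&desc->state);

	for (;;)
	{
		if (old_buf_state & BM_LOCKED)
			old_buf_state = WaitBufHdrUnlocked(desc);

		if (pg_atomic_compare_exchange_u32(&desc->state, &old_buf_state,
										   old_buf_state | flag))
			break;				/* on failure, old_buf_state was refreshed */
	}
}
#endif
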
4798 /*
4799  * BufferTag comparator.
4800  */
4801 static inline int
4802 buffertag_comparator(const BufferTag *ba, const BufferTag *bb)
4803 {
4804  int ret;
4805  RelFileLocator rlocatora;
4806  RelFileLocator rlocatorb;
4807 
4808  rlocatora = BufTagGetRelFileLocator(ba);
4809  rlocatorb = BufTagGetRelFileLocator(bb);
4810 
4811  ret = rlocator_comparator(&rlocatora, &rlocatorb);
4812 
4813  if (ret != 0)
4814  return ret;
4815 
4816  if (BufTagGetForkNum(ba) < BufTagGetForkNum(bb))
4817  return -1;
4818  if (BufTagGetForkNum(ba) > BufTagGetForkNum(bb))
4819  return 1;
4820 
4821  if (ba->blockNum < bb->blockNum)
4822  return -1;
4823  if (ba->blockNum > bb->blockNum)
4824  return 1;
4825 
4826  return 0;
4827 }
4828 
4829 /*
4830  * Comparator determining the writeout order in a checkpoint.
4831  *
4832  * It is important that tablespaces are compared first, the logic balancing
4833  * writes between tablespaces relies on it.
4834  */
4835 static inline int
4836 ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b)
4837 {
4838  /* compare tablespace */
4839  if (a->tsId < b->tsId)
4840  return -1;
4841  else if (a->tsId > b->tsId)
4842  return 1;
4843  /* compare relation */
4844  if (a->relNumber < b->relNumber)
4845  return -1;
4846  else if (a->relNumber > b->relNumber)
4847  return 1;
4848  /* compare fork */
4849  else if (a->forkNum < b->forkNum)
4850  return -1;
4851  else if (a->forkNum > b->forkNum)
4852  return 1;
4853  /* compare block number */
4854  else if (a->blockNum < b->blockNum)
4855  return -1;
4856  else if (a->blockNum > b->blockNum)
4857  return 1;
4858  /* equal page IDs are unlikely, but not impossible */
4859  return 0;
4860 }
4861 
4862 /*
4863  * Comparator for a Min-Heap over the per-tablespace checkpoint completion
4864  * progress.
4865  */
4866 static int
4867 ts_ckpt_progress_comparator(Datum a, Datum b, void *arg)
4868 {
4869  CkptTsStatus *sa = (CkptTsStatus *) a;
4870  CkptTsStatus *sb = (CkptTsStatus *) b;
4871 
4872  /* we want a min-heap, so return 1 for the a < b */
4873  if (sa->progress < sb->progress)
4874  return 1;
4875  else if (sa->progress == sb->progress)
4876  return 0;
4877  else
4878  return -1;
4879 }
4880 
4881 /*
4882  * Initialize a writeback context, discarding potential previous state.
4883  *
4884  * *max_pending is a pointer instead of an immediate value, so the coalesce
4885  * limits can easily be changed by the GUC mechanism, and so calling code does
4886  * not have to check the current configuration. A value of 0 means that no
4887  * writeback control will be performed.
4888  */
4889 void
4890 WritebackContextInit(WritebackContext *context, int *max_pending)
4891 {
4892  Assert(*max_pending <= WRITEBACK_MAX_PENDING_FLUSHES);
4893 
4894  context->max_pending = max_pending;
4895  context->nr_pending = 0;
4896 }
4897 
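#ifdef NOT_USED
/*
 * Editor's illustration (hypothetical sketch, not part of bufmgr.c): a
 * private writeback context tied to the backend_flush_after GUC; written-out
 * buffers are queued here and flushed to the kernel in sorted batches.
 * The example_* function name is invented.
 */
static void
example_writeback_usage(BufferDesc *bufHdr)
{
	WritebackContext wb_context;
	BufferTag	tag = bufHdr->tag;	/* normally copied under the header lock */

	WritebackContextInit(&wb_context, &backend_flush_after);
	ScheduleBufferTagForWriteback(&wb_context, &tag);
	IssuePendingWritebacks(&wb_context);	/* flush anything still queued */
}
#endif
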
4898 /*
4899  * Add buffer to list of pending writeback requests.
4900  */
4901 void
4902 ScheduleBufferTagForWriteback(WritebackContext *context, BufferTag *tag)
4903 {
4904  PendingWriteback *pending;
4905 
4906  /*
4907  * Add buffer to the pending writeback array, unless writeback control is
4908  * disabled.
4909  */
4910  if (*context->max_pending > 0)
4911  {
4912  Assert(*context->max_pending <= WRITEBACK_MAX_PENDING_FLUSHES);
4913 
4914  pending = &context->pending_writebacks[context->nr_pending++];
4915 
4916  pending->tag = *tag;
4917  }
4918 
4919  /*
4920  * Perform pending flushes if the writeback limit is exceeded. This
4921  * includes the case where previously an item has been added, but control
4922  * is now disabled.
4923  */
4924  if (context->nr_pending >= *context->max_pending)
4925  IssuePendingWritebacks(context);
4926 }
4927 
4928 #define ST_SORT sort_pending_writebacks
4929 #define ST_ELEMENT_TYPE PendingWriteback
4930 #define ST_COMPARE(a, b) buffertag_comparator(&a->tag, &b->tag)
4931 #define ST_SCOPE static
4932 #define ST_DEFINE
4933 #include "lib/sort_template.h"
4934 
4935 /*
4936  * Issue all pending writeback requests, previously scheduled with
4937  * ScheduleBufferTagForWriteback, to the OS.
4938  *
4939  * Because this is only used to improve the OS's I/O scheduling, we try never
4940  * to error out - it's just a hint.
4941  */
4942 void
4943 IssuePendingWritebacks(WritebackContext *context)
4944 {
4945  int i;
4946 
4947  if (context->nr_pending == 0)
4948  return;
4949 
4950  /*
4951  * Executing the writes in order can make them a lot faster, and allows us to
4952  * merge writeback requests for consecutive blocks into larger writebacks.
4953  */
4954  sort_pending_writebacks(context->pending_writebacks, context->nr_pending);
4955 
4956  /*
4957  * Coalesce neighbouring writes, but nothing else. For that we iterate
4958  * through the, now sorted, array of pending flushes, and look forward to
4959  * find all neighbouring (or identical) writes.
4960  */
4961  for (i = 0; i < context->nr_pending; i++)
4962  {
4965  SMgrRelation reln;
4966  int ahead;
4967  BufferTag tag;
4968  RelFileLocator currlocator;
4969  Size nblocks = 1;
4970 
4971  cur = &context->pending_writebacks[i];
4972  tag = cur->tag;
4973  currlocator = BufTagGetRelFileLocator(&tag);
4974 
4975  /*
4976  * Peek ahead, into following writeback requests, to see if they can
4977  * be combined with the current one.
4978  */
4979  for (ahead = 0; i + ahead + 1 < context->nr_pending; ahead++)
4980  {
4981 
4982  next = &context->pending_writebacks[i + ahead + 1];
4983 
4984  /* different file, stop */
4985  if (!RelFileLocatorEquals(currlocator,
4986  BufTagGetRelFileLocator(&next->tag)) ||
4987  BufTagGetForkNum(&cur->tag) != BufTagGetForkNum(&next->tag))
4988  break;
4989 
4990  /* ok, block queued twice, skip */
4991  if (cur->tag.blockNum == next->tag.blockNum)
4992  continue;
4993 
4994  /* only merge consecutive writes */
4995  if (cur->tag.blockNum + 1 != next->tag.blockNum)
4996  break;
4997 
4998  nblocks++;
4999  cur = next;
5000  }
5001 
5002  i += ahead;
5003 
5004  /* and finally tell the kernel to write the data to storage */
5005  reln = smgropen(currlocator, InvalidBackendId);
5006  smgrwriteback(reln, BufTagGetForkNum(&tag), tag.blockNum, nblocks);
5007  }
5008 
5009  context->nr_pending = 0;
5010 }
5011 
5012 
5013 /*
5014  * Implement slower/larger portions of TestForOldSnapshot
5015  *
5016  * Smaller/faster portions are put inline, but the entire set of logic is too
5017  * big for that.
5018  */
5019 void
5020 TestForOldSnapshot_impl(Snapshot snapshot, Relation relation)
5021 {
5022  if (RelationAllowsEarlyPruning(relation)
5023  && (snapshot)->whenTaken < GetOldSnapshotThresholdTimestamp())
5024  ereport(ERROR,
5025  (errcode(ERRCODE_SNAPSHOT_TOO_OLD),
5026  errmsg("snapshot too old")));
5027 }
static bool pg_atomic_compare_exchange_u32(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
Definition: atomics.h:306
static uint32 pg_atomic_fetch_or_u32(volatile pg_atomic_uint32 *ptr, uint32 or_)
Definition: atomics.h:367
static void pg_atomic_unlocked_write_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.h:272
static uint32 pg_atomic_read_u32(volatile pg_atomic_uint32 *ptr)
Definition: atomics.h:236
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition: timestamp.c:1730
TimestampTz GetCurrentTimestamp(void)
Definition: timestamp.c:1585
Datum now(PG_FUNCTION_ARGS)
Definition: timestamp.c:1549
int BackendId
Definition: backendid.h:21
#define InvalidBackendId
Definition: backendid.h:23
int BgWriterDelay
Definition: bgwriter.c:61
void binaryheap_build(binaryheap *heap)
Definition: binaryheap.c:125
void binaryheap_add_unordered(binaryheap *heap, Datum d)
Definition: binaryheap.c:109
binaryheap * binaryheap_allocate(int capacity, binaryheap_comparator compare, void *arg)
Definition: binaryheap.c:32
Datum binaryheap_remove_first(binaryheap *heap)
Definition: binaryheap.c:173
void binaryheap_free(binaryheap *heap)
Definition: binaryheap.c:68
void binaryheap_replace_first(binaryheap *heap, Datum d)
Definition: binaryheap.c:207
Datum binaryheap_first(binaryheap *heap)
Definition: binaryheap.c:158
#define binaryheap_empty(h)
Definition: binaryheap.h:52
uint32 BlockNumber
Definition: block.h:31
#define InvalidBlockNumber
Definition: block.h:33
static bool BlockNumberIsValid(BlockNumber blockNumber)
Definition: block.h:71
static int32 next
Definition: blutils.c:219
int Buffer
Definition: buf.h:23
#define InvalidBuffer
Definition: buf.h:25
#define BufferIsLocal(buffer)
Definition: buf.h:37
CkptSortItem * CkptBufferIds
Definition: buf_init.c:25
WritebackContext BackendWritebackContext
Definition: buf_init.c:24
#define BM_MAX_USAGE_COUNT
Definition: buf_internals.h:76
static void InitBufferTag(BufferTag *tag, const RelFileLocator *rlocator, ForkNumber forkNum, BlockNumber blockNum)
#define BM_TAG_VALID
Definition: buf_internals.h:61
#define BM_PERMANENT
Definition: buf_internals.h:67
#define BUF_USAGECOUNT_MASK
Definition: buf_internals.h:43
static ForkNumber BufTagGetForkNum(const BufferTag *tag)
#define BUF_REFCOUNT_ONE
Definition: buf_internals.h:41
static BufferDesc * GetLocalBufferDescriptor(uint32 id)
static BufferDesc * GetBufferDescriptor(uint32 id)
static void UnlockBufHdr(BufferDesc *desc, uint32 buf_state)
static bool BufferTagsEqual(const BufferTag *tag1, const BufferTag *tag2)
static RelFileNumber BufTagGetRelNumber(const BufferTag *tag)
static bool BufTagMatchesRelFileLocator(const BufferTag *tag, const RelFileLocator *rlocator)
#define BUF_FLAG_MASK
Definition: buf_internals.h:46
static LWLock * BufMappingPartitionLock(uint32 hashcode)
#define BM_PIN_COUNT_WAITER
Definition: buf_internals.h:65
#define BM_DIRTY
Definition: buf_internals.h:59
#define BM_LOCKED
Definition: buf_internals.h:58
#define BM_JUST_DIRTIED
Definition: buf_internals.h:64
#define BUF_STATE_GET_USAGECOUNT(state)
Definition: buf_internals.h:50
#define BM_IO_IN_PROGRESS
Definition: buf_internals.h:62
static void ClearBufferTag(BufferTag *tag)
static ConditionVariable * BufferDescriptorGetIOCV(const BufferDesc *bdesc)
#define BUF_USAGECOUNT_ONE
Definition: buf_internals.h:44
#define BUF_STATE_GET_REFCOUNT(state)
Definition: buf_internals.h:49
static RelFileLocator BufTagGetRelFileLocator(const BufferTag *tag)
#define BM_VALID
Definition: buf_internals.h:60
#define BM_IO_ERROR
Definition: buf_internals.h:63
static Buffer BufferDescriptorGetBuffer(const BufferDesc *bdesc)
#define BM_CHECKPOINT_NEEDED
Definition: buf_internals.h:66
static LWLock * BufferDescriptorGetContentLock(const BufferDesc *bdesc)
void BufTableDelete(BufferTag *tagPtr, uint32 hashcode)
Definition: buf_table.c:149
int BufTableLookup(BufferTag *tagPtr, uint32 hashcode)
Definition: buf_table.c:91
uint32 BufTableHashCode(BufferTag *tagPtr)
Definition: buf_table.c:79
int BufTableInsert(BufferTag *tagPtr, uint32 hashcode, int buf_id)
Definition: buf_table.c:119
bool track_io_timing
Definition: bufmgr.c:137
void FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
Definition: bufmgr.c:3603
void IncrBufferRefCount(Buffer buffer)
Definition: bufmgr.c:3964
void DropDatabaseBuffers(Oid dbid)
Definition: bufmgr.c:3408
static int ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b)
Definition: bufmgr.c:4836
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2756
static PrivateRefCountEntry * NewPrivateRefCountEntry(Buffer buffer)
Definition: bufmgr.c:283
void DropRelationBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum, int nforks, BlockNumber *firstDelBlock)
Definition: bufmgr.c:3053
Buffer ReleaseAndReadBuffer(Buffer buffer, Relation relation, BlockNumber blockNum)
Definition: bufmgr.c:1639
PrefetchBufferResult PrefetchBuffer(Relation reln, ForkNumber forkNum, BlockNumber blockNum)
Definition: bufmgr.c:583
static uint32 PrivateRefCountClock
Definition: bufmgr.c:202
static bool PinBuffer(BufferDesc *buf, BufferAccessStrategy strategy)
Definition: bufmgr.c:1697
bool zero_damaged_pages
Definition: bufmgr.c:134
#define BUF_DROP_FULL_SCAN_THRESHOLD
Definition: bufmgr.c:81
static void PinBuffer_Locked(BufferDesc *buf)
Definition: bufmgr.c:1800
void BufmgrCommit(void)
Definition: bufmgr.c:2742
static uint32 WaitBufHdrUnlocked(BufferDesc *buf)
Definition: bufmgr.c:4778
static int buffertag_comparator(const BufferTag *ba, const BufferTag *bb)
Definition: bufmgr.c:4802
#define LocalBufHdrGetBlock(bufHdr)
Definition: bufmgr.c:66
bool IsBufferCleanupOK(Buffer buffer)
Definition: bufmgr.c:4457
#define BufferGetLSN(bufHdr)
Definition: bufmgr.c:63
void AtEOXact_Buffers(bool isCommit)
Definition: bufmgr.c:2587
static void CheckForBufferLeaks(void)
Definition: bufmgr.c:2648
void CreateAndCopyRelationData(RelFileLocator src_rlocator, RelFileLocator dst_rlocator, bool permanent)
Definition: bufmgr.c:3794
void DropRelationsAllBuffers(SMgrRelation *smgr_reln, int nlocators)
Definition: bufmgr.c:3176
static int rlocator_comparator(const void *p1, const void *p2)
Definition: bufmgr.c:4723
void TestForOldSnapshot_impl(Snapshot snapshot, Relation relation)
Definition: bufmgr.c:5020
struct SMgrSortArray SMgrSortArray
static void AtProcExit_Buffers(int code, Datum arg)
Definition: bufmgr.c:2629
static int ts_ckpt_progress_comparator(Datum a, Datum b, void *arg)
Definition: bufmgr.c:4867
void BufferGetTag(Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum, BlockNumber *blknum)
Definition: bufmgr.c:2777
#define BufHdrGetBlock(bufHdr)
Definition: bufmgr.c:62
#define BUF_REUSABLE
Definition: bufmgr.c:71
void ScheduleBufferTagForWriteback(WritebackContext *context, BufferTag *tag)
Definition: bufmgr.c:4902
static void local_buffer_write_error_callback(void *arg)
Definition: bufmgr.c:4703
static void BufferSync(int flags)
Definition: bufmgr.c:1941
static BufferDesc * BufferAlloc(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, BufferAccessStrategy strategy, bool *foundPtr)
Definition: bufmgr.c:1112
static BufferDesc * InProgressBuf
Definition: bufmgr.c:163
void CheckPointBuffers(int flags)
Definition: bufmgr.c:2732
bool BgBufferSync(WritebackContext *wb_context)
Definition: bufmgr.c:2217
bool BufferIsPermanent(Buffer buffer)
Definition: bufmgr.c:2977
#define REFCOUNT_ARRAY_ENTRIES
Definition: bufmgr.c:90
void UnlockBuffers(void)
Definition: bufmgr.c:4139
PrefetchBufferResult PrefetchSharedBuffer(SMgrRelation smgr_reln, ForkNumber forkNum, BlockNumber blockNum)
Definition: bufmgr.c:496
bool ConditionalLockBuffer(Buffer buffer)
Definition: bufmgr.c:4193
static bool StartBufferIO(BufferDesc *buf, bool forInput)
Definition: bufmgr.c:4554
BlockNumber RelationGetNumberOfBlocksInFork(Relation relation, ForkNumber forkNum)
Definition: bufmgr.c:2945
int bgwriter_flush_after
Definition: bufmgr.c:159
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3926
static void FindAndDropRelationBuffers(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber nForkBlock, BlockNumber firstDelBlock)
Definition: bufmgr.c:3347
XLogRecPtr BufferGetLSNAtomic(Buffer buffer)
Definition: bufmgr.c:3007
bool HoldingBufferPinThatDelaysRecovery(void)
Definition: bufmgr.c:4375
int checkpoint_flush_after
Definition: bufmgr.c:158
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3949
static void shared_buffer_write_error_callback(void *arg)
Definition: bufmgr.c:4683
void WritebackContextInit(WritebackContext *context, int *max_pending)
Definition: bufmgr.c:4890
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1576
void InitBufferPoolAccess(void)
Definition: bufmgr.c:2604
#define BufferIsPinned(bufnum)
Definition: bufmgr.c:441
double bgwriter_lru_multiplier
Definition: bufmgr.c:136
void AbortBufferIO(void)
Definition: bufmgr.c:4637
int backend_flush_after
Definition: bufmgr.c:160
static void ReservePrivateRefCountEntry(void)
Definition: bufmgr.c:217
static bool IsForInput
Definition: bufmgr.c:164
static BufferDesc * PinCountWaitBuf
Definition: bufmgr.c:167
void PrintBufferLeakWarning(Buffer buffer)
Definition: bufmgr.c:2688
static int32 GetPrivateRefCount(Buffer buffer)
Definition: bufmgr.c:383
void LockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:4224
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4167
static PrivateRefCountEntry * ReservedRefCountEntry
Definition: bufmgr.c:203
void MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
Definition: bufmgr.c:3996
void FlushRelationBuffers(Relation rel)
Definition: bufmgr.c:3514
static void ForgetPrivateRefCountEntry(PrivateRefCountEntry *ref)
Definition: bufmgr.c:406
Buffer ReadBufferWithoutRelcache(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy, bool permanent)
Definition: bufmgr.c:790
bool ReadRecentBuffer(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockNum, Buffer recent_buffer)
Definition: bufmgr.c:614
#define RELS_BSEARCH_THRESHOLD
Definition: bufmgr.c:73
int maintenance_io_concurrency
Definition: bufmgr.c:152
static Buffer ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy, bool *hit)
Definition: bufmgr.c:810
static void UnpinBuffer(BufferDesc *buf)
Definition: bufmgr.c:1843
void FlushDatabaseBuffers(Oid dbid)
Definition: bufmgr.c:3863
static void InvalidateBuffer(BufferDesc *buf)
Definition: bufmgr.c:1478
static void RelationCopyStorageUsingBuffer(RelFileLocator srclocator, RelFileLocator dstlocator, ForkNumber forkNum, bool permanent)
Definition: bufmgr.c:3704
int effective_io_concurrency
Definition: bufmgr.c:145
static PrivateRefCountEntry * GetPrivateRefCountEntry(Buffer buffer, bool do_move)
Definition: bufmgr.c:309
struct PrivateRefCountEntry PrivateRefCountEntry
static void FlushBuffer(BufferDesc *buf, SMgrRelation reln)
Definition: bufmgr.c:2816
struct CkptTsStatus CkptTsStatus
Buffer ReadBufferExtended(Relation reln, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:750
uint32 LockBufHdr(BufferDesc *desc)
Definition: bufmgr.c:4750
static struct PrivateRefCountEntry PrivateRefCountArray[REFCOUNT_ARRAY_ENTRIES]
Definition: bufmgr.c:199
static void TerminateBufferIO(BufferDesc *buf, bool clear_dirty, uint32 set_flag_bits)
Definition: bufmgr.c:4605
void IssuePendingWritebacks(WritebackContext *context)
Definition: bufmgr.c:4943
static int SyncOneBuffer(int buf_id, bool skip_recently_used, WritebackContext *wb_context)
Definition: bufmgr.c:2520
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:703
static HTAB * PrivateRefCountHash
Definition: bufmgr.c:200
static int32 PrivateRefCountOverflowed
Definition: bufmgr.c:201
bool ConditionalLockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:4401
int bgwriter_lru_maxpages
Definition: bufmgr.c:135
static void WaitIO(BufferDesc *buf)
Definition: bufmgr.c:4511
#define BUF_WRITTEN
Definition: bufmgr.c:70
void FlushOneBuffer(Buffer buffer)
Definition: bufmgr.c:3906
@ BAS_BULKREAD
Definition: bufmgr.h:30
@ BAS_BULKWRITE
Definition: bufmgr.h:32
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:105
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:106
#define P_NEW
Definition: bufmgr.h:100
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:280
#define DEFAULT_EFFECTIVE_IO_CONCURRENCY
Definition: bufmgr.h:78
#define DEFAULT_MAINTENANCE_IO_CONCURRENCY
Definition: bufmgr.h:79
void * Block
Definition: bufmgr.h:24
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:107
ReadBufferMode
Definition: bufmgr.h:38
@ RBM_ZERO_ON_ERROR
Definition: bufmgr.h:44
@ RBM_ZERO_AND_CLEANUP_LOCK
Definition: bufmgr.h:42
@ RBM_ZERO_AND_LOCK
Definition: bufmgr.h:40
@ RBM_NORMAL
Definition: bufmgr.h:39
@ RBM_NORMAL_NO_LOG
Definition: bufmgr.h:45
static bool BufferIsValid(Buffer bufnum)
Definition: bufmgr.h:228
bool PageIsVerifiedExtended(Page page, BlockNumber blkno, int flags)
Definition: bufpage.c:88
void PageSetChecksumInplace(Page page, BlockNumber blkno)
Definition: bufpage.c:1539
char * PageSetChecksumCopy(Page page, BlockNumber blkno)
Definition: bufpage.c:1510
Pointer Page
Definition: bufpage.h:78
#define PIV_LOG_WARNING
Definition: bufpage.h:465
static bool PageIsNew(Page page)
Definition: bufpage.h:230
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition: bufpage.h:388
static XLogRecPtr PageGetLSN(Page page)
Definition: bufpage.h:383
#define PIV_REPORT_STAT
Definition: bufpage.h:466
unsigned int uint32
Definition: c.h:490
signed int int32
Definition: c.h:478
double float8
Definition: c.h:614
#define MemSet(start, val, len)
Definition: c.h:1004
size_t Size
Definition: c.h:589
void CheckpointWriteDelay(int flags, double progress)
Definition: checkpointer.c:697
void ConditionVariableBroadcast(ConditionVariable *cv)
void ConditionVariablePrepareToSleep(ConditionVariable *cv)
void ConditionVariableSleep(ConditionVariable *cv, uint32 wait_event_info)
void ConditionVariableCancelSleep(void)
int64 TimestampTz
Definition: timestamp.h:39
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:953
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:350
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1431
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1421
struct cursor * cur
Definition: ecpg.c:28
int errdetail(const char *fmt,...)
Definition: elog.c:1202
ErrorContextCallback * error_context_stack
Definition: elog.c:95
int errhint(const char *fmt,...)
Definition: elog.c:1316
int errcode(int sqlerrcode)
Definition: elog.c:858
int errmsg(const char *fmt,...)
Definition: elog.c:1069
#define LOG
Definition: elog.h:31
#define errcontext
Definition: elog.h:196
#define WARNING
Definition: elog.h:36
#define DEBUG2
Definition: elog.h:29
#define DEBUG1
Definition: elog.h:30
#define ERROR
Definition: elog.h:39
#define ereport(elevel,...)
Definition: elog.h:149
int StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc)
Definition: freelist.c:394
BufferAccessStrategy GetAccessStrategy(BufferAccessStrategyType btype)
Definition: freelist.c:541
BufferDesc * StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state)
Definition: freelist.c:201
bool StrategyRejectBuffer(BufferAccessStrategy strategy, BufferDesc *buf)
Definition: freelist.c:685
void StrategyFreeBuffer(BufferDesc *buf)
Definition: freelist.c:363
volatile sig_atomic_t ProcSignalBarrierPending
Definition: globals.c:37
int64 VacuumPageHit
Definition: globals.c:148
int NBuffers
Definition: globals.c:136
int VacuumCostPageMiss
Definition: globals.c:143
int64 VacuumPageMiss
Definition: globals.c:149
bool VacuumCostActive
Definition: globals.c:153
int64 VacuumPageDirty
Definition: globals.c:150
int VacuumCostBalance
Definition: globals.c:152
BackendId MyBackendId
Definition: globals.c:85
int VacuumCostPageDirty
Definition: globals.c:144
int VacuumCostPageHit
Definition: globals.c:142
#define free(a)
Definition: header.h:65
@ HASH_FIND
Definition: hsearch.h:113
@ HASH_REMOVE
Definition: hsearch.h:115
@ HASH_ENTER
Definition: hsearch.h:114
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_BLOBS
Definition: hsearch.h:97
#define INSTR_TIME_SET_CURRENT(t)
Definition: instr_time.h:122
#define INSTR_TIME_ADD(x, y)
Definition: instr_time.h:178
#define INSTR_TIME_SUBTRACT(x, y)
Definition: instr_time.h:181
#define INSTR_TIME_GET_MICROSEC(t)
Definition: instr_time.h:194
#define INSTR_TIME_SET_ZERO(t)
Definition: instr_time.h:172
BufferUsage pgBufferUsage
Definition: instrument.c:20
void on_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:361
int b
Definition: isn.c:70
int a
Definition: isn.c:69
int j
Definition: isn.c:74
int i
Definition: isn.c:73
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:77
Assert(fmt[strlen(fmt) - 1] !='\n')
int32 * LocalRefCount
Definition: localbuf.c:45
BufferDesc * LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum, bool *foundPtr)
Definition: localbuf.c:109
void AtEOXact_LocalBuffers(bool isCommit)
Definition: localbuf.c:595
void DropRelationLocalBuffers(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber firstDelBlock)
Definition: localbuf.c:325
void AtProcExit_LocalBuffers(void)
Definition: localbuf.c:606
void MarkLocalBufferDirty(Buffer buffer)
Definition: localbuf.c:285
void DropRelationAllLocalBuffers(RelFileLocator rlocator)
Definition: localbuf.c:373
int NLocBuffer
Definition: localbuf.c:41
PrefetchBufferResult PrefetchLocalBuffer(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum)
Definition: localbuf.c:64
bool LWLockHeldByMe(LWLock *lock)
Definition: lwlock.c:1919
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1195
bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1963
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1803
bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1366
@ LW_SHARED
Definition: lwlock.h:116
@ LW_EXCLUSIVE
Definition: lwlock.h:115
void pfree(void *pointer)
Definition: mcxt.c:1436
void * repalloc(void *pointer, Size size)
Definition: mcxt.c:1456
void * palloc(Size size)
Definition: mcxt.c:1210
#define VALGRIND_MAKE_MEM_DEFINED(addr, size)
Definition: memdebug.h:26
#define VALGRIND_MAKE_MEM_NOACCESS(addr, size)
Definition: memdebug.h:27
#define START_CRIT_SECTION()
Definition: miscadmin.h:148
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:121
#define END_CRIT_SECTION()
Definition: miscadmin.h:150
void * arg
#define ERRCODE_DATA_CORRUPTED
Definition: pg_basebackup.c:41
static PgChecksumMode mode
Definition: pg_checksums.c:65
#define WRITEBACK_MAX_PENDING_FLUSHES
#define DEFAULT_BACKEND_FLUSH_AFTER
#define DEFAULT_CHECKPOINT_FLUSH_AFTER
#define DEFAULT_BGWRITER_FLUSH_AFTER
const void size_t len
static char * buf
Definition: pg_test_fsync.c:67
#define pgstat_count_buffer_read_time(n)
Definition: pgstat.h:468
#define pgstat_count_buffer_read(rel)
Definition: pgstat.h:551
#define pgstat_count_buffer_write_time(n)
Definition: pgstat.h:470
#define pgstat_count_buffer_hit(rel)
Definition: pgstat.h:556
PgStat_BgWriterStats PendingBgWriterStats
PgStat_CheckpointerStats PendingCheckpointerStats
void pg_qsort(void *base, size_t nel, size_t elsize, int(*cmp)(const void *, const void *))
static Datum PointerGetDatum(const void *X)
Definition: postgres.h:322
uintptr_t Datum
Definition: postgres.h:64
static Pointer DatumGetPointer(Datum X)
Definition: postgres.h:312
#define InvalidOid
Definition: postgres_ext.h:36
unsigned int Oid
Definition: postgres_ext.h:31
#define DELAY_CHKPT_START
Definition: proc.h:119
void ProcessProcSignalBarrier(void)
Definition: procsignal.c:468
@ PROCSIG_RECOVERY_CONFLICT_BUFFERPIN
Definition: procsignal.h:45
const char * get_ps_display(int *displen)
Definition: ps_status.c:414
bool update_process_title
Definition: ps_status.c:35
void set_ps_display(const char *activity)
Definition: ps_status.c:342
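get_ps_display()/set_ps_display() are used to append a " waiting" marker to the process title while a backend is blocked on a buffer pin. A sketch of that save/append/restore pattern; the caller is assumed to restore the title and pfree() the returned string once the wait is over.

#include "postgres.h"
#include "utils/ps_status.h"

static char *
mark_ps_waiting(void)
{
    char       *new_status = NULL;

    if (update_process_title)
    {
        const char *old_status;
        int         len;

        old_status = get_ps_display(&len);
        new_status = (char *) palloc(len + 8 + 1);
        memcpy(new_status, old_status, len);
        strcpy(new_status + len, " waiting");
        set_ps_display(new_status);
        new_status[len] = '\0';     /* truncated form, ready for the restore */
    }

    return new_status;              /* caller: set_ps_display() it again, then pfree() */
}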
static SMgrRelation RelationGetSmgr(Relation rel)
Definition: rel.h:569
#define RelationUsesLocalBuffers(relation)
Definition: rel.h:635
#define RELATION_IS_OTHER_TEMP(relation)
Definition: rel.h:656
#define RelationIsValid(relation)
Definition: rel.h:474
#define RelFileLocatorBackendIsTemp(rlocator)
#define RelFileLocatorEquals(locator1, locator2)
ForkNumber
Definition: relpath.h:48
@ MAIN_FORKNUM
Definition: relpath.h:50
@ INIT_FORKNUM
Definition: relpath.h:53
#define MAX_FORKNUM
Definition: relpath.h:62
#define relpath(rlocator, forknum)
Definition: relpath.h:94
#define relpathbackend(rlocator, backend, forknum)
Definition: relpath.h:85
#define relpathperm(rlocator, forknum)
Definition: relpath.h:90
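The relpath*() macros produce the on-disk path used in user-facing messages, for example when a corrupt page is reported. A hedged sketch of that usage; the helper name is made up, and the message text mirrors the style used for invalid pages.

#include "postgres.h"
#include "storage/block.h"
#include "storage/relfilelocator.h"

static void
report_bad_block(RelFileLocator rlocator, ForkNumber forkNum,
                 BlockNumber blockNum)
{
    char       *path = relpathperm(rlocator, forkNum);

    ereport(ERROR,
            (errcode(ERRCODE_DATA_CORRUPTED),
             errmsg("invalid page in block %u of relation %s",
                    blockNum, path)));
}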
void ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
Definition: resowner.c:972
ResourceOwner CurrentResourceOwner
Definition: resowner.c:146
void ResourceOwnerEnlargeBuffers(ResourceOwner owner)
Definition: resowner.c:950
void ResourceOwnerRememberBuffer(ResourceOwner owner, Buffer buffer)
Definition: resowner.c:963
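Buffer pins are tracked by the resource-owner machinery so they are released automatically on transaction abort. The protocol is enlarge-before-pin, so that recording the pin cannot fail after the pin is already taken. A sketch (the pin/unpin steps themselves are elided):

#include "postgres.h"
#include "storage/buf.h"
#include "utils/resowner_private.h"

static void
remember_pin(Buffer buffer)
{
    /* reserve array space first, so RememberBuffer cannot error out later */
    ResourceOwnerEnlargeBuffers(CurrentResourceOwner);

    /* ... acquire the pin ... */

    ResourceOwnerRememberBuffer(CurrentResourceOwner, buffer);
}

static void
forget_pin(Buffer buffer)
{
    /* ... release the pin ... */

    ResourceOwnerForgetBuffer(CurrentResourceOwner, buffer);
}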
void perform_spin_delay(SpinDelayStatus *status)
Definition: s_lock.c:126
void finish_spin_delay(SpinDelayStatus *status)
Definition: s_lock.c:186
#define init_local_spin_delay(status)
Definition: s_lock.h:863
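perform_spin_delay()/finish_spin_delay() implement the backoff used while spinning on a buffer header's atomic state word. A sketch in the spirit of LockBufHdr(), with a generic lock_bit standing in for the header lock flag:

#include "postgres.h"
#include "port/atomics.h"
#include "storage/s_lock.h"

static uint32
lock_state_word(pg_atomic_uint32 *state, uint32 lock_bit)
{
    SpinDelayStatus delayStatus;
    uint32      old_state;

    init_local_spin_delay(&delayStatus);

    for (;;)
    {
        /* set the lock bit; if it was not already set, the lock is ours */
        old_state = pg_atomic_fetch_or_u32(state, lock_bit);
        if (!(old_state & lock_bit))
            break;
        perform_spin_delay(&delayStatus);
    }
    finish_spin_delay(&delayStatus);

    return old_state | lock_bit;
}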
BlockNumber smgrnblocks(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:579
void smgrwriteback(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, BlockNumber nblocks)
Definition: smgr.c:567
void smgrwrite(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, char *buffer, bool skipFsync)
Definition: smgr.c:554
void smgrextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, char *buffer, bool skipFsync)
Definition: smgr.c:493
bool smgrprefetch(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum)
Definition: smgr.c:518
void smgrcloserellocator(RelFileLocatorBackend rlocator)
Definition: smgr.c:346
void smgrcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo)
Definition: smgr.c:369
BlockNumber smgrnblocks_cached(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:603
SMgrRelation smgropen(RelFileLocator rlocator, BackendId backend)
Definition: smgr.c:146
void smgrread(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum, char *buffer)
Definition: smgr.c:532
bool smgrexists(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:247
#define SmgrIsTemp(smgr)
Definition: smgr.h:77
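The smgr* calls above form the storage-manager layer that the buffer manager sits on top of. A hedged sketch of reading one block of a permanent relation's main fork directly through that layer; normal code would go through ReadBuffer() instead, and the caller here is assumed to supply a BLCKSZ-byte buffer.

#include "postgres.h"
#include "storage/smgr.h"

static bool
read_block_via_smgr(RelFileLocator rlocator, BlockNumber blkno,
                    char *page)     /* BLCKSZ-byte buffer supplied by caller */
{
    SMgrRelation reln = smgropen(rlocator, InvalidBackendId);

    if (!smgrexists(reln, MAIN_FORKNUM) ||
        blkno >= smgrnblocks(reln, MAIN_FORKNUM))
        return false;               /* fork missing or block past EOF */

    smgrread(reln, MAIN_FORKNUM, blkno, page);
    return true;
}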
TimestampTz GetOldSnapshotThresholdTimestamp(void)
Definition: snapmgr.c:1705
#define RelationAllowsEarlyPruning(rel)
Definition: snapmgr.h:38
PGPROC * MyProc
Definition: proc.c:66
void ProcSendSignal(int pgprocno)
Definition: proc.c:1809
int GetStartupBufferPinWaitBufId(void)
Definition: proc.c:639
int DeadlockTimeout
Definition: proc.c:58
void SetStartupBufferPinWaitBufId(int bufid)
Definition: proc.c:627
void ProcWaitForSignal(uint32 wait_event_info)
Definition: proc.c:1797
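ProcWaitForSignal()/ProcSendSignal() form the handshake behind cleanup-lock waits: the waiter records itself in the buffer header and sleeps, and whoever drops the last conflicting pin wakes it. A stripped-down sketch; waiter_pgprocno stands in for the pgprocno stored in the buffer header's wait_backend_pgprocno field.

#include "postgres.h"
#include "storage/proc.h"
#include "utils/wait_event.h"

/* waiter side: park until signaled */
static void
wait_for_pin_release(void)
{
    ProcWaitForSignal(PG_WAIT_BUFFER_PIN);
}

/* releaser side: wake the recorded waiter */
static void
signal_pin_waiter(int waiter_pgprocno)
{
    ProcSendSignal(waiter_pgprocno);
}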
void ResolveRecoveryConflictWithBufferPin(void)
Definition: standby.c:788
bool log_recovery_conflict_waits
Definition: standby.c:43
void LogRecoveryConflict(ProcSignalReason reason, TimestampTz wait_start, TimestampTz now, VirtualTransactionId *wait_list, bool still_waiting)
Definition: standby.c:274
bool RelFileLocatorSkippingWAL(RelFileLocator rlocator)
Definition: storage.c:550
SMgrRelation RelationCreateStorage(RelFileLocator rlocator, char relpersistence, bool register_delete)
Definition: storage.c:120
void log_smgrcreate(const RelFileLocator *rlocator, ForkNumber forkNum)
Definition: storage.c:185
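smgrcreate() plus log_smgrcreate() is the create-and-WAL-log pairing used when a fork has to be materialized during buffer-level operations, for instance an unlogged relation's init fork. A sketch under that assumption:

#include "postgres.h"
#include "catalog/storage.h"
#include "catalog/storage_xlog.h"
#include "storage/smgr.h"

static void
create_and_log_init_fork(SMgrRelation reln)
{
    /* physically create the fork ... */
    smgrcreate(reln, INIT_FORKNUM, false);

    /* ... and WAL-log its creation so recovery recreates it */
    log_smgrcreate(&reln->smgr_rlocator.locator, INIT_FORKNUM);
}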
int wait_backend_pgprocno
BufferTag tag
pg_atomic_uint32 state
int64 shared_blks_dirtied
Definition: instrument.h:28
int64 local_blks_hit
Definition: instrument.h:30
int64 local_blks_written
Definition: instrument.h:33
instr_time blk_write_time
Definition: instrument.h:37
int64 shared_blks_read
Definition: instrument.h:27
int64 shared_blks_written
Definition: instrument.h:29
instr_time blk_read_time
Definition: instrument.h:36
int64 local_blks_read
Definition: instrument.h:31
int64 shared_blks_hit
Definition: instrument.h:26
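These BufferUsage fields feed EXPLAIN (ANALYZE, BUFFERS). A sketch of how a shared-buffer miss would be accounted; io_start and io_end are stand-ins for timestamps taken around the read.

#include "postgres.h"
#include "executor/instrument.h"

static void
account_shared_read(instr_time io_start, instr_time io_end)
{
    pgBufferUsage.shared_blks_read++;           /* block came from disk, not cache */

    INSTR_TIME_SUBTRACT(io_end, io_start);      /* io_end now holds the elapsed time */
    INSTR_TIME_ADD(pgBufferUsage.blk_read_time, io_end);
}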
int ckpt_bufs_written
Definition: xlog.h:162
ForkNumber forkNum
RelFileNumber relNumber
BlockNumber blockNum
float8 progress_slice
Definition: bufmgr.c:109
int index
Definition: bufmgr.c:117
int num_scanned
Definition: bufmgr.c:114
float8 progress
Definition: bufmgr.c:108
int num_to_scan
Definition: bufmgr.c:112
Oid tsId
Definition: bufmgr.c:99
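A condensed sketch of how the checkpoint writer advances these per-tablespace fields for each page it writes. It assumes the CkptTsStatus struct defined earlier in this file, with progress_slice already set to (total pages to write) / (pages to write in this tablespace), which is what makes progress values comparable across tablespaces.

static void
ckpt_account_one_page(CkptTsStatus *ts_stat)
{
    ts_stat->progress += ts_stat->progress_slice;   /* normalized progress */
    ts_stat->num_scanned++;                         /* pages handled in this tablespace */
    ts_stat->index++;                               /* advance within this tablespace's slots */
}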
struct ErrorContextCallback * previous
Definition: elog.h:295
void(* callback)(void *arg)
Definition: elog.h:296
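ErrorContextCallback is the hook used to add "block N of relation ..."-style context to I/O error reports. A generic sketch of the push/pop pattern; the callback name and message text are made up for illustration.

#include "postgres.h"
#include "storage/block.h"

static void
demo_error_callback(void *arg)
{
    BlockNumber blkno = *(BlockNumber *) arg;

    errcontext("while processing block %u", blkno);
}

static void
do_work_with_context(BlockNumber blkno)
{
    ErrorContextCallback errcallback;

    /* push our callback onto the error context stack */
    errcallback.callback = demo_error_callback;
    errcallback.arg = &blkno;
    errcallback.previous = error_context_stack;
    error_context_stack = &errcallback;

    /* ... work that might ereport(ERROR, ...) ... */

    /* pop it again */
    error_context_stack = errcallback.previous;
}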