PostgreSQL Source Code  git master
slru.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * slru.c
4  * Simple LRU buffering for transaction status logfiles
5  *
6  * We use a simple least-recently-used scheme to manage a pool of page
7  * buffers. Under ordinary circumstances we expect that write
8  * traffic will occur mostly to the latest page (and to the just-prior
9  * page, soon after a page transition). Read traffic will probably touch
10  * a larger span of pages, but in any case a fairly small number of page
11  * buffers should be sufficient. So, we just search the buffers using plain
12  * linear search; there's no need for a hashtable or anything fancy.
13  * The management algorithm is straight LRU except that we will never swap
14  * out the latest page (since we know it's going to be hit again eventually).
15  *
16  * We use a control LWLock to protect the shared data structures, plus
17  * per-buffer LWLocks that synchronize I/O for each buffer. The control lock
18  * must be held to examine or modify any shared state. A process that is
19  * reading in or writing out a page buffer does not hold the control lock,
20  * only the per-buffer lock for the buffer it is working on.
21  *
22  * "Holding the control lock" means exclusive lock in all cases except for
23  * SimpleLruReadPage_ReadOnly(); see comments for SlruRecentlyUsed() for
24  * the implications of that.
25  *
26  * When initiating I/O on a buffer, we acquire the per-buffer lock exclusively
27  * before releasing the control lock. The per-buffer lock is released after
28  * completing the I/O, re-acquiring the control lock, and updating the shared
29  * state. (Deadlock is not possible here, because we never try to initiate
30  * I/O when someone else is already doing I/O on the same buffer.)
31  * To wait for I/O to complete, release the control lock, acquire the
32  * per-buffer lock in shared mode, immediately release the per-buffer lock,
33  * reacquire the control lock, and then recheck state (since arbitrary things
34  * could have happened while we didn't have the lock).
35  *
36  * As with the regular buffer manager, it is possible for another process
37  * to re-dirty a page that is currently being written out. This is handled
38  * by re-setting the page's page_dirty flag.
39  *
40  *
41  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
42  * Portions Copyright (c) 1994, Regents of the University of California
43  *
44  * src/backend/access/transam/slru.c
45  *
46  *-------------------------------------------------------------------------
47  */
48 #include "postgres.h"
49 
50 #include <fcntl.h>
51 #include <sys/stat.h>
52 #include <unistd.h>
53 
54 #include "access/slru.h"
55 #include "access/transam.h"
56 #include "access/xlog.h"
57 #include "miscadmin.h"
58 #include "pgstat.h"
59 #include "storage/fd.h"
60 #include "storage/shmem.h"
61 
/*
 * Build the pathname of an SLRU segment file: "<Dir>/<segno>", with the
 * segment number rendered as four uppercase hex digits.
 * Note: macro arguments may be evaluated more than once.
 */
#define SlruFileName(ctl, path, seg) \
	snprintf(path, MAXPGPATH, "%s/%04X", (ctl)->Dir, seg)
64 
65 /*
66  * During SimpleLruWriteAll(), we will usually not need to write more than one
67  * or two physical files, but we may need to write several pages per file. We
68  * can consolidate the I/O requests by leaving files open until control returns
69  * to SimpleLruWriteAll(). This data structure remembers which files are open.
70  */
71 #define MAX_WRITEALL_BUFFERS 16
72 
73 typedef struct SlruWriteAllData
74 {
75  int num_files; /* # files actually open */
76  int fd[MAX_WRITEALL_BUFFERS]; /* their FD's */
77  int segno[MAX_WRITEALL_BUFFERS]; /* their log seg#s */
79 
81 
82 /*
83  * Populate a file tag describing a segment file. We only use the segment
84  * number, since we can derive everything else we need by having separate
85  * sync handler functions for clog, multixact etc.
86  */
/* Note: (a) is evaluated more than once; pass a plain lvalue, not an expression. */
#define INIT_SLRUFILETAG(a,xx_handler,xx_segno) \
( \
	memset(&(a), 0, sizeof(FileTag)), \
	(a).handler = (xx_handler), \
	(a).segno = (xx_segno) \
)
93 
94 /*
95  * Macro to mark a buffer slot "most recently used". Note multiple evaluation
96  * of arguments!
97  *
98  * The reason for the if-test is that there are often many consecutive
99  * accesses to the same page (particularly the latest page). By suppressing
100  * useless increments of cur_lru_count, we reduce the probability that old
101  * pages' counts will "wrap around" and make them appear recently used.
102  *
103  * We allow this code to be executed concurrently by multiple processes within
104  * SimpleLruReadPage_ReadOnly(). As long as int reads and writes are atomic,
105  * this should not cause any completely-bogus values to enter the computation.
106  * However, it is possible for either cur_lru_count or individual
107  * page_lru_count entries to be "reset" to lower values than they should have,
108  * in case a process is delayed while it executes this macro. With care in
109  * SlruSelectLRUPage(), this does little harm, and in any case the absolute
110  * worst possible consequence is a nonoptimal choice of page to evict. The
111  * gain from allowing concurrent reads of SLRU pages seems worth it.
112  */
/* Bump the slot's LRU count unless it is already the most recent; see notes above. */
#define SlruRecentlyUsed(shared, slotno) \
	do { \
		int		new_lru_count = (shared)->cur_lru_count; \
		if (new_lru_count != (shared)->page_lru_count[slotno]) { \
			(shared)->cur_lru_count = ++new_lru_count; \
			(shared)->page_lru_count[slotno] = new_lru_count; \
		} \
	} while (0)
121 
/* Saved info for SlruReportIOError */
typedef enum
{
	SLRU_OPEN_FAILED,
	SLRU_SEEK_FAILED,
	SLRU_READ_FAILED,
	SLRU_WRITE_FAILED,
	SLRU_FSYNC_FAILED,
	SLRU_CLOSE_FAILED
} SlruErrCause;

/* Cause and errno of the most recent physical I/O failure, for later report. */
static SlruErrCause slru_errcause;
static int	slru_errno;
135 
136 
137 static void SimpleLruZeroLSNs(SlruCtl ctl, int slotno);
138 static void SimpleLruWaitIO(SlruCtl ctl, int slotno);
139 static void SlruInternalWritePage(SlruCtl ctl, int slotno, SlruWriteAll fdata);
140 static bool SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno);
141 static bool SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno,
142  SlruWriteAll fdata);
143 static void SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid);
144 static int SlruSelectLRUPage(SlruCtl ctl, int pageno);
145 
146 static bool SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename,
147  int segpage, void *data);
148 static void SlruInternalDeleteSegment(SlruCtl ctl, int segno);
149 
150 /*
151  * Initialization of shared memory
152  */
153 
154 Size
155 SimpleLruShmemSize(int nslots, int nlsns)
156 {
157  Size sz;
158 
159  /* we assume nslots isn't so large as to risk overflow */
160  sz = MAXALIGN(sizeof(SlruSharedData));
161  sz += MAXALIGN(nslots * sizeof(char *)); /* page_buffer[] */
162  sz += MAXALIGN(nslots * sizeof(SlruPageStatus)); /* page_status[] */
163  sz += MAXALIGN(nslots * sizeof(bool)); /* page_dirty[] */
164  sz += MAXALIGN(nslots * sizeof(int)); /* page_number[] */
165  sz += MAXALIGN(nslots * sizeof(int)); /* page_lru_count[] */
166  sz += MAXALIGN(nslots * sizeof(LWLockPadded)); /* buffer_locks[] */
167 
168  if (nlsns > 0)
169  sz += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr)); /* group_lsn[] */
170 
171  return BUFFERALIGN(sz) + BLCKSZ * nslots;
172 }
173 
174 /*
175  * Initialize, or attach to, a simple LRU cache in shared memory.
176  *
177  * ctl: address of local (unshared) control structure.
178  * name: name of SLRU. (This is user-visible, pick with care!)
179  * nslots: number of page slots to use.
180  * nlsns: number of LSN groups per page (set to zero if not relevant).
181  * ctllock: LWLock to use to control access to the shared control structure.
182  * subdir: PGDATA-relative subdirectory that will contain the files.
183  * tranche_id: LWLock tranche ID to use for the SLRU's per-buffer LWLocks.
184  */
void
SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns,
			  LWLock *ctllock, const char *subdir, int tranche_id,
			  SyncRequestHandler sync_handler)
{
	SlruShared	shared;
	bool		found;

	/* Create or attach to the named shared-memory area. */
	shared = (SlruShared) ShmemInitStruct(name,
										  SimpleLruShmemSize(nslots, nlsns),
										  &found);

	if (!IsUnderPostmaster)
	{
		/* Initialize locks and shared memory area */
		char	   *ptr;
		Size		offset;
		int			slotno;

		Assert(!found);

		memset(shared, 0, sizeof(SlruSharedData));

		shared->ControlLock = ctllock;

		shared->num_slots = nslots;
		shared->lsn_groups_per_page = nlsns;

		shared->cur_lru_count = 0;

		/* shared->latest_page_number will be set later */

		shared->slru_stats_idx = pgstat_slru_index(name);

		/*
		 * Carve the allocation into the per-slot arrays.  The layout here
		 * must exactly mirror the size computation in SimpleLruShmemSize().
		 */
		ptr = (char *) shared;
		offset = MAXALIGN(sizeof(SlruSharedData));
		shared->page_buffer = (char **) (ptr + offset);
		offset += MAXALIGN(nslots * sizeof(char *));
		shared->page_status = (SlruPageStatus *) (ptr + offset);
		offset += MAXALIGN(nslots * sizeof(SlruPageStatus));
		shared->page_dirty = (bool *) (ptr + offset);
		offset += MAXALIGN(nslots * sizeof(bool));
		shared->page_number = (int *) (ptr + offset);
		offset += MAXALIGN(nslots * sizeof(int));
		shared->page_lru_count = (int *) (ptr + offset);
		offset += MAXALIGN(nslots * sizeof(int));

		/* Initialize LWLocks */
		shared->buffer_locks = (LWLockPadded *) (ptr + offset);
		offset += MAXALIGN(nslots * sizeof(LWLockPadded));

		if (nlsns > 0)
		{
			shared->group_lsn = (XLogRecPtr *) (ptr + offset);
			offset += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr));
		}

		/* Page buffers follow the arrays, on a buffer-aligned boundary. */
		ptr += BUFFERALIGN(offset);
		for (slotno = 0; slotno < nslots; slotno++)
		{
			LWLockInitialize(&shared->buffer_locks[slotno].lock,
							 tranche_id);

			shared->page_buffer[slotno] = ptr;
			shared->page_status[slotno] = SLRU_PAGE_EMPTY;
			shared->page_dirty[slotno] = false;
			shared->page_lru_count[slotno] = 0;
			ptr += BLCKSZ;
		}

		/* Should fit to estimated shmem size */
		Assert(ptr - (char *) shared <= SimpleLruShmemSize(nslots, nlsns));
	}
	else
		Assert(found);

	/*
	 * Initialize the unshared control struct, including directory path.  We
	 * assume caller set PagePrecedes.
	 */
	ctl->shared = shared;
	ctl->sync_handler = sync_handler;
	strlcpy(ctl->Dir, subdir, sizeof(ctl->Dir));
}
269 
270 /*
271  * Initialize (or reinitialize) a page to zeroes.
272  *
273  * The page is not actually written, just set up in shared memory.
274  * The slot number of the new page is returned.
275  *
276  * Control lock must be held at entry, and will be held at exit.
277  */
278 int
279 SimpleLruZeroPage(SlruCtl ctl, int pageno)
280 {
281  SlruShared shared = ctl->shared;
282  int slotno;
283 
284  /* Find a suitable buffer slot for the page */
285  slotno = SlruSelectLRUPage(ctl, pageno);
286  Assert(shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
287  (shared->page_status[slotno] == SLRU_PAGE_VALID &&
288  !shared->page_dirty[slotno]) ||
289  shared->page_number[slotno] == pageno);
290 
291  /* Mark the slot as containing this page */
292  shared->page_number[slotno] = pageno;
293  shared->page_status[slotno] = SLRU_PAGE_VALID;
294  shared->page_dirty[slotno] = true;
295  SlruRecentlyUsed(shared, slotno);
296 
297  /* Set the buffer to zeroes */
298  MemSet(shared->page_buffer[slotno], 0, BLCKSZ);
299 
300  /* Set the LSNs for this new page to zero */
301  SimpleLruZeroLSNs(ctl, slotno);
302 
303  /* Assume this page is now the latest active page */
304  shared->latest_page_number = pageno;
305 
306  /* update the stats counter of zeroed pages */
308 
309  return slotno;
310 }
311 
312 /*
313  * Zero all the LSNs we store for this slru page.
314  *
315  * This should be called each time we create a new page, and each time we read
316  * in a page from disk into an existing buffer. (Such an old page cannot
317  * have any interesting LSNs, since we'd have flushed them before writing
318  * the page in the first place.)
319  *
320  * This assumes that InvalidXLogRecPtr is bitwise-all-0.
321  */
322 static void
323 SimpleLruZeroLSNs(SlruCtl ctl, int slotno)
324 {
325  SlruShared shared = ctl->shared;
326 
327  if (shared->lsn_groups_per_page > 0)
328  MemSet(&shared->group_lsn[slotno * shared->lsn_groups_per_page], 0,
329  shared->lsn_groups_per_page * sizeof(XLogRecPtr));
330 }
331 
332 /*
333  * Wait for any active I/O on a page slot to finish. (This does not
334  * guarantee that new I/O hasn't been started before we return, though.
335  * In fact the slot might not even contain the same page anymore.)
336  *
337  * Control lock must be held at entry, and will be held at exit.
338  */
339 static void
340 SimpleLruWaitIO(SlruCtl ctl, int slotno)
341 {
342  SlruShared shared = ctl->shared;
343 
344  /* See notes at top of file */
345  LWLockRelease(shared->ControlLock);
346  LWLockAcquire(&shared->buffer_locks[slotno].lock, LW_SHARED);
347  LWLockRelease(&shared->buffer_locks[slotno].lock);
349 
350  /*
351  * If the slot is still in an io-in-progress state, then either someone
352  * already started a new I/O on the slot, or a previous I/O failed and
353  * neglected to reset the page state. That shouldn't happen, really, but
354  * it seems worth a few extra cycles to check and recover from it. We can
355  * cheaply test for failure by seeing if the buffer lock is still held (we
356  * assume that transaction abort would release the lock).
357  */
358  if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS ||
359  shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS)
360  {
361  if (LWLockConditionalAcquire(&shared->buffer_locks[slotno].lock, LW_SHARED))
362  {
363  /* indeed, the I/O must have failed */
364  if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS)
365  shared->page_status[slotno] = SLRU_PAGE_EMPTY;
366  else /* write_in_progress */
367  {
368  shared->page_status[slotno] = SLRU_PAGE_VALID;
369  shared->page_dirty[slotno] = true;
370  }
371  LWLockRelease(&shared->buffer_locks[slotno].lock);
372  }
373  }
374 }
375 
376 /*
377  * Find a page in a shared buffer, reading it in if necessary.
378  * The page number must correspond to an already-initialized page.
379  *
380  * If write_ok is true then it is OK to return a page that is in
381  * WRITE_IN_PROGRESS state; it is the caller's responsibility to be sure
382  * that modification of the page is safe. If write_ok is false then we
383  * will not return the page until it is not undergoing active I/O.
384  *
385  * The passed-in xid is used only for error reporting, and may be
386  * InvalidTransactionId if no specific xid is associated with the action.
387  *
388  * Return value is the shared-buffer slot number now holding the page.
389  * The buffer's LRU access info is updated.
390  *
391  * Control lock must be held at entry, and will be held at exit.
392  */
393 int
394 SimpleLruReadPage(SlruCtl ctl, int pageno, bool write_ok,
395  TransactionId xid)
396 {
397  SlruShared shared = ctl->shared;
398 
399  /* Outer loop handles restart if we must wait for someone else's I/O */
400  for (;;)
401  {
402  int slotno;
403  bool ok;
404 
405  /* See if page already is in memory; if not, pick victim slot */
406  slotno = SlruSelectLRUPage(ctl, pageno);
407 
408  /* Did we find the page in memory? */
409  if (shared->page_number[slotno] == pageno &&
410  shared->page_status[slotno] != SLRU_PAGE_EMPTY)
411  {
412  /*
413  * If page is still being read in, we must wait for I/O. Likewise
414  * if the page is being written and the caller said that's not OK.
415  */
416  if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS ||
417  (shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS &&
418  !write_ok))
419  {
420  SimpleLruWaitIO(ctl, slotno);
421  /* Now we must recheck state from the top */
422  continue;
423  }
424  /* Otherwise, it's ready to use */
425  SlruRecentlyUsed(shared, slotno);
426 
427  /* update the stats counter of pages found in the SLRU */
429 
430  return slotno;
431  }
432 
433  /* We found no match; assert we selected a freeable slot */
434  Assert(shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
435  (shared->page_status[slotno] == SLRU_PAGE_VALID &&
436  !shared->page_dirty[slotno]));
437 
438  /* Mark the slot read-busy */
439  shared->page_number[slotno] = pageno;
440  shared->page_status[slotno] = SLRU_PAGE_READ_IN_PROGRESS;
441  shared->page_dirty[slotno] = false;
442 
443  /* Acquire per-buffer lock (cannot deadlock, see notes at top) */
444  LWLockAcquire(&shared->buffer_locks[slotno].lock, LW_EXCLUSIVE);
445 
446  /* Release control lock while doing I/O */
447  LWLockRelease(shared->ControlLock);
448 
449  /* Do the read */
450  ok = SlruPhysicalReadPage(ctl, pageno, slotno);
451 
452  /* Set the LSNs for this newly read-in page to zero */
453  SimpleLruZeroLSNs(ctl, slotno);
454 
455  /* Re-acquire control lock and update page state */
457 
458  Assert(shared->page_number[slotno] == pageno &&
459  shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS &&
460  !shared->page_dirty[slotno]);
461 
462  shared->page_status[slotno] = ok ? SLRU_PAGE_VALID : SLRU_PAGE_EMPTY;
463 
464  LWLockRelease(&shared->buffer_locks[slotno].lock);
465 
466  /* Now it's okay to ereport if we failed */
467  if (!ok)
468  SlruReportIOError(ctl, pageno, xid);
469 
470  SlruRecentlyUsed(shared, slotno);
471 
472  /* update the stats counter of pages not found in SLRU */
474 
475  return slotno;
476  }
477 }
478 
479 /*
480  * Find a page in a shared buffer, reading it in if necessary.
481  * The page number must correspond to an already-initialized page.
482  * The caller must intend only read-only access to the page.
483  *
484  * The passed-in xid is used only for error reporting, and may be
485  * InvalidTransactionId if no specific xid is associated with the action.
486  *
487  * Return value is the shared-buffer slot number now holding the page.
488  * The buffer's LRU access info is updated.
489  *
490  * Control lock must NOT be held at entry, but will be held at exit.
491  * It is unspecified whether the lock will be shared or exclusive.
492  */
493 int
495 {
496  SlruShared shared = ctl->shared;
497  int slotno;
498 
499  /* Try to find the page while holding only shared lock */
501 
502  /* See if page is already in a buffer */
503  for (slotno = 0; slotno < shared->num_slots; slotno++)
504  {
505  if (shared->page_number[slotno] == pageno &&
506  shared->page_status[slotno] != SLRU_PAGE_EMPTY &&
507  shared->page_status[slotno] != SLRU_PAGE_READ_IN_PROGRESS)
508  {
509  /* See comments for SlruRecentlyUsed macro */
510  SlruRecentlyUsed(shared, slotno);
511 
512  /* update the stats counter of pages found in the SLRU */
514 
515  return slotno;
516  }
517  }
518 
519  /* No luck, so switch to normal exclusive lock and do regular read */
520  LWLockRelease(shared->ControlLock);
522 
523  return SimpleLruReadPage(ctl, pageno, true, xid);
524 }
525 
526 /*
527  * Write a page from a shared buffer, if necessary.
528  * Does nothing if the specified slot is not dirty.
529  *
530  * NOTE: only one write attempt is made here. Hence, it is possible that
531  * the page is still dirty at exit (if someone else re-dirtied it during
532  * the write). However, we *do* attempt a fresh write even if the page
533  * is already being written; this is for checkpoints.
534  *
535  * Control lock must be held at entry, and will be held at exit.
536  */
537 static void
538 SlruInternalWritePage(SlruCtl ctl, int slotno, SlruWriteAll fdata)
539 {
540  SlruShared shared = ctl->shared;
541  int pageno = shared->page_number[slotno];
542  bool ok;
543 
544  /* If a write is in progress, wait for it to finish */
545  while (shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS &&
546  shared->page_number[slotno] == pageno)
547  {
548  SimpleLruWaitIO(ctl, slotno);
549  }
550 
551  /*
552  * Do nothing if page is not dirty, or if buffer no longer contains the
553  * same page we were called for.
554  */
555  if (!shared->page_dirty[slotno] ||
556  shared->page_status[slotno] != SLRU_PAGE_VALID ||
557  shared->page_number[slotno] != pageno)
558  return;
559 
560  /*
561  * Mark the slot write-busy, and clear the dirtybit. After this point, a
562  * transaction status update on this page will mark it dirty again.
563  */
564  shared->page_status[slotno] = SLRU_PAGE_WRITE_IN_PROGRESS;
565  shared->page_dirty[slotno] = false;
566 
567  /* Acquire per-buffer lock (cannot deadlock, see notes at top) */
568  LWLockAcquire(&shared->buffer_locks[slotno].lock, LW_EXCLUSIVE);
569 
570  /* Release control lock while doing I/O */
571  LWLockRelease(shared->ControlLock);
572 
573  /* Do the write */
574  ok = SlruPhysicalWritePage(ctl, pageno, slotno, fdata);
575 
576  /* If we failed, and we're in a flush, better close the files */
577  if (!ok && fdata)
578  {
579  int i;
580 
581  for (i = 0; i < fdata->num_files; i++)
582  CloseTransientFile(fdata->fd[i]);
583  }
584 
585  /* Re-acquire control lock and update page state */
587 
588  Assert(shared->page_number[slotno] == pageno &&
589  shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS);
590 
591  /* If we failed to write, mark the page dirty again */
592  if (!ok)
593  shared->page_dirty[slotno] = true;
594 
595  shared->page_status[slotno] = SLRU_PAGE_VALID;
596 
597  LWLockRelease(&shared->buffer_locks[slotno].lock);
598 
599  /* Now it's okay to ereport if we failed */
600  if (!ok)
602 
603  /* If part of a checkpoint, count this as a buffer written. */
604  if (fdata)
606 }
607 
608 /*
609  * Wrapper of SlruInternalWritePage, for external callers.
610  * fdata is always passed a NULL here.
611  */
void
SimpleLruWritePage(SlruCtl ctl, int slotno)
{
	/* Standalone write: no batched-file state, so pass a NULL fdata. */
	SlruInternalWritePage(ctl, slotno, NULL);
}
617 
618 /*
619  * Return whether the given page exists on disk.
620  *
621  * A false return means that either the file does not exist, or that it's not
622  * large enough to contain the given page.
623  */
624 bool
626 {
627  int segno = pageno / SLRU_PAGES_PER_SEGMENT;
628  int rpageno = pageno % SLRU_PAGES_PER_SEGMENT;
629  int offset = rpageno * BLCKSZ;
630  char path[MAXPGPATH];
631  int fd;
632  bool result;
633  off_t endpos;
634 
635  /* update the stats counter of checked pages */
637 
638  SlruFileName(ctl, path, segno);
639 
640  fd = OpenTransientFile(path, O_RDONLY | PG_BINARY);
641  if (fd < 0)
642  {
643  /* expected: file doesn't exist */
644  if (errno == ENOENT)
645  return false;
646 
647  /* report error normally */
649  slru_errno = errno;
650  SlruReportIOError(ctl, pageno, 0);
651  }
652 
653  if ((endpos = lseek(fd, 0, SEEK_END)) < 0)
654  {
656  slru_errno = errno;
657  SlruReportIOError(ctl, pageno, 0);
658  }
659 
660  result = endpos >= (off_t) (offset + BLCKSZ);
661 
662  if (CloseTransientFile(fd) != 0)
663  {
665  slru_errno = errno;
666  return false;
667  }
668 
669  return result;
670 }
671 
672 /*
673  * Physical read of a (previously existing) page into a buffer slot
674  *
675  * On failure, we cannot just ereport(ERROR) since caller has put state in
676  * shared memory that must be undone. So, we return false and save enough
677  * info in static variables to let SlruReportIOError make the report.
678  *
679  * For now, assume it's not worth keeping a file pointer open across
680  * read/write operations. We could cache one virtual file pointer ...
681  */
682 static bool
683 SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno)
684 {
685  SlruShared shared = ctl->shared;
686  int segno = pageno / SLRU_PAGES_PER_SEGMENT;
687  int rpageno = pageno % SLRU_PAGES_PER_SEGMENT;
688  off_t offset = rpageno * BLCKSZ;
689  char path[MAXPGPATH];
690  int fd;
691 
692  SlruFileName(ctl, path, segno);
693 
694  /*
695  * In a crash-and-restart situation, it's possible for us to receive
696  * commands to set the commit status of transactions whose bits are in
697  * already-truncated segments of the commit log (see notes in
698  * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case
699  * where the file doesn't exist, and return zeroes instead.
700  */
701  fd = OpenTransientFile(path, O_RDONLY | PG_BINARY);
702  if (fd < 0)
703  {
704  if (errno != ENOENT || !InRecovery)
705  {
707  slru_errno = errno;
708  return false;
709  }
710 
711  ereport(LOG,
712  (errmsg("file \"%s\" doesn't exist, reading as zeroes",
713  path)));
714  MemSet(shared->page_buffer[slotno], 0, BLCKSZ);
715  return true;
716  }
717 
718  errno = 0;
720  if (pg_pread(fd, shared->page_buffer[slotno], BLCKSZ, offset) != BLCKSZ)
721  {
724  slru_errno = errno;
725  CloseTransientFile(fd);
726  return false;
727  }
729 
730  if (CloseTransientFile(fd) != 0)
731  {
733  slru_errno = errno;
734  return false;
735  }
736 
737  return true;
738 }
739 
740 /*
741  * Physical write of a page from a buffer slot
742  *
743  * On failure, we cannot just ereport(ERROR) since caller has put state in
744  * shared memory that must be undone. So, we return false and save enough
745  * info in static variables to let SlruReportIOError make the report.
746  *
747  * For now, assume it's not worth keeping a file pointer open across
748  * independent read/write operations. We do batch operations during
749  * SimpleLruWriteAll, though.
750  *
751  * fdata is NULL for a standalone write, pointer to open-file info during
752  * SimpleLruWriteAll.
753  */
754 static bool
755 SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruWriteAll fdata)
756 {
757  SlruShared shared = ctl->shared;
758  int segno = pageno / SLRU_PAGES_PER_SEGMENT;
759  int rpageno = pageno % SLRU_PAGES_PER_SEGMENT;
760  off_t offset = rpageno * BLCKSZ;
761  char path[MAXPGPATH];
762  int fd = -1;
763 
764  /* update the stats counter of written pages */
766 
767  /*
768  * Honor the write-WAL-before-data rule, if appropriate, so that we do not
769  * write out data before associated WAL records. This is the same action
770  * performed during FlushBuffer() in the main buffer manager.
771  */
772  if (shared->group_lsn != NULL)
773  {
774  /*
775  * We must determine the largest async-commit LSN for the page. This
776  * is a bit tedious, but since this entire function is a slow path
777  * anyway, it seems better to do this here than to maintain a per-page
778  * LSN variable (which'd need an extra comparison in the
779  * transaction-commit path).
780  */
781  XLogRecPtr max_lsn;
782  int lsnindex,
783  lsnoff;
784 
785  lsnindex = slotno * shared->lsn_groups_per_page;
786  max_lsn = shared->group_lsn[lsnindex++];
787  for (lsnoff = 1; lsnoff < shared->lsn_groups_per_page; lsnoff++)
788  {
789  XLogRecPtr this_lsn = shared->group_lsn[lsnindex++];
790 
791  if (max_lsn < this_lsn)
792  max_lsn = this_lsn;
793  }
794 
795  if (!XLogRecPtrIsInvalid(max_lsn))
796  {
797  /*
798  * As noted above, elog(ERROR) is not acceptable here, so if
799  * XLogFlush were to fail, we must PANIC. This isn't much of a
800  * restriction because XLogFlush is just about all critical
801  * section anyway, but let's make sure.
802  */
804  XLogFlush(max_lsn);
806  }
807  }
808 
809  /*
810  * During a WriteAll, we may already have the desired file open.
811  */
812  if (fdata)
813  {
814  int i;
815 
816  for (i = 0; i < fdata->num_files; i++)
817  {
818  if (fdata->segno[i] == segno)
819  {
820  fd = fdata->fd[i];
821  break;
822  }
823  }
824  }
825 
826  if (fd < 0)
827  {
828  /*
829  * If the file doesn't already exist, we should create it. It is
830  * possible for this to need to happen when writing a page that's not
831  * first in its segment; we assume the OS can cope with that. (Note:
832  * it might seem that it'd be okay to create files only when
833  * SimpleLruZeroPage is called for the first page of a segment.
834  * However, if after a crash and restart the REDO logic elects to
835  * replay the log from a checkpoint before the latest one, then it's
836  * possible that we will get commands to set transaction status of
837  * transactions that have already been truncated from the commit log.
838  * Easiest way to deal with that is to accept references to
839  * nonexistent files here and in SlruPhysicalReadPage.)
840  *
841  * Note: it is possible for more than one backend to be executing this
842  * code simultaneously for different pages of the same file. Hence,
843  * don't use O_EXCL or O_TRUNC or anything like that.
844  */
845  SlruFileName(ctl, path, segno);
846  fd = OpenTransientFile(path, O_RDWR | O_CREAT | PG_BINARY);
847  if (fd < 0)
848  {
850  slru_errno = errno;
851  return false;
852  }
853 
854  if (fdata)
855  {
856  if (fdata->num_files < MAX_WRITEALL_BUFFERS)
857  {
858  fdata->fd[fdata->num_files] = fd;
859  fdata->segno[fdata->num_files] = segno;
860  fdata->num_files++;
861  }
862  else
863  {
864  /*
865  * In the unlikely event that we exceed MAX_FLUSH_BUFFERS,
866  * fall back to treating it as a standalone write.
867  */
868  fdata = NULL;
869  }
870  }
871  }
872 
873  errno = 0;
875  if (pg_pwrite(fd, shared->page_buffer[slotno], BLCKSZ, offset) != BLCKSZ)
876  {
878  /* if write didn't set errno, assume problem is no disk space */
879  if (errno == 0)
880  errno = ENOSPC;
882  slru_errno = errno;
883  if (!fdata)
884  CloseTransientFile(fd);
885  return false;
886  }
888 
889  /* Queue up a sync request for the checkpointer. */
890  if (ctl->sync_handler != SYNC_HANDLER_NONE)
891  {
892  FileTag tag;
893 
894  INIT_SLRUFILETAG(tag, ctl->sync_handler, segno);
895  if (!RegisterSyncRequest(&tag, SYNC_REQUEST, false))
896  {
897  /* No space to enqueue sync request. Do it synchronously. */
899  if (pg_fsync(fd) != 0)
900  {
903  slru_errno = errno;
904  CloseTransientFile(fd);
905  return false;
906  }
908  }
909  }
910 
911  /* Close file, unless part of flush request. */
912  if (!fdata)
913  {
914  if (CloseTransientFile(fd) != 0)
915  {
917  slru_errno = errno;
918  return false;
919  }
920  }
921 
922  return true;
923 }
924 
925 /*
926  * Issue the error message after failure of SlruPhysicalReadPage or
927  * SlruPhysicalWritePage. Call this after cleaning up shared-memory state.
928  */
929 static void
931 {
932  int segno = pageno / SLRU_PAGES_PER_SEGMENT;
933  int rpageno = pageno % SLRU_PAGES_PER_SEGMENT;
934  int offset = rpageno * BLCKSZ;
935  char path[MAXPGPATH];
936 
937  SlruFileName(ctl, path, segno);
938  errno = slru_errno;
939  switch (slru_errcause)
940  {
941  case SLRU_OPEN_FAILED:
942  ereport(ERROR,
944  errmsg("could not access status of transaction %u", xid),
945  errdetail("Could not open file \"%s\": %m.", path)));
946  break;
947  case SLRU_SEEK_FAILED:
948  ereport(ERROR,
950  errmsg("could not access status of transaction %u", xid),
951  errdetail("Could not seek in file \"%s\" to offset %u: %m.",
952  path, offset)));
953  break;
954  case SLRU_READ_FAILED:
955  if (errno)
956  ereport(ERROR,
958  errmsg("could not access status of transaction %u", xid),
959  errdetail("Could not read from file \"%s\" at offset %u: %m.",
960  path, offset)));
961  else
962  ereport(ERROR,
963  (errmsg("could not access status of transaction %u", xid),
964  errdetail("Could not read from file \"%s\" at offset %u: read too few bytes.", path, offset)));
965  break;
966  case SLRU_WRITE_FAILED:
967  if (errno)
968  ereport(ERROR,
970  errmsg("could not access status of transaction %u", xid),
971  errdetail("Could not write to file \"%s\" at offset %u: %m.",
972  path, offset)));
973  else
974  ereport(ERROR,
975  (errmsg("could not access status of transaction %u", xid),
976  errdetail("Could not write to file \"%s\" at offset %u: wrote too few bytes.",
977  path, offset)));
978  break;
979  case SLRU_FSYNC_FAILED:
982  errmsg("could not access status of transaction %u", xid),
983  errdetail("Could not fsync file \"%s\": %m.",
984  path)));
985  break;
986  case SLRU_CLOSE_FAILED:
987  ereport(ERROR,
989  errmsg("could not access status of transaction %u", xid),
990  errdetail("Could not close file \"%s\": %m.",
991  path)));
992  break;
993  default:
994  /* can't get here, we trust */
995  elog(ERROR, "unrecognized SimpleLru error cause: %d",
996  (int) slru_errcause);
997  break;
998  }
999 }
1000 
1001 /*
1002  * Select the slot to re-use when we need a free slot.
1003  *
1004  * The target page number is passed because we need to consider the
1005  * possibility that some other process reads in the target page while
1006  * we are doing I/O to free a slot. Hence, check or recheck to see if
1007  * any slot already holds the target page, and return that slot if so.
1008  * Thus, the returned slot is *either* a slot already holding the pageno
1009  * (could be any state except EMPTY), *or* a freeable slot (state EMPTY
1010  * or CLEAN).
1011  *
1012  * Control lock must be held at entry, and will be held at exit.
1013  */
1014 static int
1015 SlruSelectLRUPage(SlruCtl ctl, int pageno)
1016 {
1017  SlruShared shared = ctl->shared;
1018 
1019  /* Outer loop handles restart after I/O */
1020  for (;;)
1021  {
1022  int slotno;
1023  int cur_count;
1024  int bestvalidslot = 0; /* keep compiler quiet */
1025  int best_valid_delta = -1;
1026  int best_valid_page_number = 0; /* keep compiler quiet */
1027  int bestinvalidslot = 0; /* keep compiler quiet */
1028  int best_invalid_delta = -1;
1029  int best_invalid_page_number = 0; /* keep compiler quiet */
1030 
1031  /* See if page already has a buffer assigned */
1032  for (slotno = 0; slotno < shared->num_slots; slotno++)
1033  {
1034  if (shared->page_number[slotno] == pageno &&
1035  shared->page_status[slotno] != SLRU_PAGE_EMPTY)
1036  return slotno;
1037  }
1038 
1039  /*
1040  * If we find any EMPTY slot, just select that one. Else choose a
1041  * victim page to replace. We normally take the least recently used
1042  * valid page, but we will never take the slot containing
1043  * latest_page_number, even if it appears least recently used. We
1044  * will select a slot that is already I/O busy only if there is no
1045  * other choice: a read-busy slot will not be least recently used once
1046  * the read finishes, and waiting for an I/O on a write-busy slot is
1047  * inferior to just picking some other slot. Testing shows the slot
1048  * we pick instead will often be clean, allowing us to begin a read at
1049  * once.
1050  *
1051  * Normally the page_lru_count values will all be different and so
1052  * there will be a well-defined LRU page. But since we allow
1053  * concurrent execution of SlruRecentlyUsed() within
1054  * SimpleLruReadPage_ReadOnly(), it is possible that multiple pages
1055  * acquire the same lru_count values. In that case we break ties by
1056  * choosing the furthest-back page.
1057  *
1058  * Notice that this next line forcibly advances cur_lru_count to a
1059  * value that is certainly beyond any value that will be in the
1060  * page_lru_count array after the loop finishes. This ensures that
1061  * the next execution of SlruRecentlyUsed will mark the page newly
1062  * used, even if it's for a page that has the current counter value.
1063  * That gets us back on the path to having good data when there are
1064  * multiple pages with the same lru_count.
1065  */
1066  cur_count = (shared->cur_lru_count)++;
1067  for (slotno = 0; slotno < shared->num_slots; slotno++)
1068  {
1069  int this_delta;
1070  int this_page_number;
1071 
1072  if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
1073  return slotno;
1074  this_delta = cur_count - shared->page_lru_count[slotno];
1075  if (this_delta < 0)
1076  {
1077  /*
1078  * Clean up in case shared updates have caused cur_count
1079  * increments to get "lost". We back off the page counts,
1080  * rather than trying to increase cur_count, to avoid any
1081  * question of infinite loops or failure in the presence of
1082  * wrapped-around counts.
1083  */
1084  shared->page_lru_count[slotno] = cur_count;
1085  this_delta = 0;
1086  }
1087  this_page_number = shared->page_number[slotno];
1088  if (this_page_number == shared->latest_page_number)
1089  continue;
1090  if (shared->page_status[slotno] == SLRU_PAGE_VALID)
1091  {
1092  if (this_delta > best_valid_delta ||
1093  (this_delta == best_valid_delta &&
1094  ctl->PagePrecedes(this_page_number,
1095  best_valid_page_number)))
1096  {
1097  bestvalidslot = slotno;
1098  best_valid_delta = this_delta;
1099  best_valid_page_number = this_page_number;
1100  }
1101  }
1102  else
1103  {
1104  if (this_delta > best_invalid_delta ||
1105  (this_delta == best_invalid_delta &&
1106  ctl->PagePrecedes(this_page_number,
1107  best_invalid_page_number)))
1108  {
1109  bestinvalidslot = slotno;
1110  best_invalid_delta = this_delta;
1111  best_invalid_page_number = this_page_number;
1112  }
1113  }
1114  }
1115 
1116  /*
1117  * If all pages (except possibly the latest one) are I/O busy, we'll
1118  * have to wait for an I/O to complete and then retry. In that
1119  * unhappy case, we choose to wait for the I/O on the least recently
1120  * used slot, on the assumption that it was likely initiated first of
1121  * all the I/Os in progress and may therefore finish first.
1122  */
1123  if (best_valid_delta < 0)
1124  {
1125  SimpleLruWaitIO(ctl, bestinvalidslot);
1126  continue;
1127  }
1128 
1129  /*
1130  * If the selected page is clean, we're set.
1131  */
1132  if (!shared->page_dirty[bestvalidslot])
1133  return bestvalidslot;
1134 
1135  /*
1136  * Write the page.
1137  */
1138  SlruInternalWritePage(ctl, bestvalidslot, NULL);
1139 
1140  /*
1141  * Now loop back and try again. This is the easiest way of dealing
1142  * with corner cases such as the victim page being re-dirtied while we
1143  * wrote it.
1144  */
1145  }
1146 }
1147 
1148 /*
1149  * Write dirty pages to disk during checkpoint or database shutdown. Flushing
1150  * is deferred until the next call to ProcessSyncRequests(), though we do fsync
1151  * the containing directory here to make sure that newly created directory
1152  * entries are on disk.
1153  */
1154 void
1155 SimpleLruWriteAll(SlruCtl ctl, bool allow_redirtied)
1156 {
1157  SlruShared shared = ctl->shared;
1158  SlruWriteAllData fdata;
1159  int slotno;
1160  int pageno = 0;
1161  int i;
1162  bool ok;
1163 
1164  /* update the stats counter of flushes */
1166 
1167  /*
1168  * Find and write dirty pages
1169  */
1170  fdata.num_files = 0;
1171 
1173 
1174  for (slotno = 0; slotno < shared->num_slots; slotno++)
1175  {
1176  SlruInternalWritePage(ctl, slotno, &fdata);
1177 
1178  /*
1179  * In some places (e.g. checkpoints), we cannot assert that the slot
1180  * is clean now, since another process might have re-dirtied it
1181  * already. That's okay.
1182  */
1183  Assert(allow_redirtied ||
1184  shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
1185  (shared->page_status[slotno] == SLRU_PAGE_VALID &&
1186  !shared->page_dirty[slotno]));
1187  }
1188 
1189  LWLockRelease(shared->ControlLock);
1190 
1191  /*
1192  * Now close any files that were open
1193  */
1194  ok = true;
1195  for (i = 0; i < fdata.num_files; i++)
1196  {
1197  if (CloseTransientFile(fdata.fd[i]) != 0)
1198  {
1200  slru_errno = errno;
1201  pageno = fdata.segno[i] * SLRU_PAGES_PER_SEGMENT;
1202  ok = false;
1203  }
1204  }
1205  if (!ok)
1207 
1208  /* Ensure that directory entries for new files are on disk. */
1209  if (ctl->sync_handler != SYNC_HANDLER_NONE)
1210  fsync_fname(ctl->Dir, true);
1211 }
1212 
1213 /*
1214  * Remove all segments before the one holding the passed page number
1215  *
1216  * All SLRUs prevent concurrent calls to this function, either with an LWLock
1217  * or by calling it only as part of a checkpoint. Mutual exclusion must begin
1218  * before computing cutoffPage. Mutual exclusion must end after any limit
1219  * update that would permit other backends to write fresh data into the
1220  * segment immediately preceding the one containing cutoffPage. Otherwise,
1221  * when the SLRU is quite full, SimpleLruTruncate() might delete that segment
1222  * after it has accrued freshly-written data.
1223  */
1224 void
1225 SimpleLruTruncate(SlruCtl ctl, int cutoffPage)
1226 {
1227  SlruShared shared = ctl->shared;
1228  int slotno;
1229 
1230  /* update the stats counter of truncates */
1232 
1233  /*
1234  * Scan shared memory and remove any pages preceding the cutoff page, to
1235  * ensure we won't rewrite them later. (Since this is normally called in
1236  * or just after a checkpoint, any dirty pages should have been flushed
1237  * already ... we're just being extra careful here.)
1238  */
1240 
1241 restart:;
1242 
1243  /*
1244  * While we are holding the lock, make an important safety check: the
1245  * current endpoint page must not be eligible for removal.
1246  */
1247  if (ctl->PagePrecedes(shared->latest_page_number, cutoffPage))
1248  {
1249  LWLockRelease(shared->ControlLock);
1250  ereport(LOG,
1251  (errmsg("could not truncate directory \"%s\": apparent wraparound",
1252  ctl->Dir)));
1253  return;
1254  }
1255 
1256  for (slotno = 0; slotno < shared->num_slots; slotno++)
1257  {
1258  if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
1259  continue;
1260  if (!ctl->PagePrecedes(shared->page_number[slotno], cutoffPage))
1261  continue;
1262 
1263  /*
1264  * If page is clean, just change state to EMPTY (expected case).
1265  */
1266  if (shared->page_status[slotno] == SLRU_PAGE_VALID &&
1267  !shared->page_dirty[slotno])
1268  {
1269  shared->page_status[slotno] = SLRU_PAGE_EMPTY;
1270  continue;
1271  }
1272 
1273  /*
1274  * Hmm, we have (or may have) I/O operations acting on the page, so
1275  * we've got to wait for them to finish and then start again. This is
1276  * the same logic as in SlruSelectLRUPage. (XXX if page is dirty,
1277  * wouldn't it be OK to just discard it without writing it?
1278  * SlruMayDeleteSegment() uses a stricter qualification, so we might
1279  * not delete this page in the end; even if we don't delete it, we
1280  * won't have cause to read its data again. For now, keep the logic
1281  * the same as it was.)
1282  */
1283  if (shared->page_status[slotno] == SLRU_PAGE_VALID)
1284  SlruInternalWritePage(ctl, slotno, NULL);
1285  else
1286  SimpleLruWaitIO(ctl, slotno);
1287  goto restart;
1288  }
1289 
1290  LWLockRelease(shared->ControlLock);
1291 
1292  /* Now we can remove the old segment(s) */
1293  (void) SlruScanDirectory(ctl, SlruScanDirCbDeleteCutoff, &cutoffPage);
1294 }
1295 
1296 /*
1297  * Delete an individual SLRU segment.
1298  *
1299  * NB: This does not touch the SLRU buffers themselves, callers have to ensure
1300  * they either can't yet contain anything, or have already been cleaned out.
1301  */
1302 static void
1304 {
1305  char path[MAXPGPATH];
1306 
1307  /* Forget any fsync requests queued for this segment. */
1308  if (ctl->sync_handler != SYNC_HANDLER_NONE)
1309  {
1310  FileTag tag;
1311 
1312  INIT_SLRUFILETAG(tag, ctl->sync_handler, segno);
1314  }
1315 
1316  /* Unlink the file. */
1317  SlruFileName(ctl, path, segno);
1318  ereport(DEBUG2, (errmsg("removing file \"%s\"", path)));
1319  unlink(path);
1320 }
1321 
1322 /*
1323  * Delete an individual SLRU segment, identified by the segment number.
1324  */
1325 void
1327 {
1328  SlruShared shared = ctl->shared;
1329  int slotno;
1330  bool did_write;
1331 
1332  /* Clean out any possibly existing references to the segment. */
1334 restart:
1335  did_write = false;
1336  for (slotno = 0; slotno < shared->num_slots; slotno++)
1337  {
1338  int pagesegno = shared->page_number[slotno] / SLRU_PAGES_PER_SEGMENT;
1339 
1340  if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
1341  continue;
1342 
1343  /* not the segment we're looking for */
1344  if (pagesegno != segno)
1345  continue;
1346 
1347  /* If page is clean, just change state to EMPTY (expected case). */
1348  if (shared->page_status[slotno] == SLRU_PAGE_VALID &&
1349  !shared->page_dirty[slotno])
1350  {
1351  shared->page_status[slotno] = SLRU_PAGE_EMPTY;
1352  continue;
1353  }
1354 
1355  /* Same logic as SimpleLruTruncate() */
1356  if (shared->page_status[slotno] == SLRU_PAGE_VALID)
1357  SlruInternalWritePage(ctl, slotno, NULL);
1358  else
1359  SimpleLruWaitIO(ctl, slotno);
1360 
1361  did_write = true;
1362  }
1363 
1364  /*
1365  * Be extra careful and re-check. The IO functions release the control
1366  * lock, so new pages could have been read in.
1367  */
1368  if (did_write)
1369  goto restart;
1370 
1371  SlruInternalDeleteSegment(ctl, segno);
1372 
1373  LWLockRelease(shared->ControlLock);
1374 }
1375 
1376 /*
1377  * Determine whether a segment is okay to delete.
1378  *
1379  * segpage is the first page of the segment, and cutoffPage is the oldest (in
1380  * PagePrecedes order) page in the SLRU containing still-useful data. Since
1381  * every core PagePrecedes callback implements "wrap around", check the
1382  * segment's first and last pages:
1383  *
1384  * first<cutoff && last<cutoff: yes
1385  * first<cutoff && last>=cutoff: no; cutoff falls inside this segment
1386  * first>=cutoff && last<cutoff: no; wrap point falls inside this segment
1387  * first>=cutoff && last>=cutoff: no; every page of this segment is too young
1388  */
1389 static bool
1390 SlruMayDeleteSegment(SlruCtl ctl, int segpage, int cutoffPage)
1391 {
1392  int seg_last_page = segpage + SLRU_PAGES_PER_SEGMENT - 1;
1393 
1394  Assert(segpage % SLRU_PAGES_PER_SEGMENT == 0);
1395 
1396  return (ctl->PagePrecedes(segpage, cutoffPage) &&
1397  ctl->PagePrecedes(seg_last_page, cutoffPage));
1398 }
1399 
1400 #ifdef USE_ASSERT_CHECKING
1401 static void
1402 SlruPagePrecedesTestOffset(SlruCtl ctl, int per_page, uint32 offset)
1403 {
1404  TransactionId lhs,
1405  rhs;
1406  int newestPage,
1407  oldestPage;
1408  TransactionId newestXact,
1409  oldestXact;
1410 
1411  /*
1412  * Compare an XID pair having undefined order (see RFC 1982), a pair at
1413  * "opposite ends" of the XID space. TransactionIdPrecedes() treats each
1414  * as preceding the other. If RHS is oldestXact, LHS is the first XID we
1415  * must not assign.
1416  */
1417  lhs = per_page + offset; /* skip first page to avoid non-normal XIDs */
1418  rhs = lhs + (1U << 31);
1419  Assert(TransactionIdPrecedes(lhs, rhs));
1420  Assert(TransactionIdPrecedes(rhs, lhs));
1421  Assert(!TransactionIdPrecedes(lhs - 1, rhs));
1422  Assert(TransactionIdPrecedes(rhs, lhs - 1));
1423  Assert(TransactionIdPrecedes(lhs + 1, rhs));
1424  Assert(!TransactionIdPrecedes(rhs, lhs + 1));
1427  Assert(!ctl->PagePrecedes(lhs / per_page, lhs / per_page));
1428  Assert(!ctl->PagePrecedes(lhs / per_page, rhs / per_page));
1429  Assert(!ctl->PagePrecedes(rhs / per_page, lhs / per_page));
1430  Assert(!ctl->PagePrecedes((lhs - per_page) / per_page, rhs / per_page));
1431  Assert(ctl->PagePrecedes(rhs / per_page, (lhs - 3 * per_page) / per_page));
1432  Assert(ctl->PagePrecedes(rhs / per_page, (lhs - 2 * per_page) / per_page));
1433  Assert(ctl->PagePrecedes(rhs / per_page, (lhs - 1 * per_page) / per_page)
1434  || (1U << 31) % per_page != 0); /* See CommitTsPagePrecedes() */
1435  Assert(ctl->PagePrecedes((lhs + 1 * per_page) / per_page, rhs / per_page)
1436  || (1U << 31) % per_page != 0);
1437  Assert(ctl->PagePrecedes((lhs + 2 * per_page) / per_page, rhs / per_page));
1438  Assert(ctl->PagePrecedes((lhs + 3 * per_page) / per_page, rhs / per_page));
1439  Assert(!ctl->PagePrecedes(rhs / per_page, (lhs + per_page) / per_page));
1440 
1441  /*
1442  * GetNewTransactionId() has assigned the last XID it can safely use, and
1443  * that XID is in the *LAST* page of the second segment. We must not
1444  * delete that segment.
1445  */
1446  newestPage = 2 * SLRU_PAGES_PER_SEGMENT - 1;
1447  newestXact = newestPage * per_page + offset;
1448  Assert(newestXact / per_page == newestPage);
1449  oldestXact = newestXact + 1;
1450  oldestXact -= 1U << 31;
1451  oldestPage = oldestXact / per_page;
1453  (newestPage -
1454  newestPage % SLRU_PAGES_PER_SEGMENT),
1455  oldestPage));
1456 
1457  /*
1458  * GetNewTransactionId() has assigned the last XID it can safely use, and
1459  * that XID is in the *FIRST* page of the second segment. We must not
1460  * delete that segment.
1461  */
1462  newestPage = SLRU_PAGES_PER_SEGMENT;
1463  newestXact = newestPage * per_page + offset;
1464  Assert(newestXact / per_page == newestPage);
1465  oldestXact = newestXact + 1;
1466  oldestXact -= 1U << 31;
1467  oldestPage = oldestXact / per_page;
1469  (newestPage -
1470  newestPage % SLRU_PAGES_PER_SEGMENT),
1471  oldestPage));
1472 }
1473 
1474 /*
1475  * Unit-test a PagePrecedes function.
1476  *
1477  * This assumes every uint32 >= FirstNormalTransactionId is a valid key. It
1478  * assumes each value occupies a contiguous, fixed-size region of SLRU bytes.
1479  * (MultiXactMemberCtl separates flags from XIDs. AsyncCtl has
1480  * variable-length entries, no keys, and no random access. These unit tests
1481  * do not apply to them.)
1482  */
1483 void
1484 SlruPagePrecedesUnitTests(SlruCtl ctl, int per_page)
1485 {
1486  /* Test first, middle and last entries of a page. */
1487  SlruPagePrecedesTestOffset(ctl, per_page, 0);
1488  SlruPagePrecedesTestOffset(ctl, per_page, per_page / 2);
1489  SlruPagePrecedesTestOffset(ctl, per_page, per_page - 1);
1490 }
1491 #endif
1492 
1493 /*
1494  * SlruScanDirectory callback
1495  * This callback reports true if there's any segment wholly prior to the
1496  * one containing the page passed as "data".
1497  */
1498 bool
1499 SlruScanDirCbReportPresence(SlruCtl ctl, char *filename, int segpage, void *data)
1500 {
1501  int cutoffPage = *(int *) data;
1502 
1503  if (SlruMayDeleteSegment(ctl, segpage, cutoffPage))
1504  return true; /* found one; don't iterate any more */
1505 
1506  return false; /* keep going */
1507 }
1508 
1509 /*
1510  * SlruScanDirectory callback.
1511  * This callback deletes segments prior to the one passed in as "data".
1512  */
1513 static bool
1514 SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, int segpage, void *data)
1515 {
1516  int cutoffPage = *(int *) data;
1517 
1518  if (SlruMayDeleteSegment(ctl, segpage, cutoffPage))
1520 
1521  return false; /* keep going */
1522 }
1523 
1524 /*
1525  * SlruScanDirectory callback.
1526  * This callback deletes all segments.
1527  */
1528 bool
1529 SlruScanDirCbDeleteAll(SlruCtl ctl, char *filename, int segpage, void *data)
1530 {
1532 
1533  return false; /* keep going */
1534 }
1535 
1536 /*
1537  * Scan the SimpleLru directory and apply a callback to each file found in it.
1538  *
1539  * If the callback returns true, the scan is stopped. The last return value
1540  * from the callback is returned.
1541  *
1542  * The callback receives the following arguments: 1. the SlruCtl struct for the
1543  * slru being truncated; 2. the filename being considered; 3. the page number
1544  * for the first page of that file; 4. a pointer to the opaque data given to us
1545  * by the caller.
1546  *
1547  * Note that the ordering in which the directory is scanned is not guaranteed.
1548  *
1549  * Note that no locking is applied.
1550  */
1551 bool
1553 {
1554  bool retval = false;
1555  DIR *cldir;
1556  struct dirent *clde;
1557  int segno;
1558  int segpage;
1559 
1560  cldir = AllocateDir(ctl->Dir);
1561  while ((clde = ReadDir(cldir, ctl->Dir)) != NULL)
1562  {
1563  size_t len;
1564 
1565  len = strlen(clde->d_name);
1566 
1567  if ((len == 4 || len == 5 || len == 6) &&
1568  strspn(clde->d_name, "0123456789ABCDEF") == len)
1569  {
1570  segno = (int) strtol(clde->d_name, NULL, 16);
1571  segpage = segno * SLRU_PAGES_PER_SEGMENT;
1572 
1573  elog(DEBUG2, "SlruScanDirectory invoking callback on %s/%s",
1574  ctl->Dir, clde->d_name);
1575  retval = callback(ctl, clde->d_name, segpage, data);
1576  if (retval)
1577  break;
1578  }
1579  }
1580  FreeDir(cldir);
1581 
1582  return retval;
1583 }
1584 
1585 /*
1586  * Individual SLRUs (clog, ...) have to provide a sync.c handler function so
1587  * that they can provide the correct "SlruCtl" (otherwise we don't know how to
1588  * build the path), but they just forward to this common implementation that
1589  * performs the fsync.
1590  */
1591 int
1592 SlruSyncFileTag(SlruCtl ctl, const FileTag *ftag, char *path)
1593 {
1594  int fd;
1595  int save_errno;
1596  int result;
1597 
1598  SlruFileName(ctl, path, ftag->segno);
1599 
1600  fd = OpenTransientFile(path, O_RDWR | PG_BINARY);
1601  if (fd < 0)
1602  return -1;
1603 
1604  result = pg_fsync(fd);
1605  save_errno = errno;
1606 
1607  CloseTransientFile(fd);
1608 
1609  errno = save_errno;
1610  return result;
1611 }
uint32 segno
Definition: sync.h:55
LWLock * ControlLock
Definition: slru.h:55
int * page_number
Definition: slru.h:67
Definition: lwlock.h:31
SlruPageStatus
Definition: slru.h:42
int SlruSyncFileTag(SlruCtl ctl, const FileTag *ftag, char *path)
Definition: slru.c:1592
uint32 TransactionId
Definition: c.h:575
#define INIT_SLRUFILETAG(a, xx_handler, xx_segno)
Definition: slru.c:87
bool SlruScanDirCbDeleteAll(SlruCtl ctl, char *filename, int segpage, void *data)
Definition: slru.c:1529
SyncRequestHandler
Definition: sync.h:35
int latest_page_number
Definition: slru.h:98
char ** page_buffer
Definition: slru.h:64
void SimpleLruTruncate(SlruCtl ctl, int cutoffPage)
Definition: slru.c:1225
bool InRecovery
Definition: xlog.c:206
#define END_CRIT_SECTION()
Definition: miscadmin.h:135
void fsync_fname(const char *fname, bool isdir)
Definition: fd.c:661
SlruErrorCause
Definition: slru.c:123
bool TransactionIdFollowsOrEquals(TransactionId id1, TransactionId id2)
Definition: transam.c:349
#define START_CRIT_SECTION()
Definition: miscadmin.h:133
#define MemSet(start, val, len)
Definition: c.h:996
static SlruErrorCause slru_errcause
Definition: slru.c:133
int cur_lru_count
Definition: slru.h:91
static void SimpleLruZeroLSNs(SlruCtl ctl, int slotno)
Definition: slru.c:323
int lsn_groups_per_page
Definition: slru.h:80
void pgstat_count_slru_flush(int slru_idx)
Definition: pgstat.c:7402
#define LOG
Definition: elog.h:26
Definition: dirent.h:9
Size SimpleLruShmemSize(int nslots, int nlsns)
Definition: slru.c:155
void SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns, LWLock *ctllock, const char *subdir, int tranche_id, SyncRequestHandler sync_handler)
Definition: slru.c:186
void XLogFlush(XLogRecPtr record)
Definition: xlog.c:2860
static void SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid)
Definition: slru.c:930
#define PG_BINARY
Definition: c.h:1259
ssize_t pg_pwrite(int fd, const void *buf, size_t nbyte, off_t offset)
Definition: pwrite.c:27
int pgstat_slru_index(const char *name)
Definition: pgstat.c:7317
ssize_t pg_pread(int fd, void *buf, size_t nbyte, off_t offset)
Definition: pread.c:27
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1808
int segno[MAX_WRITEALL_BUFFERS]
Definition: slru.c:77
int fd[MAX_WRITEALL_BUFFERS]
Definition: slru.c:76
SlruPageStatus * page_status
Definition: slru.h:65
Definition: dirent.c:25
#define ERROR
Definition: elog.h:45
int num_files
Definition: slru.c:75
int OpenTransientFile(const char *fileName, int fileFlags)
Definition: fd.c:2404
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:396
struct SlruWriteAllData SlruWriteAllData
#define MAX_WRITEALL_BUFFERS
Definition: slru.c:71
int SimpleLruReadPage(SlruCtl ctl, int pageno, bool write_ok, TransactionId xid)
Definition: slru.c:394
#define MAXPGPATH
static void callback(struct sockaddr *addr, struct sockaddr *mask, void *unused)
Definition: test_ifaddrs.c:48
static XLogRecPtr endpos
Definition: pg_receivewal.c:46
static void SlruInternalWritePage(SlruCtl ctl, int slotno, SlruWriteAll fdata)
Definition: slru.c:538
#define DEBUG2
Definition: elog.h:24
void pgstat_count_slru_page_zeroed(int slru_idx)
Definition: pgstat.c:7372
bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1378
bool IsUnderPostmaster
Definition: globals.c:110
LWLockPadded * buffer_locks
Definition: slru.h:69
int errdetail(const char *fmt,...)
Definition: elog.c:1048
int errcode_for_file_access(void)
Definition: elog.c:727
void SimpleLruWriteAll(SlruCtl ctl, bool allow_redirtied)
Definition: slru.c:1155
#define InvalidTransactionId
Definition: transam.h:31
void pgstat_count_slru_page_read(int slru_idx)
Definition: pgstat.c:7390
unsigned int uint32
Definition: c.h:429
XLogRecPtr * group_lsn
Definition: slru.h:79
DIR * AllocateDir(const char *dirname)
Definition: fd.c:2615
bool SimpleLruDoesPhysicalPageExist(SlruCtl ctl, int pageno)
Definition: slru.c:625
void SimpleLruWritePage(SlruCtl ctl, int slotno)
Definition: slru.c:613
static void pgstat_report_wait_end(void)
Definition: pgstat.h:1512
static bool SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruWriteAll fdata)
Definition: slru.c:755
static void SimpleLruWaitIO(SlruCtl ctl, int slotno)
Definition: slru.c:340
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:300
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition: lwlock.c:743
int CloseTransientFile(int fd)
Definition: fd.c:2581
int ckpt_bufs_written
Definition: xlog.h:252
#define XLogRecPtrIsInvalid(r)
Definition: xlogdefs.h:29
void pgstat_count_slru_page_exists(int slru_idx)
Definition: pgstat.c:7384
bool(* SlruScanCallback)(SlruCtl ctl, char *filename, int segpage, void *data)
Definition: slru.h:161
SlruSharedData * SlruShared
Definition: slru.h:104
bool SlruScanDirCbReportPresence(SlruCtl ctl, char *filename, int segpage, void *data)
Definition: slru.c:1499
struct SlruWriteAllData * SlruWriteAll
Definition: slru.c:80
static bool SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, int segpage, void *data)
Definition: slru.c:1514
#define SlruFileName(ctl, path, seg)
Definition: slru.c:62
int data_sync_elevel(int elevel)
Definition: fd.c:3635
char Dir[64]
Definition: slru.h:136
int SimpleLruReadPage_ReadOnly(SlruCtl ctl, int pageno, TransactionId xid)
Definition: slru.c:494
void pgstat_count_slru_truncate(int slru_idx)
Definition: pgstat.c:7408
bool RegisterSyncRequest(const FileTag *ftag, SyncRequestType type, bool retryOnError)
Definition: sync.c:553
#define ereport(elevel,...)
Definition: elog.h:155
LWLock lock
Definition: lwlock.h:78
static bool SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno)
Definition: slru.c:683
static void SlruInternalDeleteSegment(SlruCtl ctl, int segno)
Definition: slru.c:1303
size_t strlcpy(char *dst, const char *src, size_t siz)
Definition: strlcpy.c:45
int * page_lru_count
Definition: slru.h:68
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:792
bool SlruScanDirectory(SlruCtl ctl, SlruScanCallback callback, void *data)
Definition: slru.c:1552
struct dirent * ReadDir(DIR *dir, const char *dirname)
Definition: fd.c:2681
CheckpointStatsData CheckpointStats
Definition: xlog.c:187
size_t Size
Definition: c.h:528
static void pgstat_report_wait_start(uint32 wait_event_info)
Definition: pgstat.h:1488
void pgstat_count_slru_page_hit(int slru_idx)
Definition: pgstat.c:7378
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1206
#define MAXALIGN(LEN)
Definition: c.h:745
SyncRequestHandler sync_handler
Definition: slru.h:118
int slru_stats_idx
Definition: slru.h:101
int num_slots
Definition: slru.h:58
const char * name
Definition: encode.c:515
static int SlruSelectLRUPage(SlruCtl ctl, int pageno)
Definition: slru.c:1015
static bool SlruMayDeleteSegment(SlruCtl ctl, int segpage, int cutoffPage)
Definition: slru.c:1390
static int slru_errno
Definition: slru.c:134
static char * filename
Definition: pg_dumpall.c:91
int errmsg(const char *fmt,...)
Definition: elog.c:915
bool * page_dirty
Definition: slru.h:66
#define elog(elevel,...)
Definition: elog.h:228
int i
SlruShared shared
Definition: slru.h:112
#define SlruPagePrecedesUnitTests(ctl, per_page)
Definition: slru.h:156
#define BUFFERALIGN(LEN)
Definition: c.h:747
bool(* PagePrecedes)(int, int)
Definition: slru.h:130
void SlruDeleteSegment(SlruCtl ctl, int segno)
Definition: slru.c:1326
int pg_fsync(int fd)
Definition: fd.c:347
char d_name[MAX_PATH]
Definition: dirent.h:15
#define SLRU_PAGES_PER_SEGMENT
Definition: slru.h:34
int SimpleLruZeroPage(SlruCtl ctl, int pageno)
Definition: slru.c:279
Definition: sync.h:50
int FreeDir(DIR *dir)
Definition: fd.c:2733
#define SlruRecentlyUsed(shared, slotno)
Definition: slru.c:113
void pgstat_count_slru_page_written(int slru_idx)
Definition: pgstat.c:7396