PostgreSQL Source Code  git master
slru.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * slru.c
4  * Simple LRU buffering for transaction status logfiles
5  *
6  * We use a simple least-recently-used scheme to manage a pool of page
7  * buffers. Under ordinary circumstances we expect that write
8  * traffic will occur mostly to the latest page (and to the just-prior
9  * page, soon after a page transition). Read traffic will probably touch
10  * a larger span of pages, but in any case a fairly small number of page
11  * buffers should be sufficient. So, we just search the buffers using plain
12  * linear search; there's no need for a hashtable or anything fancy.
13  * The management algorithm is straight LRU except that we will never swap
14  * out the latest page (since we know it's going to be hit again eventually).
15  *
16  * We use a control LWLock to protect the shared data structures, plus
17  * per-buffer LWLocks that synchronize I/O for each buffer. The control lock
18  * must be held to examine or modify any shared state. A process that is
19  * reading in or writing out a page buffer does not hold the control lock,
20  * only the per-buffer lock for the buffer it is working on.
21  *
22  * "Holding the control lock" means exclusive lock in all cases except for
23  * SimpleLruReadPage_ReadOnly(); see comments for SlruRecentlyUsed() for
24  * the implications of that.
25  *
26  * When initiating I/O on a buffer, we acquire the per-buffer lock exclusively
27  * before releasing the control lock. The per-buffer lock is released after
28  * completing the I/O, re-acquiring the control lock, and updating the shared
29  * state. (Deadlock is not possible here, because we never try to initiate
30  * I/O when someone else is already doing I/O on the same buffer.)
31  * To wait for I/O to complete, release the control lock, acquire the
32  * per-buffer lock in shared mode, immediately release the per-buffer lock,
33  * reacquire the control lock, and then recheck state (since arbitrary things
34  * could have happened while we didn't have the lock).
35  *
36  * As with the regular buffer manager, it is possible for another process
37  * to re-dirty a page that is currently being written out. This is handled
38  * by re-setting the page's page_dirty flag.
39  *
40  *
41  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
42  * Portions Copyright (c) 1994, Regents of the University of California
43  *
44  * src/backend/access/transam/slru.c
45  *
46  *-------------------------------------------------------------------------
47  */
48 #include "postgres.h"
49 
50 #include <fcntl.h>
51 #include <sys/stat.h>
52 #include <unistd.h>
53 
54 #include "access/slru.h"
55 #include "access/transam.h"
56 #include "access/xlog.h"
57 #include "access/xlogutils.h"
58 #include "miscadmin.h"
59 #include "pgstat.h"
60 #include "storage/fd.h"
61 #include "storage/shmem.h"
62 
/*
 * Build the on-disk pathname of an SLRU segment file: the SLRU's directory
 * followed by the segment number printed as four uppercase hex digits.
 * "path" must be a buffer of at least MAXPGPATH bytes.  Note that the
 * arguments may be evaluated more than once.
 */
#define SlruFileName(ctl, path, seg) \
	snprintf(path, MAXPGPATH, "%s/%04X", (ctl)->Dir, seg)
65 
/*
 * During SimpleLruWriteAll(), we will usually not need to write more than one
 * or two physical files, but we may need to write several pages per file.  We
 * can consolidate the I/O requests by leaving files open until control returns
 * to SimpleLruWriteAll().  This data structure remembers which files are open.
 */
#define MAX_WRITEALL_BUFFERS	16

typedef struct SlruWriteAllData
{
	int			num_files;		/* # files actually open */
	int			fd[MAX_WRITEALL_BUFFERS];	/* their FD's */
	int			segno[MAX_WRITEALL_BUFFERS];	/* their log seg#s */
} SlruWriteAllData;

/* Callers pass batch-write state around by pointer */
typedef SlruWriteAllData *SlruWriteAll;
/*
 * Populate a file tag describing a segment file.  We only use the segment
 * number, since we can derive everything else we need by having separate
 * sync handler functions for clog, multixact etc.
 *
 * "a" must be an lvalue of type FileTag; it is fully zeroed first so that
 * padding bytes compare equal when tags are hashed.  Arguments may be
 * evaluated more than once.
 */
#define INIT_SLRUFILETAG(a,xx_handler,xx_segno) \
( \
	memset(&(a), 0, sizeof(FileTag)), \
	(a).handler = (xx_handler), \
	(a).segno = (xx_segno) \
)
94 
/*
 * Macro to mark a buffer slot "most recently used".  Note multiple evaluation
 * of arguments!
 *
 * The reason for the if-test is that there are often many consecutive
 * accesses to the same page (particularly the latest page).  By suppressing
 * useless increments of cur_lru_count, we reduce the probability that old
 * pages' counts will "wrap around" and make them appear recently used.
 *
 * We allow this code to be executed concurrently by multiple processes within
 * SimpleLruReadPage_ReadOnly().  As long as int reads and writes are atomic,
 * this should not cause any completely-bogus values to enter the computation.
 * However, it is possible for either cur_lru_count or individual
 * page_lru_count entries to be "reset" to lower values than they should have,
 * in case a process is delayed while it executes this macro.  With care in
 * SlruSelectLRUPage(), this does little harm, and in any case the absolute
 * worst possible consequence is a nonoptimal choice of page to evict.  The
 * gain from allowing concurrent reads of SLRU pages seems worth it.
 *
 * (SlruSelectLRUPage compares counts by subtraction, so ordinary integer
 * wraparound of the counter itself is harmless; only the lost-update race
 * described above matters.)
 */
#define SlruRecentlyUsed(shared, slotno) \
	do { \
		int		new_lru_count = (shared)->cur_lru_count; \
		if (new_lru_count != (shared)->page_lru_count[slotno]) { \
			(shared)->cur_lru_count = ++new_lru_count; \
			(shared)->page_lru_count[slotno] = new_lru_count; \
		} \
	} while (0)
122 
/*
 * Saved info for SlruReportIOError.
 *
 * The physical I/O routines cannot ereport(ERROR) directly because the
 * caller still has shared-memory state to clean up; instead they stash the
 * failure cause and errno here and return false, and the caller invokes
 * SlruReportIOError once it is safe to throw.
 */
typedef enum
{
	SLRU_OPEN_FAILED,
	SLRU_SEEK_FAILED,
	SLRU_READ_FAILED,
	SLRU_WRITE_FAILED,
	SLRU_FSYNC_FAILED,
	SLRU_CLOSE_FAILED
} SlruErrorCause;

static SlruErrorCause slru_errcause;
static int	slru_errno;
136 
137 
138 static void SimpleLruZeroLSNs(SlruCtl ctl, int slotno);
139 static void SimpleLruWaitIO(SlruCtl ctl, int slotno);
140 static void SlruInternalWritePage(SlruCtl ctl, int slotno, SlruWriteAll fdata);
141 static bool SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno);
142 static bool SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno,
143  SlruWriteAll fdata);
144 static void SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid);
145 static int SlruSelectLRUPage(SlruCtl ctl, int pageno);
146 
147 static bool SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename,
148  int segpage, void *data);
149 static void SlruInternalDeleteSegment(SlruCtl ctl, int segno);
150 
151 /*
152  * Initialization of shared memory
153  */
154 
155 Size
156 SimpleLruShmemSize(int nslots, int nlsns)
157 {
158  Size sz;
159 
160  /* we assume nslots isn't so large as to risk overflow */
161  sz = MAXALIGN(sizeof(SlruSharedData));
162  sz += MAXALIGN(nslots * sizeof(char *)); /* page_buffer[] */
163  sz += MAXALIGN(nslots * sizeof(SlruPageStatus)); /* page_status[] */
164  sz += MAXALIGN(nslots * sizeof(bool)); /* page_dirty[] */
165  sz += MAXALIGN(nslots * sizeof(int)); /* page_number[] */
166  sz += MAXALIGN(nslots * sizeof(int)); /* page_lru_count[] */
167  sz += MAXALIGN(nslots * sizeof(LWLockPadded)); /* buffer_locks[] */
168 
169  if (nlsns > 0)
170  sz += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr)); /* group_lsn[] */
171 
172  return BUFFERALIGN(sz) + BLCKSZ * nslots;
173 }
174 
/*
 * Initialize, or attach to, a simple LRU cache in shared memory.
 *
 * ctl: address of local (unshared) control structure.
 * name: name of SLRU.  (This is user-visible, pick with care!)
 * nslots: number of page slots to use.
 * nlsns: number of LSN groups per page (set to zero if not relevant).
 * ctllock: LWLock to use to control access to the shared control structure.
 * subdir: PGDATA-relative subdirectory that will contain the files.
 * tranche_id: LWLock tranche ID to use for the SLRU's per-buffer LWLocks.
 */
void
SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns,
			  LWLock *ctllock, const char *subdir, int tranche_id,
			  SyncRequestHandler sync_handler)
{
	SlruShared	shared;
	bool		found;

	shared = (SlruShared) ShmemInitStruct(name,
										  SimpleLruShmemSize(nslots, nlsns),
										  &found);

	if (!IsUnderPostmaster)
	{
		/* Initialize locks and shared memory area */
		char	   *ptr;
		Size		offset;
		int			slotno;

		/* The postmaster is the first to attach, so it must create */
		Assert(!found);

		memset(shared, 0, sizeof(SlruSharedData));

		shared->ControlLock = ctllock;

		shared->num_slots = nslots;
		shared->lsn_groups_per_page = nlsns;

		shared->cur_lru_count = 0;

		/* shared->latest_page_number will be set later */

		shared->slru_stats_idx = pgstat_slru_index(name);

		/*
		 * Carve the single allocation into the per-slot arrays.  The order
		 * and alignment here must stay in sync with SimpleLruShmemSize.
		 */
		ptr = (char *) shared;
		offset = MAXALIGN(sizeof(SlruSharedData));
		shared->page_buffer = (char **) (ptr + offset);
		offset += MAXALIGN(nslots * sizeof(char *));
		shared->page_status = (SlruPageStatus *) (ptr + offset);
		offset += MAXALIGN(nslots * sizeof(SlruPageStatus));
		shared->page_dirty = (bool *) (ptr + offset);
		offset += MAXALIGN(nslots * sizeof(bool));
		shared->page_number = (int *) (ptr + offset);
		offset += MAXALIGN(nslots * sizeof(int));
		shared->page_lru_count = (int *) (ptr + offset);
		offset += MAXALIGN(nslots * sizeof(int));

		/* Initialize LWLocks */
		shared->buffer_locks = (LWLockPadded *) (ptr + offset);
		offset += MAXALIGN(nslots * sizeof(LWLockPadded));

		if (nlsns > 0)
		{
			shared->group_lsn = (XLogRecPtr *) (ptr + offset);
			offset += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr));
		}

		/* Page buffers follow the metadata arrays, buffer-aligned */
		ptr += BUFFERALIGN(offset);
		for (slotno = 0; slotno < nslots; slotno++)
		{
			LWLockInitialize(&shared->buffer_locks[slotno].lock,
							 tranche_id);

			shared->page_buffer[slotno] = ptr;
			shared->page_status[slotno] = SLRU_PAGE_EMPTY;
			shared->page_dirty[slotno] = false;
			shared->page_lru_count[slotno] = 0;
			ptr += BLCKSZ;
		}

		/* Should fit to estimated shmem size */
		Assert(ptr - (char *) shared <= SimpleLruShmemSize(nslots, nlsns));
	}
	else
		Assert(found);

	/*
	 * Initialize the unshared control struct, including directory path.  We
	 * assume caller set PagePrecedes.
	 */
	ctl->shared = shared;
	ctl->sync_handler = sync_handler;
	strlcpy(ctl->Dir, subdir, sizeof(ctl->Dir));
}
270 
271 /*
272  * Initialize (or reinitialize) a page to zeroes.
273  *
274  * The page is not actually written, just set up in shared memory.
275  * The slot number of the new page is returned.
276  *
277  * Control lock must be held at entry, and will be held at exit.
278  */
279 int
280 SimpleLruZeroPage(SlruCtl ctl, int pageno)
281 {
282  SlruShared shared = ctl->shared;
283  int slotno;
284 
285  /* Find a suitable buffer slot for the page */
286  slotno = SlruSelectLRUPage(ctl, pageno);
287  Assert(shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
288  (shared->page_status[slotno] == SLRU_PAGE_VALID &&
289  !shared->page_dirty[slotno]) ||
290  shared->page_number[slotno] == pageno);
291 
292  /* Mark the slot as containing this page */
293  shared->page_number[slotno] = pageno;
294  shared->page_status[slotno] = SLRU_PAGE_VALID;
295  shared->page_dirty[slotno] = true;
296  SlruRecentlyUsed(shared, slotno);
297 
298  /* Set the buffer to zeroes */
299  MemSet(shared->page_buffer[slotno], 0, BLCKSZ);
300 
301  /* Set the LSNs for this new page to zero */
302  SimpleLruZeroLSNs(ctl, slotno);
303 
304  /* Assume this page is now the latest active page */
305  shared->latest_page_number = pageno;
306 
307  /* update the stats counter of zeroed pages */
309 
310  return slotno;
311 }
312 
313 /*
314  * Zero all the LSNs we store for this slru page.
315  *
316  * This should be called each time we create a new page, and each time we read
317  * in a page from disk into an existing buffer. (Such an old page cannot
318  * have any interesting LSNs, since we'd have flushed them before writing
319  * the page in the first place.)
320  *
321  * This assumes that InvalidXLogRecPtr is bitwise-all-0.
322  */
323 static void
324 SimpleLruZeroLSNs(SlruCtl ctl, int slotno)
325 {
326  SlruShared shared = ctl->shared;
327 
328  if (shared->lsn_groups_per_page > 0)
329  MemSet(&shared->group_lsn[slotno * shared->lsn_groups_per_page], 0,
330  shared->lsn_groups_per_page * sizeof(XLogRecPtr));
331 }
332 
333 /*
334  * Wait for any active I/O on a page slot to finish. (This does not
335  * guarantee that new I/O hasn't been started before we return, though.
336  * In fact the slot might not even contain the same page anymore.)
337  *
338  * Control lock must be held at entry, and will be held at exit.
339  */
340 static void
341 SimpleLruWaitIO(SlruCtl ctl, int slotno)
342 {
343  SlruShared shared = ctl->shared;
344 
345  /* See notes at top of file */
346  LWLockRelease(shared->ControlLock);
347  LWLockAcquire(&shared->buffer_locks[slotno].lock, LW_SHARED);
348  LWLockRelease(&shared->buffer_locks[slotno].lock);
350 
351  /*
352  * If the slot is still in an io-in-progress state, then either someone
353  * already started a new I/O on the slot, or a previous I/O failed and
354  * neglected to reset the page state. That shouldn't happen, really, but
355  * it seems worth a few extra cycles to check and recover from it. We can
356  * cheaply test for failure by seeing if the buffer lock is still held (we
357  * assume that transaction abort would release the lock).
358  */
359  if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS ||
360  shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS)
361  {
362  if (LWLockConditionalAcquire(&shared->buffer_locks[slotno].lock, LW_SHARED))
363  {
364  /* indeed, the I/O must have failed */
365  if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS)
366  shared->page_status[slotno] = SLRU_PAGE_EMPTY;
367  else /* write_in_progress */
368  {
369  shared->page_status[slotno] = SLRU_PAGE_VALID;
370  shared->page_dirty[slotno] = true;
371  }
372  LWLockRelease(&shared->buffer_locks[slotno].lock);
373  }
374  }
375 }
376 
377 /*
378  * Find a page in a shared buffer, reading it in if necessary.
379  * The page number must correspond to an already-initialized page.
380  *
381  * If write_ok is true then it is OK to return a page that is in
382  * WRITE_IN_PROGRESS state; it is the caller's responsibility to be sure
383  * that modification of the page is safe. If write_ok is false then we
384  * will not return the page until it is not undergoing active I/O.
385  *
386  * The passed-in xid is used only for error reporting, and may be
387  * InvalidTransactionId if no specific xid is associated with the action.
388  *
389  * Return value is the shared-buffer slot number now holding the page.
390  * The buffer's LRU access info is updated.
391  *
392  * Control lock must be held at entry, and will be held at exit.
393  */
394 int
395 SimpleLruReadPage(SlruCtl ctl, int pageno, bool write_ok,
396  TransactionId xid)
397 {
398  SlruShared shared = ctl->shared;
399 
400  /* Outer loop handles restart if we must wait for someone else's I/O */
401  for (;;)
402  {
403  int slotno;
404  bool ok;
405 
406  /* See if page already is in memory; if not, pick victim slot */
407  slotno = SlruSelectLRUPage(ctl, pageno);
408 
409  /* Did we find the page in memory? */
410  if (shared->page_number[slotno] == pageno &&
411  shared->page_status[slotno] != SLRU_PAGE_EMPTY)
412  {
413  /*
414  * If page is still being read in, we must wait for I/O. Likewise
415  * if the page is being written and the caller said that's not OK.
416  */
417  if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS ||
418  (shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS &&
419  !write_ok))
420  {
421  SimpleLruWaitIO(ctl, slotno);
422  /* Now we must recheck state from the top */
423  continue;
424  }
425  /* Otherwise, it's ready to use */
426  SlruRecentlyUsed(shared, slotno);
427 
428  /* update the stats counter of pages found in the SLRU */
430 
431  return slotno;
432  }
433 
434  /* We found no match; assert we selected a freeable slot */
435  Assert(shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
436  (shared->page_status[slotno] == SLRU_PAGE_VALID &&
437  !shared->page_dirty[slotno]));
438 
439  /* Mark the slot read-busy */
440  shared->page_number[slotno] = pageno;
441  shared->page_status[slotno] = SLRU_PAGE_READ_IN_PROGRESS;
442  shared->page_dirty[slotno] = false;
443 
444  /* Acquire per-buffer lock (cannot deadlock, see notes at top) */
445  LWLockAcquire(&shared->buffer_locks[slotno].lock, LW_EXCLUSIVE);
446 
447  /* Release control lock while doing I/O */
448  LWLockRelease(shared->ControlLock);
449 
450  /* Do the read */
451  ok = SlruPhysicalReadPage(ctl, pageno, slotno);
452 
453  /* Set the LSNs for this newly read-in page to zero */
454  SimpleLruZeroLSNs(ctl, slotno);
455 
456  /* Re-acquire control lock and update page state */
458 
459  Assert(shared->page_number[slotno] == pageno &&
460  shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS &&
461  !shared->page_dirty[slotno]);
462 
463  shared->page_status[slotno] = ok ? SLRU_PAGE_VALID : SLRU_PAGE_EMPTY;
464 
465  LWLockRelease(&shared->buffer_locks[slotno].lock);
466 
467  /* Now it's okay to ereport if we failed */
468  if (!ok)
469  SlruReportIOError(ctl, pageno, xid);
470 
471  SlruRecentlyUsed(shared, slotno);
472 
473  /* update the stats counter of pages not found in SLRU */
475 
476  return slotno;
477  }
478 }
479 
480 /*
481  * Find a page in a shared buffer, reading it in if necessary.
482  * The page number must correspond to an already-initialized page.
483  * The caller must intend only read-only access to the page.
484  *
485  * The passed-in xid is used only for error reporting, and may be
486  * InvalidTransactionId if no specific xid is associated with the action.
487  *
488  * Return value is the shared-buffer slot number now holding the page.
489  * The buffer's LRU access info is updated.
490  *
491  * Control lock must NOT be held at entry, but will be held at exit.
492  * It is unspecified whether the lock will be shared or exclusive.
493  */
494 int
496 {
497  SlruShared shared = ctl->shared;
498  int slotno;
499 
500  /* Try to find the page while holding only shared lock */
502 
503  /* See if page is already in a buffer */
504  for (slotno = 0; slotno < shared->num_slots; slotno++)
505  {
506  if (shared->page_number[slotno] == pageno &&
507  shared->page_status[slotno] != SLRU_PAGE_EMPTY &&
508  shared->page_status[slotno] != SLRU_PAGE_READ_IN_PROGRESS)
509  {
510  /* See comments for SlruRecentlyUsed macro */
511  SlruRecentlyUsed(shared, slotno);
512 
513  /* update the stats counter of pages found in the SLRU */
515 
516  return slotno;
517  }
518  }
519 
520  /* No luck, so switch to normal exclusive lock and do regular read */
521  LWLockRelease(shared->ControlLock);
523 
524  return SimpleLruReadPage(ctl, pageno, true, xid);
525 }
526 
527 /*
528  * Write a page from a shared buffer, if necessary.
529  * Does nothing if the specified slot is not dirty.
530  *
531  * NOTE: only one write attempt is made here. Hence, it is possible that
532  * the page is still dirty at exit (if someone else re-dirtied it during
533  * the write). However, we *do* attempt a fresh write even if the page
534  * is already being written; this is for checkpoints.
535  *
536  * Control lock must be held at entry, and will be held at exit.
537  */
538 static void
539 SlruInternalWritePage(SlruCtl ctl, int slotno, SlruWriteAll fdata)
540 {
541  SlruShared shared = ctl->shared;
542  int pageno = shared->page_number[slotno];
543  bool ok;
544 
545  /* If a write is in progress, wait for it to finish */
546  while (shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS &&
547  shared->page_number[slotno] == pageno)
548  {
549  SimpleLruWaitIO(ctl, slotno);
550  }
551 
552  /*
553  * Do nothing if page is not dirty, or if buffer no longer contains the
554  * same page we were called for.
555  */
556  if (!shared->page_dirty[slotno] ||
557  shared->page_status[slotno] != SLRU_PAGE_VALID ||
558  shared->page_number[slotno] != pageno)
559  return;
560 
561  /*
562  * Mark the slot write-busy, and clear the dirtybit. After this point, a
563  * transaction status update on this page will mark it dirty again.
564  */
565  shared->page_status[slotno] = SLRU_PAGE_WRITE_IN_PROGRESS;
566  shared->page_dirty[slotno] = false;
567 
568  /* Acquire per-buffer lock (cannot deadlock, see notes at top) */
569  LWLockAcquire(&shared->buffer_locks[slotno].lock, LW_EXCLUSIVE);
570 
571  /* Release control lock while doing I/O */
572  LWLockRelease(shared->ControlLock);
573 
574  /* Do the write */
575  ok = SlruPhysicalWritePage(ctl, pageno, slotno, fdata);
576 
577  /* If we failed, and we're in a flush, better close the files */
578  if (!ok && fdata)
579  {
580  int i;
581 
582  for (i = 0; i < fdata->num_files; i++)
583  CloseTransientFile(fdata->fd[i]);
584  }
585 
586  /* Re-acquire control lock and update page state */
588 
589  Assert(shared->page_number[slotno] == pageno &&
590  shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS);
591 
592  /* If we failed to write, mark the page dirty again */
593  if (!ok)
594  shared->page_dirty[slotno] = true;
595 
596  shared->page_status[slotno] = SLRU_PAGE_VALID;
597 
598  LWLockRelease(&shared->buffer_locks[slotno].lock);
599 
600  /* Now it's okay to ereport if we failed */
601  if (!ok)
603 
604  /* If part of a checkpoint, count this as a buffer written. */
605  if (fdata)
607 }
608 
609 /*
610  * Wrapper of SlruInternalWritePage, for external callers.
611  * fdata is always passed a NULL here.
612  */
613 void
614 SimpleLruWritePage(SlruCtl ctl, int slotno)
615 {
616  SlruInternalWritePage(ctl, slotno, NULL);
617 }
618 
619 /*
620  * Return whether the given page exists on disk.
621  *
622  * A false return means that either the file does not exist, or that it's not
623  * large enough to contain the given page.
624  */
625 bool
627 {
628  int segno = pageno / SLRU_PAGES_PER_SEGMENT;
629  int rpageno = pageno % SLRU_PAGES_PER_SEGMENT;
630  int offset = rpageno * BLCKSZ;
631  char path[MAXPGPATH];
632  int fd;
633  bool result;
634  off_t endpos;
635 
636  /* update the stats counter of checked pages */
638 
639  SlruFileName(ctl, path, segno);
640 
641  fd = OpenTransientFile(path, O_RDONLY | PG_BINARY);
642  if (fd < 0)
643  {
644  /* expected: file doesn't exist */
645  if (errno == ENOENT)
646  return false;
647 
648  /* report error normally */
650  slru_errno = errno;
651  SlruReportIOError(ctl, pageno, 0);
652  }
653 
654  if ((endpos = lseek(fd, 0, SEEK_END)) < 0)
655  {
657  slru_errno = errno;
658  SlruReportIOError(ctl, pageno, 0);
659  }
660 
661  result = endpos >= (off_t) (offset + BLCKSZ);
662 
663  if (CloseTransientFile(fd) != 0)
664  {
666  slru_errno = errno;
667  return false;
668  }
669 
670  return result;
671 }
672 
673 /*
674  * Physical read of a (previously existing) page into a buffer slot
675  *
676  * On failure, we cannot just ereport(ERROR) since caller has put state in
677  * shared memory that must be undone. So, we return false and save enough
678  * info in static variables to let SlruReportIOError make the report.
679  *
680  * For now, assume it's not worth keeping a file pointer open across
681  * read/write operations. We could cache one virtual file pointer ...
682  */
683 static bool
684 SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno)
685 {
686  SlruShared shared = ctl->shared;
687  int segno = pageno / SLRU_PAGES_PER_SEGMENT;
688  int rpageno = pageno % SLRU_PAGES_PER_SEGMENT;
689  off_t offset = rpageno * BLCKSZ;
690  char path[MAXPGPATH];
691  int fd;
692 
693  SlruFileName(ctl, path, segno);
694 
695  /*
696  * In a crash-and-restart situation, it's possible for us to receive
697  * commands to set the commit status of transactions whose bits are in
698  * already-truncated segments of the commit log (see notes in
699  * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case
700  * where the file doesn't exist, and return zeroes instead.
701  */
702  fd = OpenTransientFile(path, O_RDONLY | PG_BINARY);
703  if (fd < 0)
704  {
705  if (errno != ENOENT || !InRecovery)
706  {
708  slru_errno = errno;
709  return false;
710  }
711 
712  ereport(LOG,
713  (errmsg("file \"%s\" doesn't exist, reading as zeroes",
714  path)));
715  MemSet(shared->page_buffer[slotno], 0, BLCKSZ);
716  return true;
717  }
718 
719  errno = 0;
721  if (pg_pread(fd, shared->page_buffer[slotno], BLCKSZ, offset) != BLCKSZ)
722  {
725  slru_errno = errno;
726  CloseTransientFile(fd);
727  return false;
728  }
730 
731  if (CloseTransientFile(fd) != 0)
732  {
734  slru_errno = errno;
735  return false;
736  }
737 
738  return true;
739 }
740 
741 /*
742  * Physical write of a page from a buffer slot
743  *
744  * On failure, we cannot just ereport(ERROR) since caller has put state in
745  * shared memory that must be undone. So, we return false and save enough
746  * info in static variables to let SlruReportIOError make the report.
747  *
748  * For now, assume it's not worth keeping a file pointer open across
749  * independent read/write operations. We do batch operations during
750  * SimpleLruWriteAll, though.
751  *
752  * fdata is NULL for a standalone write, pointer to open-file info during
753  * SimpleLruWriteAll.
754  */
755 static bool
756 SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruWriteAll fdata)
757 {
758  SlruShared shared = ctl->shared;
759  int segno = pageno / SLRU_PAGES_PER_SEGMENT;
760  int rpageno = pageno % SLRU_PAGES_PER_SEGMENT;
761  off_t offset = rpageno * BLCKSZ;
762  char path[MAXPGPATH];
763  int fd = -1;
764 
765  /* update the stats counter of written pages */
767 
768  /*
769  * Honor the write-WAL-before-data rule, if appropriate, so that we do not
770  * write out data before associated WAL records. This is the same action
771  * performed during FlushBuffer() in the main buffer manager.
772  */
773  if (shared->group_lsn != NULL)
774  {
775  /*
776  * We must determine the largest async-commit LSN for the page. This
777  * is a bit tedious, but since this entire function is a slow path
778  * anyway, it seems better to do this here than to maintain a per-page
779  * LSN variable (which'd need an extra comparison in the
780  * transaction-commit path).
781  */
782  XLogRecPtr max_lsn;
783  int lsnindex,
784  lsnoff;
785 
786  lsnindex = slotno * shared->lsn_groups_per_page;
787  max_lsn = shared->group_lsn[lsnindex++];
788  for (lsnoff = 1; lsnoff < shared->lsn_groups_per_page; lsnoff++)
789  {
790  XLogRecPtr this_lsn = shared->group_lsn[lsnindex++];
791 
792  if (max_lsn < this_lsn)
793  max_lsn = this_lsn;
794  }
795 
796  if (!XLogRecPtrIsInvalid(max_lsn))
797  {
798  /*
799  * As noted above, elog(ERROR) is not acceptable here, so if
800  * XLogFlush were to fail, we must PANIC. This isn't much of a
801  * restriction because XLogFlush is just about all critical
802  * section anyway, but let's make sure.
803  */
805  XLogFlush(max_lsn);
807  }
808  }
809 
810  /*
811  * During a WriteAll, we may already have the desired file open.
812  */
813  if (fdata)
814  {
815  int i;
816 
817  for (i = 0; i < fdata->num_files; i++)
818  {
819  if (fdata->segno[i] == segno)
820  {
821  fd = fdata->fd[i];
822  break;
823  }
824  }
825  }
826 
827  if (fd < 0)
828  {
829  /*
830  * If the file doesn't already exist, we should create it. It is
831  * possible for this to need to happen when writing a page that's not
832  * first in its segment; we assume the OS can cope with that. (Note:
833  * it might seem that it'd be okay to create files only when
834  * SimpleLruZeroPage is called for the first page of a segment.
835  * However, if after a crash and restart the REDO logic elects to
836  * replay the log from a checkpoint before the latest one, then it's
837  * possible that we will get commands to set transaction status of
838  * transactions that have already been truncated from the commit log.
839  * Easiest way to deal with that is to accept references to
840  * nonexistent files here and in SlruPhysicalReadPage.)
841  *
842  * Note: it is possible for more than one backend to be executing this
843  * code simultaneously for different pages of the same file. Hence,
844  * don't use O_EXCL or O_TRUNC or anything like that.
845  */
846  SlruFileName(ctl, path, segno);
847  fd = OpenTransientFile(path, O_RDWR | O_CREAT | PG_BINARY);
848  if (fd < 0)
849  {
851  slru_errno = errno;
852  return false;
853  }
854 
855  if (fdata)
856  {
857  if (fdata->num_files < MAX_WRITEALL_BUFFERS)
858  {
859  fdata->fd[fdata->num_files] = fd;
860  fdata->segno[fdata->num_files] = segno;
861  fdata->num_files++;
862  }
863  else
864  {
865  /*
866  * In the unlikely event that we exceed MAX_FLUSH_BUFFERS,
867  * fall back to treating it as a standalone write.
868  */
869  fdata = NULL;
870  }
871  }
872  }
873 
874  errno = 0;
876  if (pg_pwrite(fd, shared->page_buffer[slotno], BLCKSZ, offset) != BLCKSZ)
877  {
879  /* if write didn't set errno, assume problem is no disk space */
880  if (errno == 0)
881  errno = ENOSPC;
883  slru_errno = errno;
884  if (!fdata)
885  CloseTransientFile(fd);
886  return false;
887  }
889 
890  /* Queue up a sync request for the checkpointer. */
891  if (ctl->sync_handler != SYNC_HANDLER_NONE)
892  {
893  FileTag tag;
894 
895  INIT_SLRUFILETAG(tag, ctl->sync_handler, segno);
896  if (!RegisterSyncRequest(&tag, SYNC_REQUEST, false))
897  {
898  /* No space to enqueue sync request. Do it synchronously. */
900  if (pg_fsync(fd) != 0)
901  {
904  slru_errno = errno;
905  CloseTransientFile(fd);
906  return false;
907  }
909  }
910  }
911 
912  /* Close file, unless part of flush request. */
913  if (!fdata)
914  {
915  if (CloseTransientFile(fd) != 0)
916  {
918  slru_errno = errno;
919  return false;
920  }
921  }
922 
923  return true;
924 }
925 
926 /*
927  * Issue the error message after failure of SlruPhysicalReadPage or
928  * SlruPhysicalWritePage. Call this after cleaning up shared-memory state.
929  */
930 static void
932 {
 /*
  * Report a previously recorded SLRU I/O failure as an ERROR, naming the
  * affected transaction and the segment file involved.
  *
  * NOTE(review): line 931 (the parameter list; the cross-reference index
  * gives "SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid)")
  * and several interior lines (944, 950, 958, 970, 981-982, 989 -- the
  * errcode_for_file_access() arguments and two ereport openings) appear to
  * have been dropped by the HTML extraction; code below kept byte-identical.
  */
933  int segno = pageno / SLRU_PAGES_PER_SEGMENT;
934  int rpageno = pageno % SLRU_PAGES_PER_SEGMENT;	/* page within segment */
935  int offset = rpageno * BLCKSZ;	/* byte offset within segment file */
936  char path[MAXPGPATH];
937 
938  SlruFileName(ctl, path, segno);
 /* Restore the saved errno so %m in the messages reports the original error. */
939  errno = slru_errno;
940  switch (slru_errcause)
941  {
942  case SLRU_OPEN_FAILED:
943  ereport(ERROR,
945  errmsg("could not access status of transaction %u", xid),
946  errdetail("Could not open file \"%s\": %m.", path)));
947  break;
948  case SLRU_SEEK_FAILED:
949  ereport(ERROR,
951  errmsg("could not access status of transaction %u", xid),
952  errdetail("Could not seek in file \"%s\" to offset %u: %m.",
953  path, offset)));
954  break;
955  case SLRU_READ_FAILED:
 /* errno == 0 distinguishes a short read from a kernel-reported error */
956  if (errno)
957  ereport(ERROR,
959  errmsg("could not access status of transaction %u", xid),
960  errdetail("Could not read from file \"%s\" at offset %u: %m.",
961  path, offset)));
962  else
963  ereport(ERROR,
964  (errmsg("could not access status of transaction %u", xid),
965  errdetail("Could not read from file \"%s\" at offset %u: read too few bytes.", path, offset)));
966  break;
967  case SLRU_WRITE_FAILED:
 /* likewise, errno == 0 means a short write rather than a kernel error */
968  if (errno)
969  ereport(ERROR,
971  errmsg("could not access status of transaction %u", xid),
972  errdetail("Could not write to file \"%s\" at offset %u: %m.",
973  path, offset)));
974  else
975  ereport(ERROR,
976  (errmsg("could not access status of transaction %u", xid),
977  errdetail("Could not write to file \"%s\" at offset %u: wrote too few bytes.",
978  path, offset)));
979  break;
980  case SLRU_FSYNC_FAILED:
983  errmsg("could not access status of transaction %u", xid),
984  errdetail("Could not fsync file \"%s\": %m.",
985  path)));
986  break;
987  case SLRU_CLOSE_FAILED:
988  ereport(ERROR,
990  errmsg("could not access status of transaction %u", xid),
991  errdetail("Could not close file \"%s\": %m.",
992  path)));
993  break;
994  default:
995  /* can't get here, we trust */
996  elog(ERROR, "unrecognized SimpleLru error cause: %d",
997  (int) slru_errcause);
998  break;
999  }
1000 }
1001 
1002 /*
1003  * Select the slot to re-use when we need a free slot.
1004  *
1005  * The target page number is passed because we need to consider the
1006  * possibility that some other process reads in the target page while
1007  * we are doing I/O to free a slot. Hence, check or recheck to see if
1008  * any slot already holds the target page, and return that slot if so.
1009  * Thus, the returned slot is *either* a slot already holding the pageno
1010  * (could be any state except EMPTY), *or* a freeable slot (state EMPTY
1011  * or CLEAN).
1012  *
1013  * Control lock must be held at entry, and will be held at exit.
1014  */
1015 static int
1016 SlruSelectLRUPage(SlruCtl ctl, int pageno)
1017 {
1018  SlruShared shared = ctl->shared;
1019 
1020  /* Outer loop handles restart after I/O */
1021  for (;;)
1022  {
1023  int slotno;
1024  int cur_count;
1025  int bestvalidslot = 0; /* keep compiler quiet */
1026  int best_valid_delta = -1;
1027  int best_valid_page_number = 0; /* keep compiler quiet */
1028  int bestinvalidslot = 0; /* keep compiler quiet */
1029  int best_invalid_delta = -1;
1030  int best_invalid_page_number = 0; /* keep compiler quiet */
1031 
1032  /* See if page already has a buffer assigned */
1033  for (slotno = 0; slotno < shared->num_slots; slotno++)
1034  {
1035  if (shared->page_number[slotno] == pageno &&
1036  shared->page_status[slotno] != SLRU_PAGE_EMPTY)
1037  return slotno;
1038  }
1039 
1040  /*
1041  * If we find any EMPTY slot, just select that one. Else choose a
1042  * victim page to replace. We normally take the least recently used
1043  * valid page, but we will never take the slot containing
1044  * latest_page_number, even if it appears least recently used. We
1045  * will select a slot that is already I/O busy only if there is no
1046  * other choice: a read-busy slot will not be least recently used once
1047  * the read finishes, and waiting for an I/O on a write-busy slot is
1048  * inferior to just picking some other slot. Testing shows the slot
1049  * we pick instead will often be clean, allowing us to begin a read at
1050  * once.
1051  *
1052  * Normally the page_lru_count values will all be different and so
1053  * there will be a well-defined LRU page. But since we allow
1054  * concurrent execution of SlruRecentlyUsed() within
1055  * SimpleLruReadPage_ReadOnly(), it is possible that multiple pages
1056  * acquire the same lru_count values. In that case we break ties by
1057  * choosing the furthest-back page.
1058  *
1059  * Notice that this next line forcibly advances cur_lru_count to a
1060  * value that is certainly beyond any value that will be in the
1061  * page_lru_count array after the loop finishes. This ensures that
1062  * the next execution of SlruRecentlyUsed will mark the page newly
1063  * used, even if it's for a page that has the current counter value.
1064  * That gets us back on the path to having good data when there are
1065  * multiple pages with the same lru_count.
1066  */
1067  cur_count = (shared->cur_lru_count)++;
1068  for (slotno = 0; slotno < shared->num_slots; slotno++)
1069  {
1070  int this_delta;
1071  int this_page_number;
1072 
1073  if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
1074  return slotno;
1075  this_delta = cur_count - shared->page_lru_count[slotno];
1076  if (this_delta < 0)
1077  {
1078  /*
1079  * Clean up in case shared updates have caused cur_count
1080  * increments to get "lost". We back off the page counts,
1081  * rather than trying to increase cur_count, to avoid any
1082  * question of infinite loops or failure in the presence of
1083  * wrapped-around counts.
1084  */
1085  shared->page_lru_count[slotno] = cur_count;
1086  this_delta = 0;
1087  }
1088  this_page_number = shared->page_number[slotno];
1089  if (this_page_number == shared->latest_page_number)
1090  continue;
1091  if (shared->page_status[slotno] == SLRU_PAGE_VALID)
1092  {
1093  if (this_delta > best_valid_delta ||
1094  (this_delta == best_valid_delta &&
1095  ctl->PagePrecedes(this_page_number,
1096  best_valid_page_number)))
1097  {
1098  bestvalidslot = slotno;
1099  best_valid_delta = this_delta;
1100  best_valid_page_number = this_page_number;
1101  }
1102  }
1103  else
1104  {
1105  if (this_delta > best_invalid_delta ||
1106  (this_delta == best_invalid_delta &&
1107  ctl->PagePrecedes(this_page_number,
1108  best_invalid_page_number)))
1109  {
1110  bestinvalidslot = slotno;
1111  best_invalid_delta = this_delta;
1112  best_invalid_page_number = this_page_number;
1113  }
1114  }
1115  }
1116 
1117  /*
1118  * If all pages (except possibly the latest one) are I/O busy, we'll
1119  * have to wait for an I/O to complete and then retry. In that
1120  * unhappy case, we choose to wait for the I/O on the least recently
1121  * used slot, on the assumption that it was likely initiated first of
1122  * all the I/Os in progress and may therefore finish first.
1123  */
1124  if (best_valid_delta < 0)
1125  {
1126  SimpleLruWaitIO(ctl, bestinvalidslot);
1127  continue;
1128  }
1129 
1130  /*
1131  * If the selected page is clean, we're set.
1132  */
1133  if (!shared->page_dirty[bestvalidslot])
1134  return bestvalidslot;
1135 
1136  /*
1137  * Write the page.
1138  */
1139  SlruInternalWritePage(ctl, bestvalidslot, NULL);
1140 
1141  /*
1142  * Now loop back and try again. This is the easiest way of dealing
1143  * with corner cases such as the victim page being re-dirtied while we
1144  * wrote it.
1145  */
1146  }
1147 }
1148 
1149 /*
1150  * Write dirty pages to disk during checkpoint or database shutdown. Flushing
1151  * is deferred until the next call to ProcessSyncRequests(), though we do fsync
1152  * the containing directory here to make sure that newly created directory
1153  * entries are on disk.
1154  */
1155 void
1156 SimpleLruWriteAll(SlruCtl ctl, bool allow_redirtied)
1157 {
1158  SlruShared shared = ctl->shared;
1159  SlruWriteAllData fdata;
1160  int slotno;
1161  int pageno = 0;
1162  int i;
1163  bool ok;
1164 
1165  /* update the stats counter of flushes */
 /*
  * NOTE(review): lines 1166 (presumably the pgstat flush-counter call),
  * 1173 (presumably control-lock acquisition), 1200 (recording the error
  * cause) and 1207 (the error-report call) appear dropped by the HTML
  * extraction; code kept byte-identical.
  */
1167 
1168  /*
1169  * Find and write dirty pages
1170  */
1171  fdata.num_files = 0;
1172 
1174 
1175  for (slotno = 0; slotno < shared->num_slots; slotno++)
1176  {
1177  SlruInternalWritePage(ctl, slotno, &fdata);
1178 
1179  /*
1180  * In some places (e.g. checkpoints), we cannot assert that the slot
1181  * is clean now, since another process might have re-dirtied it
1182  * already. That's okay.
1183  */
1184  Assert(allow_redirtied ||
1185  shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
1186  (shared->page_status[slotno] == SLRU_PAGE_VALID &&
1187  !shared->page_dirty[slotno]));
1188  }
1189 
1190  LWLockRelease(shared->ControlLock);
1191 
1192  /*
1193  * Now close any files that were open
1194  */
1195  ok = true;
1196  for (i = 0; i < fdata.num_files; i++)
1197  {
1198  if (CloseTransientFile(fdata.fd[i]) != 0)
1199  {
 /* remember errno and the failing segment's first page for reporting */
1201  slru_errno = errno;
1202  pageno = fdata.segno[i] * SLRU_PAGES_PER_SEGMENT;
1203  ok = false;
1204  }
1205  }
1206  if (!ok)
1208 
1209  /* Ensure that directory entries for new files are on disk. */
1210  if (ctl->sync_handler != SYNC_HANDLER_NONE)
1211  fsync_fname(ctl->Dir, true);
1212 }
1213 
1214 /*
1215  * Remove all segments before the one holding the passed page number
1216  *
1217  * All SLRUs prevent concurrent calls to this function, either with an LWLock
1218  * or by calling it only as part of a checkpoint. Mutual exclusion must begin
1219  * before computing cutoffPage. Mutual exclusion must end after any limit
1220  * update that would permit other backends to write fresh data into the
1221  * segment immediately preceding the one containing cutoffPage. Otherwise,
1222  * when the SLRU is quite full, SimpleLruTruncate() might delete that segment
1223  * after it has accrued freshly-written data.
1224  */
1225 void
1226 SimpleLruTruncate(SlruCtl ctl, int cutoffPage)
1227 {
1228  SlruShared shared = ctl->shared;
1229  int slotno;
1230 
1231  /* update the stats counter of truncates */
 /*
  * NOTE(review): lines 1232 (presumably the pgstat truncate-counter call)
  * and 1240 (presumably control-lock acquisition) appear dropped by the
  * HTML extraction; code kept byte-identical.
  */
1233 
1234  /*
1235  * Scan shared memory and remove any pages preceding the cutoff page, to
1236  * ensure we won't rewrite them later. (Since this is normally called in
1237  * or just after a checkpoint, any dirty pages should have been flushed
1238  * already ... we're just being extra careful here.)
1239  */
1241 
1242 restart:;
1243 
1244  /*
1245  * While we are holding the lock, make an important safety check: the
1246  * current endpoint page must not be eligible for removal.
1247  */
1248  if (ctl->PagePrecedes(shared->latest_page_number, cutoffPage))
1249  {
1250  LWLockRelease(shared->ControlLock);
1251  ereport(LOG,
1252  (errmsg("could not truncate directory \"%s\": apparent wraparound",
1253  ctl->Dir)));
1254  return;
1255  }
1256 
1257  for (slotno = 0; slotno < shared->num_slots; slotno++)
1258  {
1259  if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
1260  continue;
1261  if (!ctl->PagePrecedes(shared->page_number[slotno], cutoffPage))
1262  continue;
1263 
1264  /*
1265  * If page is clean, just change state to EMPTY (expected case).
1266  */
1267  if (shared->page_status[slotno] == SLRU_PAGE_VALID &&
1268  !shared->page_dirty[slotno])
1269  {
1270  shared->page_status[slotno] = SLRU_PAGE_EMPTY;
1271  continue;
1272  }
1273 
1274  /*
1275  * Hmm, we have (or may have) I/O operations acting on the page, so
1276  * we've got to wait for them to finish and then start again. This is
1277  * the same logic as in SlruSelectLRUPage. (XXX if page is dirty,
1278  * wouldn't it be OK to just discard it without writing it?
1279  * SlruMayDeleteSegment() uses a stricter qualification, so we might
1280  * not delete this page in the end; even if we don't delete it, we
1281  * won't have cause to read its data again. For now, keep the logic
1282  * the same as it was.)
1283  */
1284  if (shared->page_status[slotno] == SLRU_PAGE_VALID)
1285  SlruInternalWritePage(ctl, slotno, NULL)
1286  else
1287  SimpleLruWaitIO(ctl, slotno);
1288  goto restart;
1289  }
1290 
1291  LWLockRelease(shared->ControlLock);
1292 
1293  /* Now we can remove the old segment(s) */
1294  (void) SlruScanDirectory(ctl, SlruScanDirCbDeleteCutoff, &cutoffPage);
1295 }
1296 
1297 /*
1298  * Delete an individual SLRU segment.
1299  *
1300  * NB: This does not touch the SLRU buffers themselves, callers have to ensure
1301  * they either can't yet contain anything, or have already been cleaned out.
1302  */
1303 static void
1305 {
 /*
  * Delete one SLRU segment file, first cancelling any queued fsync work.
  *
  * NOTE(review): line 1304 (the parameter list; the cross-reference index
  * gives "SlruInternalDeleteSegment(SlruCtl ctl, int segno)") and line 1314
  * (presumably the RegisterSyncRequest call that forgets queued fsyncs for
  * the INIT_SLRUFILETAG built below) appear dropped by the HTML extraction;
  * code kept byte-identical.
  */
1306  char path[MAXPGPATH];
1307 
1308  /* Forget any fsync requests queued for this segment. */
1309  if (ctl->sync_handler != SYNC_HANDLER_NONE)
1310  {
1311  FileTag tag;
1312 
1313  INIT_SLRUFILETAG(tag, ctl->sync_handler, segno);
1315  }
1316 
1317  /* Unlink the file. */
1318  SlruFileName(ctl, path, segno);
1319  ereport(DEBUG2, (errmsg_internal("removing file \"%s\"", path)));
 /* unlink()'s return value is not checked here */
1320  unlink(path);
1321 }
1322 
1323 /*
1324  * Delete an individual SLRU segment, identified by the segment number.
1325  */
1326 void
1328 {
 /*
  * Delete an individual SLRU segment identified by segment number, after
  * flushing out any buffer slots still referencing pages of that segment.
  *
  * NOTE(review): line 1327 (the parameter list; the cross-reference index
  * gives "SlruDeleteSegment(SlruCtl ctl, int segno)") and line 1334
  * (presumably acquisition of shared->ControlLock, released at the bottom)
  * appear dropped by the HTML extraction; code kept byte-identical.
  */
1329  SlruShared shared = ctl->shared;
1330  int slotno;
1331  bool did_write;
1332 
1333  /* Clean out any possibly existing references to the segment. */
1335 restart:
1336  did_write = false;
1337  for (slotno = 0; slotno < shared->num_slots; slotno++)
1338  {
1339  int pagesegno = shared->page_number[slotno] / SLRU_PAGES_PER_SEGMENT;
1340 
1341  if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
1342  continue;
1343 
1344  /* not the segment we're looking for */
1345  if (pagesegno != segno)
1346  continue;
1347 
1348  /* If page is clean, just change state to EMPTY (expected case). */
1349  if (shared->page_status[slotno] == SLRU_PAGE_VALID &&
1350  !shared->page_dirty[slotno])
1351  {
1352  shared->page_status[slotno] = SLRU_PAGE_EMPTY;
1353  continue;
1354  }
1355 
1356  /* Same logic as SimpleLruTruncate() */
1357  if (shared->page_status[slotno] == SLRU_PAGE_VALID)
1358  SlruInternalWritePage(ctl, slotno, NULL);
1359  else
1360  SimpleLruWaitIO(ctl, slotno);
1361 
1362  did_write = true;
1363  }
1364 
1365  /*
1366  * Be extra careful and re-check. The IO functions release the control
1367  * lock, so new pages could have been read in.
1368  */
1369  if (did_write)
1370  goto restart;
1371 
1372  SlruInternalDeleteSegment(ctl, segno);
1373 
1374  LWLockRelease(shared->ControlLock);
1375 }
1376 
1377 /*
1378  * Determine whether a segment is okay to delete.
1379  *
1380  * segpage is the first page of the segment, and cutoffPage is the oldest (in
1381  * PagePrecedes order) page in the SLRU containing still-useful data. Since
1382  * every core PagePrecedes callback implements "wrap around", check the
1383  * segment's first and last pages:
1384  *
1385  * first<cutoff && last<cutoff: yes
1386  * first<cutoff && last>=cutoff: no; cutoff falls inside this segment
1387  * first>=cutoff && last<cutoff: no; wrap point falls inside this segment
1388  * first>=cutoff && last>=cutoff: no; every page of this segment is too young
1389  */
1390 static bool
1391 SlruMayDeleteSegment(SlruCtl ctl, int segpage, int cutoffPage)
1392 {
1393  int seg_last_page = segpage + SLRU_PAGES_PER_SEGMENT - 1;
1394 
1395  Assert(segpage % SLRU_PAGES_PER_SEGMENT == 0);
1396 
1397  return (ctl->PagePrecedes(segpage, cutoffPage) &&
1398  ctl->PagePrecedes(seg_last_page, cutoffPage));
1399 }
1400 
1401 #ifdef USE_ASSERT_CHECKING
1402 static void
1403 SlruPagePrecedesTestOffset(SlruCtl ctl, int per_page, uint32 offset)
1404 {
 /*
  * Assertion-check a PagePrecedes callback at one entry offset within a
  * page: exercises XID pairs at "opposite ends" of the XID space and the
  * boundary segments GetNewTransactionId() may still need.
  */
1405  TransactionId lhs,
1406  rhs;
1407  int newestPage,
1408  oldestPage;
1409  TransactionId newestXact,
1410  oldestXact;
1411 
1412  /*
1413  * Compare an XID pair having undefined order (see RFC 1982), a pair at
1414  * "opposite ends" of the XID space. TransactionIdPrecedes() treats each
1415  * as preceding the other. If RHS is oldestXact, LHS is the first XID we
1416  * must not assign.
1417  */
1418  lhs = per_page + offset; /* skip first page to avoid non-normal XIDs */
1419  rhs = lhs + (1U << 31);
1420  Assert(TransactionIdPrecedes(lhs, rhs));
1421  Assert(TransactionIdPrecedes(rhs, lhs));
1422  Assert(!TransactionIdPrecedes(lhs - 1, rhs));
1423  Assert(TransactionIdPrecedes(rhs, lhs - 1));
1424  Assert(TransactionIdPrecedes(lhs + 1, rhs));
1425  Assert(!TransactionIdPrecedes(rhs, lhs + 1));
 /* NOTE(review): lines 1426-1427 appear dropped by the HTML extraction. */
1428  Assert(!ctl->PagePrecedes(lhs / per_page, lhs / per_page));
1429  Assert(!ctl->PagePrecedes(lhs / per_page, rhs / per_page));
1430  Assert(!ctl->PagePrecedes(rhs / per_page, lhs / per_page));
1431  Assert(!ctl->PagePrecedes((lhs - per_page) / per_page, rhs / per_page));
1432  Assert(ctl->PagePrecedes(rhs / per_page, (lhs - 3 * per_page) / per_page));
1433  Assert(ctl->PagePrecedes(rhs / per_page, (lhs - 2 * per_page) / per_page));
1434  Assert(ctl->PagePrecedes(rhs / per_page, (lhs - 1 * per_page) / per_page)
1435  || (1U << 31) % per_page != 0); /* See CommitTsPagePrecedes() */
1436  Assert(ctl->PagePrecedes((lhs + 1 * per_page) / per_page, rhs / per_page)
1437  || (1U << 31) % per_page != 0);
1438  Assert(ctl->PagePrecedes((lhs + 2 * per_page) / per_page, rhs / per_page));
1439  Assert(ctl->PagePrecedes((lhs + 3 * per_page) / per_page, rhs / per_page));
1440  Assert(!ctl->PagePrecedes(rhs / per_page, (lhs + per_page) / per_page));
1441 
1442  /*
1443  * GetNewTransactionId() has assigned the last XID it can safely use, and
1444  * that XID is in the *LAST* page of the second segment. We must not
1445  * delete that segment.
1446  */
1447  newestPage = 2 * SLRU_PAGES_PER_SEGMENT - 1;
1448  newestXact = newestPage * per_page + offset;
1449  Assert(newestXact / per_page == newestPage);
1450  oldestXact = newestXact + 1;
1451  oldestXact -= 1U << 31;
1452  oldestPage = oldestXact / per_page;
 /* NOTE(review): line 1453 (presumably the opening of an
  * Assert(!SlruMayDeleteSegment(ctl, ...) call whose arguments follow)
  * appears dropped by the HTML extraction. */
1454  (newestPage -
1455  newestPage % SLRU_PAGES_PER_SEGMENT),
1456  oldestPage));
1457 
1458  /*
1459  * GetNewTransactionId() has assigned the last XID it can safely use, and
1460  * that XID is in the *FIRST* page of the second segment. We must not
1461  * delete that segment.
1462  */
1463  newestPage = SLRU_PAGES_PER_SEGMENT;
1464  newestXact = newestPage * per_page + offset;
1465  Assert(newestXact / per_page == newestPage);
1466  oldestXact = newestXact + 1;
1467  oldestXact -= 1U << 31;
1468  oldestPage = oldestXact / per_page;
 /* NOTE(review): line 1469 (presumably the matching
  * Assert(!SlruMayDeleteSegment(ctl, ...) opening) appears dropped by the
  * HTML extraction. */
1470  (newestPage -
1471  newestPage % SLRU_PAGES_PER_SEGMENT),
1472  oldestPage));
1473 }
1474 
1475 /*
1476  * Unit-test a PagePrecedes function.
1477  *
1478  * This assumes every uint32 >= FirstNormalTransactionId is a valid key. It
1479  * assumes each value occupies a contiguous, fixed-size region of SLRU bytes.
1480  * (MultiXactMemberCtl separates flags from XIDs. AsyncCtl has
1481  * variable-length entries, no keys, and no random access. These unit tests
1482  * do not apply to them.)
1483  */
1484 void
1485 SlruPagePrecedesUnitTests(SlruCtl ctl, int per_page)
1486 {
1487  /* Test first, middle and last entries of a page. */
1488  SlruPagePrecedesTestOffset(ctl, per_page, 0);
1489  SlruPagePrecedesTestOffset(ctl, per_page, per_page / 2);
1490  SlruPagePrecedesTestOffset(ctl, per_page, per_page - 1);
1491 }
1492 #endif
1493 
1494 /*
1495  * SlruScanDirectory callback
1496  * This callback reports true if there's any segment wholly prior to the
1497  * one containing the page passed as "data".
1498  */
1499 bool
1500 SlruScanDirCbReportPresence(SlruCtl ctl, char *filename, int segpage, void *data)
1501 {
1502  int cutoffPage = *(int *) data;
1503 
1504  if (SlruMayDeleteSegment(ctl, segpage, cutoffPage))
1505  return true; /* found one; don't iterate any more */
1506 
1507  return false; /* keep going */
1508 }
1509 
1510 /*
1511  * SlruScanDirectory callback.
1512  * This callback deletes segments prior to the one passed in as "data".
1513  */
1514 static bool
1515 SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, int segpage, void *data)
1516 {
 /*
  * SlruScanDirectory callback: deletes segments wholly prior to the cutoff
  * page passed via "data".  Always returns false so the scan continues.
  *
  * NOTE(review): line 1520 (the then-branch of the if below, presumably a
  * SlruInternalDeleteSegment call) appears dropped by the HTML extraction;
  * code kept byte-identical.
  */
1517  int cutoffPage = *(int *) data;
1518 
1519  if (SlruMayDeleteSegment(ctl, segpage, cutoffPage))
1521 
1522  return false; /* keep going */
1523 }
1524 
1525 /*
1526  * SlruScanDirectory callback.
1527  * This callback deletes all segments.
1528  */
1529 bool
1530 SlruScanDirCbDeleteAll(SlruCtl ctl, char *filename, int segpage, void *data)
1531 {
 /*
  * SlruScanDirectory callback: deletes every segment encountered.  Always
  * returns false so the scan continues.
  *
  * NOTE(review): line 1532 (presumably a SlruInternalDeleteSegment call)
  * appears dropped by the HTML extraction; code kept byte-identical.
  */
1533 
1534  return false; /* keep going */
1535 }
1536 
1537 /*
1538  * Scan the SimpleLru directory and apply a callback to each file found in it.
1539  *
1540  * If the callback returns true, the scan is stopped. The last return value
1541  * from the callback is returned.
1542  *
1543  * The callback receives the following arguments: 1. the SlruCtl struct for the
1544  * slru being truncated; 2. the filename being considered; 3. the page number
1545  * for the first page of that file; 4. a pointer to the opaque data given to us
1546  * by the caller.
1547  *
1548  * Note that the ordering in which the directory is scanned is not guaranteed.
1549  *
1550  * Note that no locking is applied.
1551  */
1552 bool
 /*
  * NOTE(review): line 1553 (the parameter list; the cross-reference index
  * gives "SlruScanDirectory(SlruCtl ctl, SlruScanCallback callback, void
  * *data)") appears dropped by the HTML extraction; code kept
  * byte-identical.
  */
1554 {
1555  bool retval = false;
1556  DIR *cldir;
1557  struct dirent *clde;
1558  int segno;
1559  int segpage;
1560 
1561  cldir = AllocateDir(ctl->Dir);
1562  while ((clde = ReadDir(cldir, ctl->Dir)) != NULL)
1563  {
1564  size_t len;
1565 
1566  len = strlen(clde->d_name);
1567 
 /* SLRU segment file names are 4, 5 or 6 upper-case hex digits */
1568  if ((len == 4 || len == 5 || len == 6) &&
1569  strspn(clde->d_name, "0123456789ABCDEF") == len)
1570  {
1571  segno = (int) strtol(clde->d_name, NULL, 16);
1572  segpage = segno * SLRU_PAGES_PER_SEGMENT;
1573 
1574  elog(DEBUG2, "SlruScanDirectory invoking callback on %s/%s",
1575  ctl->Dir, clde->d_name);
 /* a true return from the callback stops the scan */
1576  retval = callback(ctl, clde->d_name, segpage, data);
1577  if (retval)
1578  break;
1579  }
1580  }
1581  FreeDir(cldir);
1582 
1583  return retval;
1584 }
1585 
1586 /*
1587  * Individual SLRUs (clog, ...) have to provide a sync.c handler function so
1588  * that they can provide the correct "SlruCtl" (otherwise we don't know how to
1589  * build the path), but they just forward to this common implementation that
1590  * performs the fsync.
1591  */
1592 int
1593 SlruSyncFileTag(SlruCtl ctl, const FileTag *ftag, char *path)
1594 {
1595  int fd;
1596  int save_errno;
1597  int result;
1598 
1599  SlruFileName(ctl, path, ftag->segno);
1600 
1601  fd = OpenTransientFile(path, O_RDWR | PG_BINARY);
1602  if (fd < 0)
1603  return -1;
1604 
1605  result = pg_fsync(fd);
1606  save_errno = errno;
1607 
1608  CloseTransientFile(fd);
1609 
1610  errno = save_errno;
1611  return result;
1612 }
uint32 segno
Definition: sync.h:55
LWLock * ControlLock
Definition: slru.h:55
int * page_number
Definition: slru.h:67
Definition: lwlock.h:31
static void pgstat_report_wait_end(void)
Definition: wait_event.h:278
SlruPageStatus
Definition: slru.h:42
int SlruSyncFileTag(SlruCtl ctl, const FileTag *ftag, char *path)
Definition: slru.c:1593
uint32 TransactionId
Definition: c.h:587
#define INIT_SLRUFILETAG(a, xx_handler, xx_segno)
Definition: slru.c:88
bool SlruScanDirCbDeleteAll(SlruCtl ctl, char *filename, int segpage, void *data)
Definition: slru.c:1530
SyncRequestHandler
Definition: sync.h:35
int latest_page_number
Definition: slru.h:98
char ** page_buffer
Definition: slru.h:64
void SimpleLruTruncate(SlruCtl ctl, int cutoffPage)
Definition: slru.c:1226
#define END_CRIT_SECTION()
Definition: miscadmin.h:149
void fsync_fname(const char *fname, bool isdir)
Definition: fd.c:671
SlruErrorCause
Definition: slru.c:124
bool TransactionIdFollowsOrEquals(TransactionId id1, TransactionId id2)
Definition: transam.c:349
#define START_CRIT_SECTION()
Definition: miscadmin.h:147
#define MemSet(start, val, len)
Definition: c.h:1008
static SlruErrorCause slru_errcause
Definition: slru.c:134
int cur_lru_count
Definition: slru.h:91
static void SimpleLruZeroLSNs(SlruCtl ctl, int slotno)
Definition: slru.c:324
int lsn_groups_per_page
Definition: slru.h:80
void pgstat_count_slru_flush(int slru_idx)
Definition: pgstat.c:6032
#define LOG
Definition: elog.h:26
Definition: dirent.h:9
Size SimpleLruShmemSize(int nslots, int nlsns)
Definition: slru.c:156
void SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns, LWLock *ctllock, const char *subdir, int tranche_id, SyncRequestHandler sync_handler)
Definition: slru.c:187
void XLogFlush(XLogRecPtr record)
Definition: xlog.c:2860
static void SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid)
Definition: slru.c:931
#define PG_BINARY
Definition: c.h:1271
ssize_t pg_pwrite(int fd, const void *buf, size_t nbyte, off_t offset)
Definition: pwrite.c:27
int pgstat_slru_index(const char *name)
Definition: pgstat.c:5945
ssize_t pg_pread(int fd, void *buf, size_t nbyte, off_t offset)
Definition: pread.c:27
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1803
int segno[MAX_WRITEALL_BUFFERS]
Definition: slru.c:78
int fd[MAX_WRITEALL_BUFFERS]
Definition: slru.c:77
SlruPageStatus * page_status
Definition: slru.h:65
Definition: dirent.c:25
#define ERROR
Definition: elog.h:46
int num_files
Definition: slru.c:76
int OpenTransientFile(const char *fileName, int fileFlags)
Definition: fd.c:2509
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:396
struct SlruWriteAllData SlruWriteAllData
#define MAX_WRITEALL_BUFFERS
Definition: slru.c:72
int SimpleLruReadPage(SlruCtl ctl, int pageno, bool write_ok, TransactionId xid)
Definition: slru.c:395
#define MAXPGPATH
static void callback(struct sockaddr *addr, struct sockaddr *mask, void *unused)
Definition: test_ifaddrs.c:48
static XLogRecPtr endpos
Definition: pg_receivewal.c:48
static void SlruInternalWritePage(SlruCtl ctl, int slotno, SlruWriteAll fdata)
Definition: slru.c:539
#define DEBUG2
Definition: elog.h:24
void pgstat_count_slru_page_zeroed(int slru_idx)
Definition: pgstat.c:6002
bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1370
bool IsUnderPostmaster
Definition: globals.c:112
LWLockPadded * buffer_locks
Definition: slru.h:69
int errdetail(const char *fmt,...)
Definition: elog.c:1042
int errcode_for_file_access(void)
Definition: elog.c:721
void SimpleLruWriteAll(SlruCtl ctl, bool allow_redirtied)
Definition: slru.c:1156
#define InvalidTransactionId
Definition: transam.h:31
void pgstat_count_slru_page_read(int slru_idx)
Definition: pgstat.c:6020
unsigned int uint32
Definition: c.h:441
XLogRecPtr * group_lsn
Definition: slru.h:79
DIR * AllocateDir(const char *dirname)
Definition: fd.c:2720
bool SimpleLruDoesPhysicalPageExist(SlruCtl ctl, int pageno)
Definition: slru.c:626
void SimpleLruWritePage(SlruCtl ctl, int slotno)
Definition: slru.c:614
static bool SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruWriteAll fdata)
Definition: slru.c:756
static void SimpleLruWaitIO(SlruCtl ctl, int slotno)
Definition: slru.c:341
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:300
static void pgstat_report_wait_start(uint32 wait_event_info)
Definition: wait_event.h:262
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition: lwlock.c:736
int CloseTransientFile(int fd)
Definition: fd.c:2686
int ckpt_bufs_written
Definition: xlog.h:227
#define XLogRecPtrIsInvalid(r)
Definition: xlogdefs.h:29
void pgstat_count_slru_page_exists(int slru_idx)
Definition: pgstat.c:6014
bool(* SlruScanCallback)(SlruCtl ctl, char *filename, int segpage, void *data)
Definition: slru.h:161
SlruSharedData * SlruShared
Definition: slru.h:104
bool SlruScanDirCbReportPresence(SlruCtl ctl, char *filename, int segpage, void *data)
Definition: slru.c:1500
struct SlruWriteAllData * SlruWriteAll
Definition: slru.c:81
static bool SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, int segpage, void *data)
Definition: slru.c:1515
#define SlruFileName(ctl, path, seg)
Definition: slru.c:63
int data_sync_elevel(int elevel)
Definition: fd.c:3805
char Dir[64]
Definition: slru.h:136
int SimpleLruReadPage_ReadOnly(SlruCtl ctl, int pageno, TransactionId xid)
Definition: slru.c:495
void pgstat_count_slru_truncate(int slru_idx)
Definition: pgstat.c:6038
bool RegisterSyncRequest(const FileTag *ftag, SyncRequestType type, bool retryOnError)
Definition: sync.c:553
#define ereport(elevel,...)
Definition: elog.h:157
LWLock lock
Definition: lwlock.h:57
static bool SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno)
Definition: slru.c:684
static void SlruInternalDeleteSegment(SlruCtl ctl, int segno)
Definition: slru.c:1304
bool InRecovery
Definition: xlogutils.c:52
size_t strlcpy(char *dst, const char *src, size_t siz)
Definition: strlcpy.c:45
int errmsg_internal(const char *fmt,...)
Definition: elog.c:996
int * page_lru_count
Definition: slru.h:68
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:804
bool SlruScanDirectory(SlruCtl ctl, SlruScanCallback callback, void *data)
Definition: slru.c:1553
struct dirent * ReadDir(DIR *dir, const char *dirname)
Definition: fd.c:2786
CheckpointStatsData CheckpointStats
Definition: xlog.c:188
size_t Size
Definition: c.h:540
void pgstat_count_slru_page_hit(int slru_idx)
Definition: pgstat.c:6008
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1199
#define MAXALIGN(LEN)
Definition: c.h:757
SyncRequestHandler sync_handler
Definition: slru.h:118
int slru_stats_idx
Definition: slru.h:101
int num_slots
Definition: slru.h:58
const char * name
Definition: encode.c:561
static int SlruSelectLRUPage(SlruCtl ctl, int pageno)
Definition: slru.c:1016
static bool SlruMayDeleteSegment(SlruCtl ctl, int segpage, int cutoffPage)
Definition: slru.c:1391
static int slru_errno
Definition: slru.c:135
static char * filename
Definition: pg_dumpall.c:92
int errmsg(const char *fmt,...)
Definition: elog.c:909
bool * page_dirty
Definition: slru.h:66
#define elog(elevel,...)
Definition: elog.h:232
int i
SlruShared shared
Definition: slru.h:112
#define SlruPagePrecedesUnitTests(ctl, per_page)
Definition: slru.h:156
#define BUFFERALIGN(LEN)
Definition: c.h:759
bool(* PagePrecedes)(int, int)
Definition: slru.h:130
void SlruDeleteSegment(SlruCtl ctl, int segno)
Definition: slru.c:1327
int pg_fsync(int fd)
Definition: fd.c:357
char d_name[MAX_PATH]
Definition: dirent.h:15
#define SLRU_PAGES_PER_SEGMENT
Definition: slru.h:34
int SimpleLruZeroPage(SlruCtl ctl, int pageno)
Definition: slru.c:280
Definition: sync.h:50
int FreeDir(DIR *dir)
Definition: fd.c:2838
#define SlruRecentlyUsed(shared, slotno)
Definition: slru.c:114
void pgstat_count_slru_page_written(int slru_idx)
Definition: pgstat.c:6026