slru.c
1 /*-------------------------------------------------------------------------
2  *
3  * slru.c
4  * Simple LRU buffering for transaction status logfiles
5  *
6  * We use a simple least-recently-used scheme to manage a pool of page
7  * buffers. Under ordinary circumstances we expect that write
8  * traffic will occur mostly to the latest page (and to the just-prior
9  * page, soon after a page transition). Read traffic will probably touch
10  * a larger span of pages, but in any case a fairly small number of page
11  * buffers should be sufficient. So, we just search the buffers using plain
12  * linear search; there's no need for a hashtable or anything fancy.
13  * The management algorithm is straight LRU except that we will never swap
14  * out the latest page (since we know it's going to be hit again eventually).
15  *
16  * We use a control LWLock to protect the shared data structures, plus
17  * per-buffer LWLocks that synchronize I/O for each buffer. The control lock
18  * must be held to examine or modify any shared state. A process that is
19  * reading in or writing out a page buffer does not hold the control lock,
20  * only the per-buffer lock for the buffer it is working on.
21  *
22  * "Holding the control lock" means exclusive lock in all cases except for
23  * SimpleLruReadPage_ReadOnly(); see comments for SlruRecentlyUsed() for
24  * the implications of that.
25  *
26  * When initiating I/O on a buffer, we acquire the per-buffer lock exclusively
27  * before releasing the control lock. The per-buffer lock is released after
28  * completing the I/O, re-acquiring the control lock, and updating the shared
29  * state. (Deadlock is not possible here, because we never try to initiate
30  * I/O when someone else is already doing I/O on the same buffer.)
31  * To wait for I/O to complete, release the control lock, acquire the
32  * per-buffer lock in shared mode, immediately release the per-buffer lock,
33  * reacquire the control lock, and then recheck state (since arbitrary things
34  * could have happened while we didn't have the lock).
35  *
36  * As with the regular buffer manager, it is possible for another process
37  * to re-dirty a page that is currently being written out. This is handled
38  * by re-setting the page's page_dirty flag.
39  *
40  *
41  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
42  * Portions Copyright (c) 1994, Regents of the University of California
43  *
44  * src/backend/access/transam/slru.c
45  *
46  *-------------------------------------------------------------------------
47  */
48 #include "postgres.h"
49 
50 #include <fcntl.h>
51 #include <sys/stat.h>
52 #include <unistd.h>
53 
54 #include "access/slru.h"
55 #include "access/transam.h"
56 #include "access/xlog.h"
57 #include "miscadmin.h"
58 #include "pgstat.h"
59 #include "storage/fd.h"
60 #include "storage/shmem.h"
61 
62 #define SlruFileName(ctl, path, seg) \
63  snprintf(path, MAXPGPATH, "%s/%04X", (ctl)->Dir, seg)
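As a worked illustration of the macro above (the directory name and segment number are assumptions, not taken from this file): segment numbers map to four-character uppercase hex file names under the SLRU's directory.

   /* Illustrative sketch only; "pg_xact" and the segment number are assumptions. */
   char path[MAXPGPATH];

   SlruFileName(ctl, path, 0x12);   /* with ctl->Dir == "pg_xact", path becomes "pg_xact/0012" */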
64 
65 /*
66  * During SimpleLruFlush(), we will usually not need to write/fsync more
67  * than one or two physical files, but we may need to write several pages
68  * per file. We can consolidate the I/O requests by leaving files open
69  * until control returns to SimpleLruFlush(). This data structure remembers
70  * which files are open.
71  */
72 #define MAX_FLUSH_BUFFERS 16
73 
74 typedef struct SlruFlushData
75 {
76  int num_files; /* # files actually open */
77  int fd[MAX_FLUSH_BUFFERS]; /* their FD's */
78  int segno[MAX_FLUSH_BUFFERS]; /* their log seg#s */
79 } SlruFlushData;
80 
81 typedef struct SlruFlushData *SlruFlush;
82 
83 /*
84  * Macro to mark a buffer slot "most recently used". Note multiple evaluation
85  * of arguments!
86  *
87  * The reason for the if-test is that there are often many consecutive
88  * accesses to the same page (particularly the latest page). By suppressing
89  * useless increments of cur_lru_count, we reduce the probability that old
90  * pages' counts will "wrap around" and make them appear recently used.
91  *
92  * We allow this code to be executed concurrently by multiple processes within
93  * SimpleLruReadPage_ReadOnly(). As long as int reads and writes are atomic,
94  * this should not cause any completely-bogus values to enter the computation.
95  * However, it is possible for either cur_lru_count or individual
96  * page_lru_count entries to be "reset" to lower values than they should have,
97  * in case a process is delayed while it executes this macro. With care in
98  * SlruSelectLRUPage(), this does little harm, and in any case the absolute
99  * worst possible consequence is a nonoptimal choice of page to evict. The
100  * gain from allowing concurrent reads of SLRU pages seems worth it.
101  */
102 #define SlruRecentlyUsed(shared, slotno) \
103  do { \
104  int new_lru_count = (shared)->cur_lru_count; \
105  if (new_lru_count != (shared)->page_lru_count[slotno]) { \
106  (shared)->cur_lru_count = ++new_lru_count; \
107  (shared)->page_lru_count[slotno] = new_lru_count; \
108  } \
109  } while (0)
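To make the counter arithmetic concrete, here is a small illustrative sketch (the numbers are invented) of the "age" that SlruSelectLRUPage later derives from these counters:

   /* Illustrative sketch only; values are made up. */
   int cur_count = 100;                 /* shared->cur_lru_count after many accesses */
   int page_count = 95;                 /* shared->page_lru_count[slotno], set when last used */
   int age = cur_count - page_count;    /* 5; the valid slot with the largest age is the LRU victim */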
110 
111 /* Saved info for SlruReportIOError */
112 typedef enum
113 {
114  SLRU_OPEN_FAILED,
115  SLRU_SEEK_FAILED,
116  SLRU_READ_FAILED,
117  SLRU_WRITE_FAILED,
118  SLRU_FSYNC_FAILED,
119  SLRU_CLOSE_FAILED
120 } SlruErrorCause;
121 
122 static SlruErrorCause slru_errcause;
123 static int slru_errno;
124 
125 
126 static void SimpleLruZeroLSNs(SlruCtl ctl, int slotno);
127 static void SimpleLruWaitIO(SlruCtl ctl, int slotno);
128 static void SlruInternalWritePage(SlruCtl ctl, int slotno, SlruFlush fdata);
129 static bool SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno);
130 static bool SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno,
131  SlruFlush fdata);
132 static void SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid);
133 static int SlruSelectLRUPage(SlruCtl ctl, int pageno);
134 
135 static bool SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename,
136  int segpage, void *data);
137 static void SlruInternalDeleteSegment(SlruCtl ctl, char *filename);
138 
139 /*
140  * Initialization of shared memory
141  */
142 
143 Size
144 SimpleLruShmemSize(int nslots, int nlsns)
145 {
146  Size sz;
147 
148  /* we assume nslots isn't so large as to risk overflow */
149  sz = MAXALIGN(sizeof(SlruSharedData));
150  sz += MAXALIGN(nslots * sizeof(char *)); /* page_buffer[] */
151  sz += MAXALIGN(nslots * sizeof(SlruPageStatus)); /* page_status[] */
152  sz += MAXALIGN(nslots * sizeof(bool)); /* page_dirty[] */
153  sz += MAXALIGN(nslots * sizeof(int)); /* page_number[] */
154  sz += MAXALIGN(nslots * sizeof(int)); /* page_lru_count[] */
155  sz += MAXALIGN(nslots * sizeof(LWLockPadded)); /* buffer_locks[] */
156 
157  if (nlsns > 0)
158  sz += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr)); /* group_lsn[] */
159 
160  return BUFFERALIGN(sz) + BLCKSZ * nslots;
161 }
162 
163 /*
164  * Initialize, or attach to, a simple LRU cache in shared memory.
165  *
166  * ctl: address of local (unshared) control structure.
167  * name: name of SLRU. (This is user-visible, pick with care!)
168  * nslots: number of page slots to use.
169  * nlsns: number of LSN groups per page (set to zero if not relevant).
170  * ctllock: LWLock to use to control access to the shared control structure.
171  * subdir: PGDATA-relative subdirectory that will contain the files.
172  * tranche_id: LWLock tranche ID to use for the SLRU's per-buffer LWLocks.
173  */
174 void
175 SimpleLruInit(SlruCtl ctl, const char *name, int nslots, int nlsns,
176  LWLock *ctllock, const char *subdir, int tranche_id)
177 {
178  SlruShared shared;
179  bool found;
180 
181  shared = (SlruShared) ShmemInitStruct(name,
182  SimpleLruShmemSize(nslots, nlsns),
183  &found);
184 
185  if (!IsUnderPostmaster)
186  {
187  /* Initialize locks and shared memory area */
188  char *ptr;
189  Size offset;
190  int slotno;
191 
192  Assert(!found);
193 
194  memset(shared, 0, sizeof(SlruSharedData));
195 
196  shared->ControlLock = ctllock;
197 
198  shared->num_slots = nslots;
199  shared->lsn_groups_per_page = nlsns;
200 
201  shared->cur_lru_count = 0;
202 
203  /* shared->latest_page_number will be set later */
204 
205  shared->slru_stats_idx = pgstat_slru_index(name);
206 
207  ptr = (char *) shared;
208  offset = MAXALIGN(sizeof(SlruSharedData));
209  shared->page_buffer = (char **) (ptr + offset);
210  offset += MAXALIGN(nslots * sizeof(char *));
211  shared->page_status = (SlruPageStatus *) (ptr + offset);
212  offset += MAXALIGN(nslots * sizeof(SlruPageStatus));
213  shared->page_dirty = (bool *) (ptr + offset);
214  offset += MAXALIGN(nslots * sizeof(bool));
215  shared->page_number = (int *) (ptr + offset);
216  offset += MAXALIGN(nslots * sizeof(int));
217  shared->page_lru_count = (int *) (ptr + offset);
218  offset += MAXALIGN(nslots * sizeof(int));
219 
220  /* Initialize LWLocks */
221  shared->buffer_locks = (LWLockPadded *) (ptr + offset);
222  offset += MAXALIGN(nslots * sizeof(LWLockPadded));
223 
224  if (nlsns > 0)
225  {
226  shared->group_lsn = (XLogRecPtr *) (ptr + offset);
227  offset += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr));
228  }
229 
230  ptr += BUFFERALIGN(offset);
231  for (slotno = 0; slotno < nslots; slotno++)
232  {
233  LWLockInitialize(&shared->buffer_locks[slotno].lock,
234  tranche_id);
235 
236  shared->page_buffer[slotno] = ptr;
237  shared->page_status[slotno] = SLRU_PAGE_EMPTY;
238  shared->page_dirty[slotno] = false;
239  shared->page_lru_count[slotno] = 0;
240  ptr += BLCKSZ;
241  }
242 
243  /* Should fit to estimated shmem size */
244  Assert(ptr - (char *) shared <= SimpleLruShmemSize(nslots, nlsns));
245  }
246  else
247  Assert(found);
248 
249  /*
250  * Initialize the unshared control struct, including directory path. We
251  * assume caller set PagePrecedes.
252  */
253  ctl->shared = shared;
254  ctl->do_fsync = true; /* default behavior */
255  StrNCpy(ctl->Dir, subdir, sizeof(ctl->Dir));
256 }
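For context, a hedged sketch of how a client module might reserve and initialize its SLRU; the names (XactCtlData, XactPagePrecedes, XactSLRULock, LWTRANCHE_XACT_BUFFER) and the slot count are illustrative assumptions loosely modeled on the transaction-status code, not definitions from this file:

   /* Hypothetical caller-side setup; all names and sizes are illustrative. */
   static SlruCtlData XactCtlData;
   #define XactCtl (&XactCtlData)

   Size
   XactShmemSize(void)
   {
       return SimpleLruShmemSize(8, 0);        /* 8 slots, no LSN groups */
   }

   void
   XactShmemInit(void)
   {
       SimpleLruInit(XactCtl, "Xact", 8, 0, XactSLRULock, "pg_xact",
                     LWTRANCHE_XACT_BUFFER);
       XactCtl->PagePrecedes = XactPagePrecedes;   /* caller-supplied comparator */
   }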
257 
258 /*
259  * Initialize (or reinitialize) a page to zeroes.
260  *
261  * The page is not actually written, just set up in shared memory.
262  * The slot number of the new page is returned.
263  *
264  * Control lock must be held at entry, and will be held at exit.
265  */
266 int
267 SimpleLruZeroPage(SlruCtl ctl, int pageno)
268 {
269  SlruShared shared = ctl->shared;
270  int slotno;
271 
272  /* Find a suitable buffer slot for the page */
273  slotno = SlruSelectLRUPage(ctl, pageno);
274  Assert(shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
275  (shared->page_status[slotno] == SLRU_PAGE_VALID &&
276  !shared->page_dirty[slotno]) ||
277  shared->page_number[slotno] == pageno);
278 
279  /* Mark the slot as containing this page */
280  shared->page_number[slotno] = pageno;
281  shared->page_status[slotno] = SLRU_PAGE_VALID;
282  shared->page_dirty[slotno] = true;
283  SlruRecentlyUsed(shared, slotno);
284 
285  /* Set the buffer to zeroes */
286  MemSet(shared->page_buffer[slotno], 0, BLCKSZ);
287 
288  /* Set the LSNs for this new page to zero */
289  SimpleLruZeroLSNs(ctl, slotno);
290 
291  /* Assume this page is now the latest active page */
292  shared->latest_page_number = pageno;
293 
294  /* update the stats counter of zeroed pages */
295  pgstat_count_slru_page_zeroed(shared->slru_stats_idx);
296 
297  return slotno;
298 }
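A hedged usage sketch for the function above: when a caller advances to a brand-new page, it zeroes the page while holding the control lock (the WAL logging that real callers also do is omitted here); XactCtl and XactSLRULock are the illustrative names from the earlier sketch:

   /* Illustrative sketch only: extending a hypothetical SLRU to a new page. */
   static int
   XactZeroPage(int pageno)
   {
       int slotno;

       LWLockAcquire(XactSLRULock, LW_EXCLUSIVE);
       slotno = SimpleLruZeroPage(XactCtl, pageno);   /* slot is now valid and dirty */
       LWLockRelease(XactSLRULock);

       return slotno;
   }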
299 
300 /*
301  * Zero all the LSNs we store for this slru page.
302  *
303  * This should be called each time we create a new page, and each time we read
304  * in a page from disk into an existing buffer. (Such an old page cannot
305  * have any interesting LSNs, since we'd have flushed them before writing
306  * the page in the first place.)
307  *
308  * This assumes that InvalidXLogRecPtr is bitwise-all-0.
309  */
310 static void
311 SimpleLruZeroLSNs(SlruCtl ctl, int slotno)
312 {
313  SlruShared shared = ctl->shared;
314 
315  if (shared->lsn_groups_per_page > 0)
316  MemSet(&shared->group_lsn[slotno * shared->lsn_groups_per_page], 0,
317  shared->lsn_groups_per_page * sizeof(XLogRecPtr));
318 }
319 
320 /*
321  * Wait for any active I/O on a page slot to finish. (This does not
322  * guarantee that new I/O hasn't been started before we return, though.
323  * In fact the slot might not even contain the same page anymore.)
324  *
325  * Control lock must be held at entry, and will be held at exit.
326  */
327 static void
328 SimpleLruWaitIO(SlruCtl ctl, int slotno)
329 {
330  SlruShared shared = ctl->shared;
331 
332  /* See notes at top of file */
333  LWLockRelease(shared->ControlLock);
334  LWLockAcquire(&shared->buffer_locks[slotno].lock, LW_SHARED);
335  LWLockRelease(&shared->buffer_locks[slotno].lock);
336  LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
337 
338  /*
339  * If the slot is still in an io-in-progress state, then either someone
340  * already started a new I/O on the slot, or a previous I/O failed and
341  * neglected to reset the page state. That shouldn't happen, really, but
342  * it seems worth a few extra cycles to check and recover from it. We can
343  * cheaply test for failure by seeing if the buffer lock is still held (we
344  * assume that transaction abort would release the lock).
345  */
346  if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS ||
347  shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS)
348  {
349  if (LWLockConditionalAcquire(&shared->buffer_locks[slotno].lock, LW_SHARED))
350  {
351  /* indeed, the I/O must have failed */
352  if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS)
353  shared->page_status[slotno] = SLRU_PAGE_EMPTY;
354  else /* write_in_progress */
355  {
356  shared->page_status[slotno] = SLRU_PAGE_VALID;
357  shared->page_dirty[slotno] = true;
358  }
359  LWLockRelease(&shared->buffer_locks[slotno].lock);
360  }
361  }
362 }
363 
364 /*
365  * Find a page in a shared buffer, reading it in if necessary.
366  * The page number must correspond to an already-initialized page.
367  *
368  * If write_ok is true then it is OK to return a page that is in
369  * WRITE_IN_PROGRESS state; it is the caller's responsibility to be sure
370  * that modification of the page is safe. If write_ok is false then we
371  * will not return the page until it is not undergoing active I/O.
372  *
373  * The passed-in xid is used only for error reporting, and may be
374  * InvalidTransactionId if no specific xid is associated with the action.
375  *
376  * Return value is the shared-buffer slot number now holding the page.
377  * The buffer's LRU access info is updated.
378  *
379  * Control lock must be held at entry, and will be held at exit.
380  */
381 int
382 SimpleLruReadPage(SlruCtl ctl, int pageno, bool write_ok,
383  TransactionId xid)
384 {
385  SlruShared shared = ctl->shared;
386 
387  /* Outer loop handles restart if we must wait for someone else's I/O */
388  for (;;)
389  {
390  int slotno;
391  bool ok;
392 
393  /* See if page already is in memory; if not, pick victim slot */
394  slotno = SlruSelectLRUPage(ctl, pageno);
395 
396  /* Did we find the page in memory? */
397  if (shared->page_number[slotno] == pageno &&
398  shared->page_status[slotno] != SLRU_PAGE_EMPTY)
399  {
400  /*
401  * If page is still being read in, we must wait for I/O. Likewise
402  * if the page is being written and the caller said that's not OK.
403  */
404  if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS ||
405  (shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS &&
406  !write_ok))
407  {
408  SimpleLruWaitIO(ctl, slotno);
409  /* Now we must recheck state from the top */
410  continue;
411  }
412  /* Otherwise, it's ready to use */
413  SlruRecentlyUsed(shared, slotno);
414 
415  /* update the stats counter of pages found in the SLRU */
416  pgstat_count_slru_page_hit(shared->slru_stats_idx);
417 
418  return slotno;
419  }
420 
421  /* We found no match; assert we selected a freeable slot */
422  Assert(shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
423  (shared->page_status[slotno] == SLRU_PAGE_VALID &&
424  !shared->page_dirty[slotno]));
425 
426  /* Mark the slot read-busy */
427  shared->page_number[slotno] = pageno;
428  shared->page_status[slotno] = SLRU_PAGE_READ_IN_PROGRESS;
429  shared->page_dirty[slotno] = false;
430 
431  /* Acquire per-buffer lock (cannot deadlock, see notes at top) */
432  LWLockAcquire(&shared->buffer_locks[slotno].lock, LW_EXCLUSIVE);
433 
434  /* Release control lock while doing I/O */
435  LWLockRelease(shared->ControlLock);
436 
437  /* Do the read */
438  ok = SlruPhysicalReadPage(ctl, pageno, slotno);
439 
440  /* Set the LSNs for this newly read-in page to zero */
441  SimpleLruZeroLSNs(ctl, slotno);
442 
443  /* Re-acquire control lock and update page state */
444  LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
445 
446  Assert(shared->page_number[slotno] == pageno &&
447  shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS &&
448  !shared->page_dirty[slotno]);
449 
450  shared->page_status[slotno] = ok ? SLRU_PAGE_VALID : SLRU_PAGE_EMPTY;
451 
452  LWLockRelease(&shared->buffer_locks[slotno].lock);
453 
454  /* Now it's okay to ereport if we failed */
455  if (!ok)
456  SlruReportIOError(ctl, pageno, xid);
457 
458  SlruRecentlyUsed(shared, slotno);
459 
460  /* update the stats counter of pages not found in SLRU */
461  pgstat_count_slru_page_read(shared->slru_stats_idx);
462 
463  return slotno;
464  }
465 }
466 
467 /*
468  * Find a page in a shared buffer, reading it in if necessary.
469  * The page number must correspond to an already-initialized page.
470  * The caller must intend only read-only access to the page.
471  *
472  * The passed-in xid is used only for error reporting, and may be
473  * InvalidTransactionId if no specific xid is associated with the action.
474  *
475  * Return value is the shared-buffer slot number now holding the page.
476  * The buffer's LRU access info is updated.
477  *
478  * Control lock must NOT be held at entry, but will be held at exit.
479  * It is unspecified whether the lock will be shared or exclusive.
480  */
481 int
482 SimpleLruReadPage_ReadOnly(SlruCtl ctl, int pageno, TransactionId xid)
483 {
484  SlruShared shared = ctl->shared;
485  int slotno;
486 
487  /* Try to find the page while holding only shared lock */
488  LWLockAcquire(shared->ControlLock, LW_SHARED);
489 
490  /* See if page is already in a buffer */
491  for (slotno = 0; slotno < shared->num_slots; slotno++)
492  {
493  if (shared->page_number[slotno] == pageno &&
494  shared->page_status[slotno] != SLRU_PAGE_EMPTY &&
495  shared->page_status[slotno] != SLRU_PAGE_READ_IN_PROGRESS)
496  {
497  /* See comments for SlruRecentlyUsed macro */
498  SlruRecentlyUsed(shared, slotno);
499 
500  /* update the stats counter of pages found in the SLRU */
501  pgstat_count_slru_page_hit(shared->slru_stats_idx);
502 
503  return slotno;
504  }
505  }
506 
507  /* No luck, so switch to normal exclusive lock and do regular read */
508  LWLockRelease(shared->ControlLock);
509  LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
510 
511  return SimpleLruReadPage(ctl, pageno, true, xid);
512 }
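A hedged sketch of a typical read-only lookup against the function above, showing that the caller must release the control lock, which this function returns holding; the byte-indexed page layout is invented for illustration:

   /* Illustrative sketch only: fetch one status byte from an SLRU page. */
   static char
   XactGetStatusByte(SlruCtl ctl, int pageno, int byteno, TransactionId xid)
   {
       int  slotno;
       char status;

       slotno = SimpleLruReadPage_ReadOnly(ctl, pageno, xid);
       status = ctl->shared->page_buffer[slotno][byteno];
       LWLockRelease(ctl->shared->ControlLock);   /* lock was acquired for us above */

       return status;
   }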
513 
514 /*
515  * Write a page from a shared buffer, if necessary.
516  * Does nothing if the specified slot is not dirty.
517  *
518  * NOTE: only one write attempt is made here. Hence, it is possible that
519  * the page is still dirty at exit (if someone else re-dirtied it during
520  * the write). However, we *do* attempt a fresh write even if the page
521  * is already being written; this is for checkpoints.
522  *
523  * Control lock must be held at entry, and will be held at exit.
524  */
525 static void
526 SlruInternalWritePage(SlruCtl ctl, int slotno, SlruFlush fdata)
527 {
528  SlruShared shared = ctl->shared;
529  int pageno = shared->page_number[slotno];
530  bool ok;
531 
532  /* If a write is in progress, wait for it to finish */
533  while (shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS &&
534  shared->page_number[slotno] == pageno)
535  {
536  SimpleLruWaitIO(ctl, slotno);
537  }
538 
539  /*
540  * Do nothing if page is not dirty, or if buffer no longer contains the
541  * same page we were called for.
542  */
543  if (!shared->page_dirty[slotno] ||
544  shared->page_status[slotno] != SLRU_PAGE_VALID ||
545  shared->page_number[slotno] != pageno)
546  return;
547 
548  /*
549  * Mark the slot write-busy, and clear the dirtybit. After this point, a
550  * transaction status update on this page will mark it dirty again.
551  */
552  shared->page_status[slotno] = SLRU_PAGE_WRITE_IN_PROGRESS;
553  shared->page_dirty[slotno] = false;
554 
555  /* Acquire per-buffer lock (cannot deadlock, see notes at top) */
556  LWLockAcquire(&shared->buffer_locks[slotno].lock, LW_EXCLUSIVE);
557 
558  /* Release control lock while doing I/O */
559  LWLockRelease(shared->ControlLock);
560 
561  /* Do the write */
562  ok = SlruPhysicalWritePage(ctl, pageno, slotno, fdata);
563 
564  /* If we failed, and we're in a flush, better close the files */
565  if (!ok && fdata)
566  {
567  int i;
568 
569  for (i = 0; i < fdata->num_files; i++)
570  CloseTransientFile(fdata->fd[i]);
571  }
572 
573  /* Re-acquire control lock and update page state */
574  LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
575 
576  Assert(shared->page_number[slotno] == pageno &&
577  shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS);
578 
579  /* If we failed to write, mark the page dirty again */
580  if (!ok)
581  shared->page_dirty[slotno] = true;
582 
583  shared->page_status[slotno] = SLRU_PAGE_VALID;
584 
585  LWLockRelease(&shared->buffer_locks[slotno].lock);
586 
587  /* Now it's okay to ereport if we failed */
588  if (!ok)
589  SlruReportIOError(ctl, pageno, InvalidTransactionId);
590 }
591 
592 /*
593  * Wrapper of SlruInternalWritePage, for external callers.
594  * fdata is always passed as NULL here.
595  */
596 void
597 SimpleLruWritePage(SlruCtl ctl, int slotno)
598 {
599  SlruInternalWritePage(ctl, slotno, NULL);
600 }
601 
602 /*
603  * Return whether the given page exists on disk.
604  *
605  * A false return means that either the file does not exist, or that it's not
606  * large enough to contain the given page.
607  */
608 bool
609 SimpleLruDoesPhysicalPageExist(SlruCtl ctl, int pageno)
610 {
611  int segno = pageno / SLRU_PAGES_PER_SEGMENT;
612  int rpageno = pageno % SLRU_PAGES_PER_SEGMENT;
613  int offset = rpageno * BLCKSZ;
614  char path[MAXPGPATH];
615  int fd;
616  bool result;
617  off_t endpos;
618 
619  /* update the stats counter of checked pages */
620  pgstat_count_slru_page_exists(ctl->shared->slru_stats_idx);
621 
622  SlruFileName(ctl, path, segno);
623 
624  fd = OpenTransientFile(path, O_RDONLY | PG_BINARY);
625  if (fd < 0)
626  {
627  /* expected: file doesn't exist */
628  if (errno == ENOENT)
629  return false;
630 
631  /* report error normally */
632  slru_errcause = SLRU_OPEN_FAILED;
633  slru_errno = errno;
634  SlruReportIOError(ctl, pageno, 0);
635  }
636 
637  if ((endpos = lseek(fd, 0, SEEK_END)) < 0)
638  {
639  slru_errcause = SLRU_SEEK_FAILED;
640  slru_errno = errno;
641  SlruReportIOError(ctl, pageno, 0);
642  }
643 
644  result = endpos >= (off_t) (offset + BLCKSZ);
645 
646  if (CloseTransientFile(fd) != 0)
647  {
648  slru_errcause = SLRU_CLOSE_FAILED;
649  slru_errno = errno;
650  return false;
651  }
652 
653  return result;
654 }
655 
656 /*
657  * Physical read of a (previously existing) page into a buffer slot
658  *
659  * On failure, we cannot just ereport(ERROR) since caller has put state in
660  * shared memory that must be undone. So, we return false and save enough
661  * info in static variables to let SlruReportIOError make the report.
662  *
663  * For now, assume it's not worth keeping a file pointer open across
664  * read/write operations. We could cache one virtual file pointer ...
665  */
666 static bool
667 SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno)
668 {
669  SlruShared shared = ctl->shared;
670  int segno = pageno / SLRU_PAGES_PER_SEGMENT;
671  int rpageno = pageno % SLRU_PAGES_PER_SEGMENT;
672  off_t offset = rpageno * BLCKSZ;
673  char path[MAXPGPATH];
674  int fd;
675 
676  SlruFileName(ctl, path, segno);
677 
678  /*
679  * In a crash-and-restart situation, it's possible for us to receive
680  * commands to set the commit status of transactions whose bits are in
681  * already-truncated segments of the commit log (see notes in
682  * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case
683  * where the file doesn't exist, and return zeroes instead.
684  */
685  fd = OpenTransientFile(path, O_RDONLY | PG_BINARY);
686  if (fd < 0)
687  {
688  if (errno != ENOENT || !InRecovery)
689  {
690  slru_errcause = SLRU_OPEN_FAILED;
691  slru_errno = errno;
692  return false;
693  }
694 
695  ereport(LOG,
696  (errmsg("file \"%s\" doesn't exist, reading as zeroes",
697  path)));
698  MemSet(shared->page_buffer[slotno], 0, BLCKSZ);
699  return true;
700  }
701 
702  errno = 0;
703  pgstat_report_wait_start(WAIT_EVENT_SLRU_READ);
704  if (pg_pread(fd, shared->page_buffer[slotno], BLCKSZ, offset) != BLCKSZ)
705  {
706  pgstat_report_wait_end();
707  slru_errcause = SLRU_READ_FAILED;
708  slru_errno = errno;
709  CloseTransientFile(fd);
710  return false;
711  }
712  pgstat_report_wait_end();
713 
714  if (CloseTransientFile(fd) != 0)
715  {
716  slru_errcause = SLRU_CLOSE_FAILED;
717  slru_errno = errno;
718  return false;
719  }
720 
721  return true;
722 }
723 
724 /*
725  * Physical write of a page from a buffer slot
726  *
727  * On failure, we cannot just ereport(ERROR) since caller has put state in
728  * shared memory that must be undone. So, we return false and save enough
729  * info in static variables to let SlruReportIOError make the report.
730  *
731  * For now, assume it's not worth keeping a file pointer open across
732  * independent read/write operations. We do batch operations during
733  * SimpleLruFlush, though.
734  *
735  * fdata is NULL for a standalone write, pointer to open-file info during
736  * SimpleLruFlush.
737  */
738 static bool
739 SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruFlush fdata)
740 {
741  SlruShared shared = ctl->shared;
742  int segno = pageno / SLRU_PAGES_PER_SEGMENT;
743  int rpageno = pageno % SLRU_PAGES_PER_SEGMENT;
744  off_t offset = rpageno * BLCKSZ;
745  char path[MAXPGPATH];
746  int fd = -1;
747 
748  /* update the stats counter of written pages */
749  pgstat_count_slru_page_written(shared->slru_stats_idx);
750 
751  /*
752  * Honor the write-WAL-before-data rule, if appropriate, so that we do not
753  * write out data before associated WAL records. This is the same action
754  * performed during FlushBuffer() in the main buffer manager.
755  */
756  if (shared->group_lsn != NULL)
757  {
758  /*
759  * We must determine the largest async-commit LSN for the page. This
760  * is a bit tedious, but since this entire function is a slow path
761  * anyway, it seems better to do this here than to maintain a per-page
762  * LSN variable (which'd need an extra comparison in the
763  * transaction-commit path).
764  */
765  XLogRecPtr max_lsn;
766  int lsnindex,
767  lsnoff;
768 
769  lsnindex = slotno * shared->lsn_groups_per_page;
770  max_lsn = shared->group_lsn[lsnindex++];
771  for (lsnoff = 1; lsnoff < shared->lsn_groups_per_page; lsnoff++)
772  {
773  XLogRecPtr this_lsn = shared->group_lsn[lsnindex++];
774 
775  if (max_lsn < this_lsn)
776  max_lsn = this_lsn;
777  }
778 
779  if (!XLogRecPtrIsInvalid(max_lsn))
780  {
781  /*
782  * As noted above, elog(ERROR) is not acceptable here, so if
783  * XLogFlush were to fail, we must PANIC. This isn't much of a
784  * restriction because XLogFlush is just about all critical
785  * section anyway, but let's make sure.
786  */
787  START_CRIT_SECTION();
788  XLogFlush(max_lsn);
789  END_CRIT_SECTION();
790  }
791  }
792 
793  /*
794  * During a Flush, we may already have the desired file open.
795  */
796  if (fdata)
797  {
798  int i;
799 
800  for (i = 0; i < fdata->num_files; i++)
801  {
802  if (fdata->segno[i] == segno)
803  {
804  fd = fdata->fd[i];
805  break;
806  }
807  }
808  }
809 
810  if (fd < 0)
811  {
812  /*
813  * If the file doesn't already exist, we should create it. It is
814  * possible for this to need to happen when writing a page that's not
815  * first in its segment; we assume the OS can cope with that. (Note:
816  * it might seem that it'd be okay to create files only when
817  * SimpleLruZeroPage is called for the first page of a segment.
818  * However, if after a crash and restart the REDO logic elects to
819  * replay the log from a checkpoint before the latest one, then it's
820  * possible that we will get commands to set transaction status of
821  * transactions that have already been truncated from the commit log.
822  * Easiest way to deal with that is to accept references to
823  * nonexistent files here and in SlruPhysicalReadPage.)
824  *
825  * Note: it is possible for more than one backend to be executing this
826  * code simultaneously for different pages of the same file. Hence,
827  * don't use O_EXCL or O_TRUNC or anything like that.
828  */
829  SlruFileName(ctl, path, segno);
830  fd = OpenTransientFile(path, O_RDWR | O_CREAT | PG_BINARY);
831  if (fd < 0)
832  {
833  slru_errcause = SLRU_OPEN_FAILED;
834  slru_errno = errno;
835  return false;
836  }
837 
838  if (fdata)
839  {
840  if (fdata->num_files < MAX_FLUSH_BUFFERS)
841  {
842  fdata->fd[fdata->num_files] = fd;
843  fdata->segno[fdata->num_files] = segno;
844  fdata->num_files++;
845  }
846  else
847  {
848  /*
849  * In the unlikely event that we exceed MAX_FLUSH_BUFFERS,
850  * fall back to treating it as a standalone write.
851  */
852  fdata = NULL;
853  }
854  }
855  }
856 
857  errno = 0;
858  pgstat_report_wait_start(WAIT_EVENT_SLRU_WRITE);
859  if (pg_pwrite(fd, shared->page_buffer[slotno], BLCKSZ, offset) != BLCKSZ)
860  {
861  pgstat_report_wait_end();
862  /* if write didn't set errno, assume problem is no disk space */
863  if (errno == 0)
864  errno = ENOSPC;
865  slru_errcause = SLRU_WRITE_FAILED;
866  slru_errno = errno;
867  if (!fdata)
868  CloseTransientFile(fd);
869  return false;
870  }
871  pgstat_report_wait_end();
872 
873  /*
874  * If not part of Flush, need to fsync now. We assume this happens
875  * infrequently enough that it's not a performance issue.
876  */
877  if (!fdata)
878  {
879  pgstat_report_wait_start(WAIT_EVENT_SLRU_SYNC);
880  if (ctl->do_fsync && pg_fsync(fd) != 0)
881  {
882  pgstat_report_wait_end();
883  slru_errcause = SLRU_FSYNC_FAILED;
884  slru_errno = errno;
885  CloseTransientFile(fd);
886  return false;
887  }
888  pgstat_report_wait_end();
889 
890  if (CloseTransientFile(fd) != 0)
891  {
892  slru_errcause = SLRU_CLOSE_FAILED;
893  slru_errno = errno;
894  return false;
895  }
896  }
897 
898  return true;
899 }
900 
901 /*
902  * Issue the error message after failure of SlruPhysicalReadPage or
903  * SlruPhysicalWritePage. Call this after cleaning up shared-memory state.
904  */
905 static void
906 SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid)
907 {
908  int segno = pageno / SLRU_PAGES_PER_SEGMENT;
909  int rpageno = pageno % SLRU_PAGES_PER_SEGMENT;
910  int offset = rpageno * BLCKSZ;
911  char path[MAXPGPATH];
912 
913  SlruFileName(ctl, path, segno);
914  errno = slru_errno;
915  switch (slru_errcause)
916  {
917  case SLRU_OPEN_FAILED:
918  ereport(ERROR,
919  (errcode_for_file_access(),
920  errmsg("could not access status of transaction %u", xid),
921  errdetail("Could not open file \"%s\": %m.", path)));
922  break;
923  case SLRU_SEEK_FAILED:
924  ereport(ERROR,
925  (errcode_for_file_access(),
926  errmsg("could not access status of transaction %u", xid),
927  errdetail("Could not seek in file \"%s\" to offset %u: %m.",
928  path, offset)));
929  break;
930  case SLRU_READ_FAILED:
931  if (errno)
932  ereport(ERROR,
933  (errcode_for_file_access(),
934  errmsg("could not access status of transaction %u", xid),
935  errdetail("Could not read from file \"%s\" at offset %u: %m.",
936  path, offset)));
937  else
938  ereport(ERROR,
939  (errmsg("could not access status of transaction %u", xid),
940  errdetail("Could not read from file \"%s\" at offset %u: read too few bytes.", path, offset)));
941  break;
942  case SLRU_WRITE_FAILED:
943  if (errno)
944  ereport(ERROR,
945  (errcode_for_file_access(),
946  errmsg("could not access status of transaction %u", xid),
947  errdetail("Could not write to file \"%s\" at offset %u: %m.",
948  path, offset)));
949  else
950  ereport(ERROR,
951  (errmsg("could not access status of transaction %u", xid),
952  errdetail("Could not write to file \"%s\" at offset %u: wrote too few bytes.",
953  path, offset)));
954  break;
955  case SLRU_FSYNC_FAILED:
956  ereport(data_sync_elevel(ERROR),
957  (errcode_for_file_access(),
958  errmsg("could not access status of transaction %u", xid),
959  errdetail("Could not fsync file \"%s\": %m.",
960  path)));
961  break;
962  case SLRU_CLOSE_FAILED:
963  ereport(ERROR,
964  (errcode_for_file_access(),
965  errmsg("could not access status of transaction %u", xid),
966  errdetail("Could not close file \"%s\": %m.",
967  path)));
968  break;
969  default:
970  /* can't get here, we trust */
971  elog(ERROR, "unrecognized SimpleLru error cause: %d",
972  (int) slru_errcause);
973  break;
974  }
975 }
976 
977 /*
978  * Select the slot to re-use when we need a free slot.
979  *
980  * The target page number is passed because we need to consider the
981  * possibility that some other process reads in the target page while
982  * we are doing I/O to free a slot. Hence, check or recheck to see if
983  * any slot already holds the target page, and return that slot if so.
984  * Thus, the returned slot is *either* a slot already holding the pageno
985  * (could be any state except EMPTY), *or* a freeable slot (state EMPTY
986  * or CLEAN).
987  *
988  * Control lock must be held at entry, and will be held at exit.
989  */
990 static int
991 SlruSelectLRUPage(SlruCtl ctl, int pageno)
992 {
993  SlruShared shared = ctl->shared;
994 
995  /* Outer loop handles restart after I/O */
996  for (;;)
997  {
998  int slotno;
999  int cur_count;
1000  int bestvalidslot = 0; /* keep compiler quiet */
1001  int best_valid_delta = -1;
1002  int best_valid_page_number = 0; /* keep compiler quiet */
1003  int bestinvalidslot = 0; /* keep compiler quiet */
1004  int best_invalid_delta = -1;
1005  int best_invalid_page_number = 0; /* keep compiler quiet */
1006 
1007  /* See if page already has a buffer assigned */
1008  for (slotno = 0; slotno < shared->num_slots; slotno++)
1009  {
1010  if (shared->page_number[slotno] == pageno &&
1011  shared->page_status[slotno] != SLRU_PAGE_EMPTY)
1012  return slotno;
1013  }
1014 
1015  /*
1016  * If we find any EMPTY slot, just select that one. Else choose a
1017  * victim page to replace. We normally take the least recently used
1018  * valid page, but we will never take the slot containing
1019  * latest_page_number, even if it appears least recently used. We
1020  * will select a slot that is already I/O busy only if there is no
1021  * other choice: a read-busy slot will not be least recently used once
1022  * the read finishes, and waiting for an I/O on a write-busy slot is
1023  * inferior to just picking some other slot. Testing shows the slot
1024  * we pick instead will often be clean, allowing us to begin a read at
1025  * once.
1026  *
1027  * Normally the page_lru_count values will all be different and so
1028  * there will be a well-defined LRU page. But since we allow
1029  * concurrent execution of SlruRecentlyUsed() within
1030  * SimpleLruReadPage_ReadOnly(), it is possible that multiple pages
1031  * acquire the same lru_count values. In that case we break ties by
1032  * choosing the furthest-back page.
1033  *
1034  * Notice that this next line forcibly advances cur_lru_count to a
1035  * value that is certainly beyond any value that will be in the
1036  * page_lru_count array after the loop finishes. This ensures that
1037  * the next execution of SlruRecentlyUsed will mark the page newly
1038  * used, even if it's for a page that has the current counter value.
1039  * That gets us back on the path to having good data when there are
1040  * multiple pages with the same lru_count.
1041  */
1042  cur_count = (shared->cur_lru_count)++;
1043  for (slotno = 0; slotno < shared->num_slots; slotno++)
1044  {
1045  int this_delta;
1046  int this_page_number;
1047 
1048  if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
1049  return slotno;
1050  this_delta = cur_count - shared->page_lru_count[slotno];
1051  if (this_delta < 0)
1052  {
1053  /*
1054  * Clean up in case shared updates have caused cur_count
1055  * increments to get "lost". We back off the page counts,
1056  * rather than trying to increase cur_count, to avoid any
1057  * question of infinite loops or failure in the presence of
1058  * wrapped-around counts.
1059  */
1060  shared->page_lru_count[slotno] = cur_count;
1061  this_delta = 0;
1062  }
1063  this_page_number = shared->page_number[slotno];
1064  if (this_page_number == shared->latest_page_number)
1065  continue;
1066  if (shared->page_status[slotno] == SLRU_PAGE_VALID)
1067  {
1068  if (this_delta > best_valid_delta ||
1069  (this_delta == best_valid_delta &&
1070  ctl->PagePrecedes(this_page_number,
1071  best_valid_page_number)))
1072  {
1073  bestvalidslot = slotno;
1074  best_valid_delta = this_delta;
1075  best_valid_page_number = this_page_number;
1076  }
1077  }
1078  else
1079  {
1080  if (this_delta > best_invalid_delta ||
1081  (this_delta == best_invalid_delta &&
1082  ctl->PagePrecedes(this_page_number,
1083  best_invalid_page_number)))
1084  {
1085  bestinvalidslot = slotno;
1086  best_invalid_delta = this_delta;
1087  best_invalid_page_number = this_page_number;
1088  }
1089  }
1090  }
1091 
1092  /*
1093  * If all pages (except possibly the latest one) are I/O busy, we'll
1094  * have to wait for an I/O to complete and then retry. In that
1095  * unhappy case, we choose to wait for the I/O on the least recently
1096  * used slot, on the assumption that it was likely initiated first of
1097  * all the I/Os in progress and may therefore finish first.
1098  */
1099  if (best_valid_delta < 0)
1100  {
1101  SimpleLruWaitIO(ctl, bestinvalidslot);
1102  continue;
1103  }
1104 
1105  /*
1106  * If the selected page is clean, we're set.
1107  */
1108  if (!shared->page_dirty[bestvalidslot])
1109  return bestvalidslot;
1110 
1111  /*
1112  * Write the page.
1113  */
1114  SlruInternalWritePage(ctl, bestvalidslot, NULL);
1115 
1116  /*
1117  * Now loop back and try again. This is the easiest way of dealing
1118  * with corner cases such as the victim page being re-dirtied while we
1119  * wrote it.
1120  */
1121  }
1122 }
1123 
1124 /*
1125  * Flush dirty pages to disk during checkpoint or database shutdown
1126  */
1127 void
1128 SimpleLruFlush(SlruCtl ctl, bool allow_redirtied)
1129 {
1130  SlruShared shared = ctl->shared;
1131  SlruFlushData fdata;
1132  int slotno;
1133  int pageno = 0;
1134  int i;
1135  bool ok;
1136 
1137  /* update the stats counter of flushes */
1138  pgstat_count_slru_flush(shared->slru_stats_idx);
1139 
1140  /*
1141  * Find and write dirty pages
1142  */
1143  fdata.num_files = 0;
1144 
1145  LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
1146 
1147  for (slotno = 0; slotno < shared->num_slots; slotno++)
1148  {
1149  SlruInternalWritePage(ctl, slotno, &fdata);
1150 
1151  /*
1152  * In some places (e.g. checkpoints), we cannot assert that the slot
1153  * is clean now, since another process might have re-dirtied it
1154  * already. That's okay.
1155  */
1156  Assert(allow_redirtied ||
1157  shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
1158  (shared->page_status[slotno] == SLRU_PAGE_VALID &&
1159  !shared->page_dirty[slotno]));
1160  }
1161 
1162  LWLockRelease(shared->ControlLock);
1163 
1164  /*
1165  * Now fsync and close any files that were open
1166  */
1167  ok = true;
1168  for (i = 0; i < fdata.num_files; i++)
1169  {
1170  pgstat_report_wait_start(WAIT_EVENT_SLRU_FLUSH_SYNC);
1171  if (ctl->do_fsync && pg_fsync(fdata.fd[i]) != 0)
1172  {
1173  slru_errcause = SLRU_FSYNC_FAILED;
1174  slru_errno = errno;
1175  pageno = fdata.segno[i] * SLRU_PAGES_PER_SEGMENT;
1176  ok = false;
1177  }
1178  pgstat_report_wait_end();
1179 
1180  if (CloseTransientFile(fdata.fd[i]) != 0)
1181  {
1182  slru_errcause = SLRU_CLOSE_FAILED;
1183  slru_errno = errno;
1184  pageno = fdata.segno[i] * SLRU_PAGES_PER_SEGMENT;
1185  ok = false;
1186  }
1187  }
1188  if (!ok)
1189  SlruReportIOError(ctl, pageno, InvalidTransactionId);
1190 }
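A hedged sketch of a checkpoint-time caller for the function above; since other backends may legitimately re-dirty pages while a checkpoint runs, allow_redirtied is passed as true (the function name and XactCtl are the illustrative names used earlier):

   /* Illustrative sketch only: flush a hypothetical SLRU during a checkpoint. */
   void
   CheckPointXact(void)
   {
       SimpleLruFlush(XactCtl, true);   /* concurrent re-dirtying is expected and allowed */
   }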
1191 
1192 /*
1193  * Remove all segments before the one holding the passed page number
1194  */
1195 void
1196 SimpleLruTruncate(SlruCtl ctl, int cutoffPage)
1197 {
1198  SlruShared shared = ctl->shared;
1199  int slotno;
1200 
1201  /* update the stats counter of truncates */
1202  pgstat_count_slru_truncate(shared->slru_stats_idx);
1203 
1204  /*
1205  * The cutoff point is the start of the segment containing cutoffPage.
1206  */
1207  cutoffPage -= cutoffPage % SLRU_PAGES_PER_SEGMENT;
1208 
1209  /*
1210  * Scan shared memory and remove any pages preceding the cutoff page, to
1211  * ensure we won't rewrite them later. (Since this is normally called in
1212  * or just after a checkpoint, any dirty pages should have been flushed
1213  * already ... we're just being extra careful here.)
1214  */
1215  LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
1216 
1217 restart:;
1218 
1219  /*
1220  * While we are holding the lock, make an important safety check: the
1221  * planned cutoff point must be <= the current endpoint page. Otherwise we
1222  * have already wrapped around, and proceeding with the truncation would
1223  * risk removing the current segment.
1224  */
1225  if (ctl->PagePrecedes(shared->latest_page_number, cutoffPage))
1226  {
1227  LWLockRelease(shared->ControlLock);
1228  ereport(LOG,
1229  (errmsg("could not truncate directory \"%s\": apparent wraparound",
1230  ctl->Dir)));
1231  return;
1232  }
1233 
1234  for (slotno = 0; slotno < shared->num_slots; slotno++)
1235  {
1236  if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
1237  continue;
1238  if (!ctl->PagePrecedes(shared->page_number[slotno], cutoffPage))
1239  continue;
1240 
1241  /*
1242  * If page is clean, just change state to EMPTY (expected case).
1243  */
1244  if (shared->page_status[slotno] == SLRU_PAGE_VALID &&
1245  !shared->page_dirty[slotno])
1246  {
1247  shared->page_status[slotno] = SLRU_PAGE_EMPTY;
1248  continue;
1249  }
1250 
1251  /*
1252  * Hmm, we have (or may have) I/O operations acting on the page, so
1253  * we've got to wait for them to finish and then start again. This is
1254  * the same logic as in SlruSelectLRUPage. (XXX if page is dirty,
1255  * wouldn't it be OK to just discard it without writing it? For now,
1256  * keep the logic the same as it was.)
1257  */
1258  if (shared->page_status[slotno] == SLRU_PAGE_VALID)
1259  SlruInternalWritePage(ctl, slotno, NULL);
1260  else
1261  SimpleLruWaitIO(ctl, slotno);
1262  goto restart;
1263  }
1264 
1265  LWLockRelease(shared->ControlLock);
1266 
1267  /* Now we can remove the old segment(s) */
1268  (void) SlruScanDirectory(ctl, SlruScanDirCbDeleteCutoff, &cutoffPage);
1269 }
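A hedged sketch of a truncation caller for the function above: it converts an oldest-needed value into the page that holds it and lets SimpleLruTruncate discard every earlier segment (the per-page density constant and function name are invented for illustration):

   /* Illustrative sketch only; the density constant is made up. */
   #define HYPOTHETICAL_XACTS_PER_PAGE  (BLCKSZ * 4)

   void
   TruncateXactSLRU(TransactionId oldestXact)
   {
       int cutoffPage;

       /* Everything strictly before the page holding oldestXact may go. */
       cutoffPage = oldestXact / HYPOTHETICAL_XACTS_PER_PAGE;
       SimpleLruTruncate(XactCtl, cutoffPage);
   }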
1270 
1271 /*
1272  * Delete an individual SLRU segment, identified by the filename.
1273  *
1274  * NB: This does not touch the SLRU buffers themselves, callers have to ensure
1275  * they either can't yet contain anything, or have already been cleaned out.
1276  */
1277 static void
1278 SlruInternalDeleteSegment(SlruCtl ctl, char *filename)
1279 {
1280  char path[MAXPGPATH];
1281 
1282  snprintf(path, MAXPGPATH, "%s/%s", ctl->Dir, filename);
1283  ereport(DEBUG2,
1284  (errmsg("removing file \"%s\"", path)));
1285  unlink(path);
1286 }
1287 
1288 /*
1289  * Delete an individual SLRU segment, identified by the segment number.
1290  */
1291 void
1292 SlruDeleteSegment(SlruCtl ctl, int segno)
1293 {
1294  SlruShared shared = ctl->shared;
1295  int slotno;
1296  char path[MAXPGPATH];
1297  bool did_write;
1298 
1299  /* Clean out any possibly existing references to the segment. */
1300  LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
1301 restart:
1302  did_write = false;
1303  for (slotno = 0; slotno < shared->num_slots; slotno++)
1304  {
1305  int pagesegno = shared->page_number[slotno] / SLRU_PAGES_PER_SEGMENT;
1306 
1307  if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
1308  continue;
1309 
1310  /* not the segment we're looking for */
1311  if (pagesegno != segno)
1312  continue;
1313 
1314  /* If page is clean, just change state to EMPTY (expected case). */
1315  if (shared->page_status[slotno] == SLRU_PAGE_VALID &&
1316  !shared->page_dirty[slotno])
1317  {
1318  shared->page_status[slotno] = SLRU_PAGE_EMPTY;
1319  continue;
1320  }
1321 
1322  /* Same logic as SimpleLruTruncate() */
1323  if (shared->page_status[slotno] == SLRU_PAGE_VALID)
1324  SlruInternalWritePage(ctl, slotno, NULL);
1325  else
1326  SimpleLruWaitIO(ctl, slotno);
1327 
1328  did_write = true;
1329  }
1330 
1331  /*
1332  * Be extra careful and re-check. The IO functions release the control
1333  * lock, so new pages could have been read in.
1334  */
1335  if (did_write)
1336  goto restart;
1337 
1338  snprintf(path, MAXPGPATH, "%s/%04X", ctl->Dir, segno);
1339  ereport(DEBUG2,
1340  (errmsg("removing file \"%s\"", path)));
1341  unlink(path);
1342 
1343  LWLockRelease(shared->ControlLock);
1344 }
1345 
1346 /*
1347  * SlruScanDirectory callback
1348  * This callback reports true if there's any segment prior to the one
1349  * containing the page passed as "data".
1350  */
1351 bool
1352 SlruScanDirCbReportPresence(SlruCtl ctl, char *filename, int segpage, void *data)
1353 {
1354  int cutoffPage = *(int *) data;
1355 
1356  cutoffPage -= cutoffPage % SLRU_PAGES_PER_SEGMENT;
1357 
1358  if (ctl->PagePrecedes(segpage, cutoffPage))
1359  return true; /* found one; don't iterate any more */
1360 
1361  return false; /* keep going */
1362 }
1363 
1364 /*
1365  * SlruScanDirectory callback.
1366  * This callback deletes segments prior to the one passed in as "data".
1367  */
1368 static bool
1369 SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, int segpage, void *data)
1370 {
1371  int cutoffPage = *(int *) data;
1372 
1373  if (ctl->PagePrecedes(segpage, cutoffPage))
1374  SlruInternalDeleteSegment(ctl, filename);
1375 
1376  return false; /* keep going */
1377 }
1378 
1379 /*
1380  * SlruScanDirectory callback.
1381  * This callback deletes all segments.
1382  */
1383 bool
1384 SlruScanDirCbDeleteAll(SlruCtl ctl, char *filename, int segpage, void *data)
1385 {
1386  SlruInternalDeleteSegment(ctl, filename);
1387 
1388  return false; /* keep going */
1389 }
1390 
1391 /*
1392  * Scan the SimpleLru directory and apply a callback to each file found in it.
1393  *
1394  * If the callback returns true, the scan is stopped. The last return value
1395  * from the callback is returned.
1396  *
1397  * The callback receives the following arguments: 1. the SlruCtl struct for the
1398  * slru being truncated; 2. the filename being considered; 3. the page number
1399  * for the first page of that file; 4. a pointer to the opaque data given to us
1400  * by the caller.
1401  *
1402  * Note that the ordering in which the directory is scanned is not guaranteed.
1403  *
1404  * Note that no locking is applied.
1405  */
1406 bool
1407 SlruScanDirectory(SlruCtl ctl, SlruScanCallback callback, void *data)
1408 {
1409  bool retval = false;
1410  DIR *cldir;
1411  struct dirent *clde;
1412  int segno;
1413  int segpage;
1414 
1415  cldir = AllocateDir(ctl->Dir);
1416  while ((clde = ReadDir(cldir, ctl->Dir)) != NULL)
1417  {
1418  size_t len;
1419 
1420  len = strlen(clde->d_name);
1421 
1422  if ((len == 4 || len == 5 || len == 6) &&
1423  strspn(clde->d_name, "0123456789ABCDEF") == len)
1424  {
1425  segno = (int) strtol(clde->d_name, NULL, 16);
1426  segpage = segno * SLRU_PAGES_PER_SEGMENT;
1427 
1428  elog(DEBUG2, "SlruScanDirectory invoking callback on %s/%s",
1429  ctl->Dir, clde->d_name);
1430  retval = callback(ctl, clde->d_name, segpage, data);
1431  if (retval)
1432  break;
1433  }
1434  }
1435  FreeDir(cldir);
1436 
1437  return retval;
1438 }
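A hedged sketch of a caller-defined callback for the scan function above, following the contract it documents (return true to stop the scan early, false to keep going); the counting purpose is invented for illustration:

   /* Illustrative sketch only: count the segment files of an SLRU. */
   static bool
   CountSegmentsCallback(SlruCtl ctl, char *filename, int segpage, void *data)
   {
       int *count = (int *) data;

       (*count)++;
       return false;               /* false => continue scanning */
   }

   /* usage:
    *     int n = 0;
    *
    *     (void) SlruScanDirectory(ctl, CountSegmentsCallback, &n);
    */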