/*-------------------------------------------------------------------------
 *
 * sync.c
 *	  File synchronization management code.
 *
 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/storage/sync/sync.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <unistd.h>
#include <fcntl.h>
#include <sys/file.h>

#include "access/xlog.h"
#include "access/xlogutils.h"
#include "commands/tablespace.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "portability/instr_time.h"
#include "postmaster/bgwriter.h"
#include "storage/bufmgr.h"
#include "storage/ipc.h"
#include "storage/md.h"
#include "utils/hsearch.h"
#include "utils/inval.h"
#include "utils/memutils.h"

static MemoryContext pendingOpsCxt; /* context for the pending ops state */

/*
 * In some contexts (currently, standalone backends and the checkpointer)
 * we keep track of pending fsync operations: we need to remember all relation
 * segments that have been written since the last checkpoint, so that we can
 * fsync them down to disk before completing the next checkpoint.  This hash
 * table remembers the pending operations.  We use a hash table mostly as
 * a convenient way of merging duplicate requests.
 *
 * We use a similar mechanism to remember no-longer-needed files that can
 * be deleted after the next checkpoint, but we use a linked list instead of
 * a hash table, because we don't expect there to be any duplicate requests.
 *
 * These mechanisms are only used for non-temp relations; we never fsync
 * temp rels, nor do we need to postpone their deletion (see comments in
 * mdunlink).
 *
 * (Regular backends do not track pending operations locally, but forward
 * them to the checkpointer.)
 */
typedef uint16 CycleCtr;		/* can be any convenient integer size */
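
/*
 * Note on wraparound: the cycle-counter comparisons below rely on unsigned
 * modular arithmetic, so they remain valid when the counter wraps.  For
 * example, with a 16-bit CycleCtr,
 *
 *		(CycleCtr) (0xFFFF + 1) == 0
 *
 * which is why tests are written as (CycleCtr) (ctr + 1) == next rather
 * than as magnitude comparisons.
 */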

typedef struct
{
	FileTag		tag;			/* identifies handler and file */
	CycleCtr	cycle_ctr;		/* sync_cycle_ctr of oldest request */
	bool		canceled;		/* canceled is true if we canceled "recently" */
} PendingFsyncEntry;

typedef struct
{
	FileTag		tag;			/* identifies handler and file */
	CycleCtr	cycle_ctr;		/* checkpoint_cycle_ctr when request was made */
} PendingUnlinkEntry;

static HTAB *pendingOps = NULL;
static List *pendingUnlinks = NIL;

static CycleCtr sync_cycle_ctr = 0;
static CycleCtr checkpoint_cycle_ctr = 0;

/* Intervals for calling AbsorbSyncRequests */
#define FSYNCS_PER_ABSORB		10
#define UNLINKS_PER_ABSORB		10

/*
 * Function pointers for handling sync and unlink requests.
 */
typedef struct SyncOps
{
	int			(*sync_syncfiletag) (const FileTag *ftag, char *path);
	int			(*sync_unlinkfiletag) (const FileTag *ftag, char *path);
	bool		(*sync_filetagmatches) (const FileTag *ftag,
										const FileTag *candidate);
} SyncOps;

static const SyncOps syncsw[] = {
	/* magnetic disk */
	{
		.sync_syncfiletag = mdsyncfiletag,
		.sync_unlinkfiletag = mdunlinkfiletag,
		.sync_filetagmatches = mdfiletagmatches
	}
};
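
/*
 * Supporting another kind of storage would mean adding an entry to the
 * table above.  As an illustrative sketch only (these handler names are
 * hypothetical):
 *
 *		{
 *			.sync_syncfiletag = undosyncfiletag,
 *			.sync_unlinkfiletag = undounlinkfiletag,
 *			.sync_filetagmatches = undofiletagmatches
 *		}
 *
 * The entry's index in syncsw[] must match the handler value stored in
 * FileTag.handler by the enqueuing code, since requests are dispatched as
 * syncsw[tag.handler].sync_syncfiletag(...).
 */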

/*
 * Initialize data structures for the file sync tracking.
 */
void
InitSync(void)
{
	/*
	 * Create pending-operations hashtable if we need it.  Currently, we need
	 * it if we are standalone (not under a postmaster) or if we are a startup
	 * or checkpointer auxiliary process.
	 */
	if (!IsUnderPostmaster || AmStartupProcess() || AmCheckpointerProcess())
	{
		HASHCTL		hash_ctl;

		/*
		 * XXX: The checkpointer needs to add entries to the pending ops table
		 * when absorbing fsync requests.  That is done within a critical
		 * section, which isn't usually allowed, but we make an exception. It
		 * means that there's a theoretical possibility that you run out of
		 * memory while absorbing fsync requests, which leads to a PANIC.
		 * Fortunately the hash table is small so that's unlikely to happen in
		 * practice.
		 */
		pendingOpsCxt = AllocSetContextCreate(TopMemoryContext,
											  "Pending ops context",
											  ALLOCSET_DEFAULT_SIZES);
		MemoryContextAllowInCriticalSection(pendingOpsCxt, true);

		MemSet(&hash_ctl, 0, sizeof(hash_ctl));
		hash_ctl.keysize = sizeof(FileTag);
		hash_ctl.entrysize = sizeof(PendingFsyncEntry);
		hash_ctl.hcxt = pendingOpsCxt;
		pendingOps = hash_create("Pending Ops Table",
								 100L,
								 &hash_ctl,
								 HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
		pendingUnlinks = NIL;
	}
}
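
/*
 * A note on the hash table above: HASH_BLOBS means FileTag keys are hashed
 * and compared bytewise, so enqueuing code must zero the entire struct
 * (including padding) before filling in its fields.  A sketch of the
 * pattern, modeled on md.c's INIT_MD_FILETAG macro:
 *
 *		FileTag		tag;
 *
 *		memset(&tag, 0, sizeof(tag));
 *		tag.handler = SYNC_HANDLER_MD;
 *		tag.rnode = rnode;
 *		tag.forknum = forknum;
 *		tag.segno = segno;
 */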

/*
 * SyncPreCheckpoint() -- Do pre-checkpoint work
 *
 * To distinguish unlink requests that arrived before this checkpoint
 * started from those that arrived during the checkpoint, we use a cycle
 * counter similar to the one we use for fsync requests.  That cycle
 * counter is incremented here.
 *
 * This must be called *before* the checkpoint REDO point is determined.
 * That ensures that we won't delete files too soon.
 *
 * Note that we can't do anything here that depends on the assumption
 * that the checkpoint will be completed.
 */
void
SyncPreCheckpoint(void)
{
	/*
	 * Any unlink requests arriving after this point will be assigned the
	 * next cycle counter, and won't be unlinked until next checkpoint.
	 */
	checkpoint_cycle_ctr++;
}
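
/*
 * For orientation, a checkpoint drives this module roughly in the following
 * order (a simplified sketch; the actual sequence lives in CreateCheckPoint()
 * and CheckPointBuffers()):
 *
 *		SyncPreCheckpoint();	-- bump checkpoint_cycle_ctr
 *		...REDO point is determined...
 *		BufferSync(flags);		-- write out dirty shared buffers
 *		ProcessSyncRequests();	-- fsync everything queued so far
 *		...checkpoint record is written, control file updated...
 *		SyncPostCheckpoint();	-- unlink files queued in earlier cycles
 */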

/*
 * SyncPostCheckpoint() -- Do post-checkpoint work
 *
 * Remove any lingering files that can now be safely removed.
 */
void
SyncPostCheckpoint(void)
{
	int			absorb_counter;

	absorb_counter = UNLINKS_PER_ABSORB;
	while (pendingUnlinks != NIL)
	{
		PendingUnlinkEntry *entry = (PendingUnlinkEntry *) linitial(pendingUnlinks);
		char		path[MAXPGPATH];

		/*
		 * New entries are appended to the end, so if the entry is new we've
		 * reached the end of old entries.
		 *
		 * Note: if just the right number of consecutive checkpoints fail, we
		 * could be fooled here by cycle_ctr wraparound.  However, the only
		 * consequence is that we'd delay unlinking for one more checkpoint,
		 * which is perfectly tolerable.
		 */
		if (entry->cycle_ctr == checkpoint_cycle_ctr)
			break;

		/* Unlink the file */
		if (syncsw[entry->tag.handler].sync_unlinkfiletag(&entry->tag,
														  path) < 0)
		{
			/*
			 * There's a race condition when the database is dropped at the
			 * same time that we process the pending unlink requests.  If the
			 * DROP DATABASE deletes the file before we do, we will get ENOENT
			 * here.  rmtree() also has to ignore ENOENT errors, to deal with
			 * the possibility that we delete the file first.
			 */
			if (errno != ENOENT)
				ereport(WARNING,
						(errcode_for_file_access(),
						 errmsg("could not remove file \"%s\": %m", path)));
		}

		/* And remove the list entry */
		pendingUnlinks = list_delete_first(pendingUnlinks);
		pfree(entry);

		/*
		 * As in ProcessSyncRequests, we don't want to stop absorbing fsync
		 * requests for a long time when there are many deletions to be done.
		 * We can safely call AbsorbSyncRequests() at this point in the loop
		 * (note it might try to delete list entries).
		 */
		if (--absorb_counter <= 0)
		{
			AbsorbSyncRequests();
			absorb_counter = UNLINKS_PER_ABSORB;
		}
	}
}

/*
 *	ProcessSyncRequests() -- Process queued fsync requests.
 */
void
ProcessSyncRequests(void)
{
	static bool sync_in_progress = false;

	HASH_SEQ_STATUS hstat;
	PendingFsyncEntry *entry;
	int			absorb_counter;

	/* Statistics on sync times */
	int			processed = 0;
	instr_time	sync_start,
				sync_end,
				sync_diff;
	uint64		elapsed;
	uint64		longest = 0;
	uint64		total_elapsed = 0;

	/*
	 * This is only called during checkpoints, and checkpoints should only
	 * occur in processes that have created a pendingOps.
	 */
	if (!pendingOps)
		elog(ERROR, "cannot sync without a pendingOps table");

	/*
	 * If we are in the checkpointer, the sync had better include all fsync
	 * requests that were queued by backends up to this point.  The tightest
	 * race condition that could occur is that a buffer that must be written
	 * and fsync'd for the checkpoint could have been dumped by a backend just
	 * before it was visited by BufferSync().  We know the backend will have
	 * queued an fsync request before clearing the buffer's dirtybit, so we
	 * are safe as long as we do an Absorb after completing BufferSync().
	 */
	AbsorbSyncRequests();

	/*
	 * To avoid excess fsync'ing (in the worst case, maybe a never-terminating
	 * checkpoint), we want to ignore fsync requests that are entered into the
	 * hashtable after this point --- they should be processed next time,
	 * instead.  We use sync_cycle_ctr to tell old entries apart from new
	 * ones: new ones will have cycle_ctr equal to the incremented value of
	 * sync_cycle_ctr.
	 *
	 * In normal circumstances, all entries present in the table at this point
	 * will have cycle_ctr exactly equal to the current (about to be old)
	 * value of sync_cycle_ctr.  However, if we fail partway through the
	 * fsync'ing loop, then older values of cycle_ctr might remain when we
	 * come back here to try again.  Repeated checkpoint failures would
	 * eventually wrap the counter around to the point where an old entry
	 * might appear new, causing us to skip it, possibly allowing a checkpoint
	 * to succeed that should not have.  To forestall wraparound, any time the
	 * previous ProcessSyncRequests() failed to complete, run through the
	 * table and forcibly set cycle_ctr = sync_cycle_ctr.
	 *
	 * Think not to merge this loop with the main loop, as the problem is
	 * exactly that that loop may fail before having visited all the entries.
	 * From a performance point of view it doesn't matter anyway, as this path
	 * will never be taken in a system that's functioning normally.
	 */
	if (sync_in_progress)
	{
		/* prior try failed, so update any stale cycle_ctr values */
		hash_seq_init(&hstat, pendingOps);
		while ((entry = (PendingFsyncEntry *) hash_seq_search(&hstat)) != NULL)
		{
			entry->cycle_ctr = sync_cycle_ctr;
		}
	}

	/* Advance counter so that new hashtable entries are distinguishable */
	sync_cycle_ctr++;

	/* Set flag to detect failure if we don't reach the end of the loop */
	sync_in_progress = true;

	/* Now scan the hashtable for fsync requests to process */
	absorb_counter = FSYNCS_PER_ABSORB;
	hash_seq_init(&hstat, pendingOps);
	while ((entry = (PendingFsyncEntry *) hash_seq_search(&hstat)) != NULL)
	{
		int			failures;

		/*
		 * If fsync is off then we don't have to bother opening the file at
		 * all.  (We delay checking until this point so that changing fsync on
		 * the fly behaves sensibly.)
		 */
		if (!enableFsync)
			continue;

		/*
		 * If the entry is new then don't process it this time; it belongs to
		 * the next checkpoint cycle.  Note "continue" bypasses the
		 * hash-remove call at the bottom of the loop.
		 */
		if (entry->cycle_ctr == sync_cycle_ctr)
			continue;

		/* Else assert we haven't missed it */
		Assert((CycleCtr) (entry->cycle_ctr + 1) == sync_cycle_ctr);

		/*
		 * If in checkpointer, we want to absorb pending requests every so
		 * often to prevent overflow of the fsync request queue.  It is
		 * unspecified whether newly-added entries will be visited by
		 * hash_seq_search, but we don't care since we don't need to process
		 * them anyway.
		 */
		if (--absorb_counter <= 0)
		{
			AbsorbSyncRequests();
			absorb_counter = FSYNCS_PER_ABSORB;
		}

		/*
		 * The fsync table could contain requests to fsync segments that have
		 * been deleted (unlinked) by the time we get to them.  Rather than
		 * just hoping an ENOENT (or EACCES on Windows) error can be ignored,
		 * what we do on error is absorb pending requests and then retry.
		 * Since mdunlink() queues a "cancel" message before actually
		 * unlinking, the fsync request is guaranteed to be marked canceled
		 * after the absorb if it really was this case.  DROP DATABASE
		 * likewise has to tell us to forget fsync requests before it starts
		 * deletions.
		 */
		for (failures = 0; !entry->canceled; failures++)
		{
			char		path[MAXPGPATH];

			INSTR_TIME_SET_CURRENT(sync_start);
			if (syncsw[entry->tag.handler].sync_syncfiletag(&entry->tag,
															path) == 0)
			{
				/* Success; update statistics about sync timing */
				INSTR_TIME_SET_CURRENT(sync_end);
				sync_diff = sync_end;
				INSTR_TIME_SUBTRACT(sync_diff, sync_start);
				elapsed = INSTR_TIME_GET_MICROSEC(sync_diff);
				if (elapsed > longest)
					longest = elapsed;
				total_elapsed += elapsed;
				processed++;

				if (log_checkpoints)
					elog(DEBUG1, "checkpoint sync: number=%d file=%s time=%.3f msec",
						 processed,
						 path,
						 (double) elapsed / 1000);

				break;			/* out of retry loop */
			}

			/*
			 * It is possible that the relation has been dropped or truncated
			 * since the fsync request was entered.  Therefore, allow ENOENT,
			 * but only if we didn't fail already on this file.
			 */
			if (!FILE_POSSIBLY_DELETED(errno) || failures > 0)
				ereport(data_sync_elevel(ERROR),
						(errcode_for_file_access(),
						 errmsg("could not fsync file \"%s\": %m",
								path)));
			else
				ereport(DEBUG1,
						(errcode_for_file_access(),
						 errmsg("could not fsync file \"%s\" but retrying: %m",
								path)));

			/*
			 * Absorb incoming requests and check to see if a cancel arrived
			 * for this relation fork.
			 */
			AbsorbSyncRequests();
			absorb_counter = FSYNCS_PER_ABSORB; /* might as well... */
		}						/* end retry loop */

		/* We are done with this entry, remove it */
		if (hash_search(pendingOps, &entry->tag, HASH_REMOVE, NULL) == NULL)
			elog(ERROR, "pendingOps corrupted");
	}							/* end loop over hashtable entries */

	/* Return sync performance metrics for report at checkpoint end */
	CheckpointStats.ckpt_sync_rels = processed;
	CheckpointStats.ckpt_longest_sync = longest;
	CheckpointStats.ckpt_agg_sync_time = total_elapsed;

	/* Flag successful completion of ProcessSyncRequests */
	sync_in_progress = false;
}
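
/*
 * A note on the error path above: data_sync_elevel() can promote the ERROR
 * to PANIC, because retrying fsync() after a failure is not safe on all
 * systems --- the kernel may already have dropped the dirty data, so a
 * later fsync could falsely report success.  As of this era, fd.c
 * implements it essentially as:
 *
 *		int
 *		data_sync_elevel(int elevel)
 *		{
 *			return data_sync_retry ? elevel : PANIC;
 *		}
 */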

/*
 * RememberSyncRequest() -- callback from checkpointer side of sync request
 *
 * We stuff fsync requests into the local hash table for execution
 * during the checkpointer's next checkpoint.  UNLINK requests go into a
 * separate linked list, however, because they get processed separately.
 *
 * See sync.h for more information on the types of sync requests supported.
 */
void
RememberSyncRequest(const FileTag *ftag, SyncRequestType type)
{
	Assert(pendingOps);

	if (type == SYNC_FORGET_REQUEST)
	{
		PendingFsyncEntry *entry;

		/* Cancel previously entered request */
		entry = (PendingFsyncEntry *) hash_search(pendingOps,
												  (void *) ftag,
												  HASH_FIND,
												  NULL);
		if (entry != NULL)
			entry->canceled = true;
	}
	else if (type == SYNC_FILTER_REQUEST)
	{
		HASH_SEQ_STATUS hstat;
		PendingFsyncEntry *entry;
		ListCell   *cell;

		/* Cancel matching fsync requests */
		hash_seq_init(&hstat, pendingOps);
		while ((entry = (PendingFsyncEntry *) hash_seq_search(&hstat)) != NULL)
		{
			if (entry->tag.handler == ftag->handler &&
				syncsw[ftag->handler].sync_filetagmatches(ftag, &entry->tag))
				entry->canceled = true;
		}

		/* Remove matching unlink requests */
		foreach(cell, pendingUnlinks)
		{
			PendingUnlinkEntry *entry = (PendingUnlinkEntry *) lfirst(cell);

			if (entry->tag.handler == ftag->handler &&
				syncsw[ftag->handler].sync_filetagmatches(ftag, &entry->tag))
			{
				pendingUnlinks = foreach_delete_current(pendingUnlinks, cell);
				pfree(entry);
			}
		}
	}
	else if (type == SYNC_UNLINK_REQUEST)
	{
		/* Unlink request: put it in the linked list */
		MemoryContext oldcxt = MemoryContextSwitchTo(pendingOpsCxt);
		PendingUnlinkEntry *entry;

		entry = palloc(sizeof(PendingUnlinkEntry));
		entry->tag = *ftag;
		entry->cycle_ctr = checkpoint_cycle_ctr;

		pendingUnlinks = lappend(pendingUnlinks, entry);

		MemoryContextSwitchTo(oldcxt);
	}
	else
	{
		/* Normal case: enter a request to fsync this segment */
		MemoryContext oldcxt = MemoryContextSwitchTo(pendingOpsCxt);
		PendingFsyncEntry *entry;
		bool		found;

		Assert(type == SYNC_REQUEST);

		entry = (PendingFsyncEntry *) hash_search(pendingOps,
												  (void *) ftag,
												  HASH_ENTER,
												  &found);
		/* if new entry, initialize it */
		if (!found)
		{
			entry->cycle_ctr = sync_cycle_ctr;
			entry->canceled = false;
		}

		/*
		 * NB: it's intentional that we don't change cycle_ctr if the entry
		 * already exists.  The cycle_ctr must represent the oldest fsync
		 * request that could be in the entry.
		 */

		MemoryContextSwitchTo(oldcxt);
	}
}
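
/*
 * To illustrate how these request types combine (a sketch, not a verbatim
 * excerpt): when md.c unlinks a relation outside of recovery, it first
 * cancels any pending fsync for the file and then schedules the deletion,
 * approximately
 *
 *		RegisterSyncRequest(&tag, SYNC_FORGET_REQUEST, true);
 *		...truncate the file now, unlink it after the next checkpoint...
 *		RegisterSyncRequest(&tag, SYNC_UNLINK_REQUEST, true);
 *
 * while DROP DATABASE issues a single SYNC_FILTER_REQUEST whose tag matches
 * every pending request belonging to the doomed database.
 */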

/*
 * Register the sync request locally, or forward it to the checkpointer.
 *
 * If retryOnError is true, we'll keep trying if there is no space in the
 * queue.  Return true if we succeeded, or false if there wasn't space.
 */
bool
RegisterSyncRequest(const FileTag *ftag, SyncRequestType type,
					bool retryOnError)
{
	bool		ret;

	if (pendingOps != NULL)
	{
		/* standalone backend or startup process: fsync state is local */
		RememberSyncRequest(ftag, type);
		return true;
	}

	for (;;)
	{
		/*
		 * Notify the checkpointer about it.  If we fail to queue a message in
		 * retryOnError mode, we have to sleep and try again ... ugly, but
		 * hopefully won't happen often.
		 *
		 * XXX should we CHECK_FOR_INTERRUPTS in this loop?  Escaping with an
		 * error in the case of SYNC_UNLINK_REQUEST would leave the
		 * no-longer-used file still present on disk, which would be bad, so
		 * I'm inclined to assume that the checkpointer will always empty the
		 * queue soon.
		 */
		ret = ForwardSyncRequest(ftag, type);

		/*
		 * If we are successful in queueing the request, or we failed and were
		 * instructed not to retry on error, break.
		 */
		if (ret || (!ret && !retryOnError))
			break;

		pg_usleep(10000L);
	}

	return ret;
}
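
/*
 * Typical caller pattern (a sketch modeled on md.c's register_dirty_segment):
 * when the request queue is full and the caller asked not to retry, the
 * caller must perform the fsync itself:
 *
 *		if (!RegisterSyncRequest(&tag, SYNC_REQUEST, false))
 *		{
 *			if (FileSync(seg->mdfd_vfd, WAIT_EVENT_DATA_FILE_SYNC) < 0)
 *				ereport(data_sync_elevel(ERROR),
 *						(errcode_for_file_access(),
 *						 errmsg("could not fsync file: %m")));
 *		}
 */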

/*
 * In archive recovery, we rely on checkpointer to do fsyncs, but we will have
 * already created the pendingOps during initialization of the startup
 * process.  Calling this function drops the local pendingOps so that
 * subsequent requests will be forwarded to checkpointer.
 */
void
EnableSyncRequestForwarding(void)
{
	/* Perform any pending fsyncs we may have queued up, then drop table */
	if (pendingOps)
	{
		ProcessSyncRequests();
		hash_destroy(pendingOps);
	}
	pendingOps = NULL;

	/*
	 * We should not have any pending unlink requests, since mdunlink doesn't
	 * queue unlink requests when isRedo.
	 */
	Assert(pendingUnlinks == NIL);
}