PostgreSQL Source Code  git master
sync.c File Reference
#include "postgres.h"
#include <unistd.h>
#include <fcntl.h>
#include <sys/file.h>
#include "access/commit_ts.h"
#include "access/clog.h"
#include "access/multixact.h"
#include "access/xlog.h"
#include "access/xlogutils.h"
#include "commands/tablespace.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "portability/instr_time.h"
#include "postmaster/bgwriter.h"
#include "storage/bufmgr.h"
#include "storage/ipc.h"
#include "storage/md.h"
#include "utils/hsearch.h"
#include "utils/inval.h"
#include "utils/memutils.h"
Include dependency graph for sync.c:

Go to the source code of this file.

Data Structures

struct  PendingFsyncEntry
 
struct  PendingUnlinkEntry
 
struct  SyncOps
 

Macros

#define FSYNCS_PER_ABSORB   10
 
#define UNLINKS_PER_ABSORB   10
 

Typedefs

typedef uint16 CycleCtr
 
typedef struct SyncOps SyncOps
 

Functions

void InitSync (void)
 
void SyncPreCheckpoint (void)
 
void SyncPostCheckpoint (void)
 
void ProcessSyncRequests (void)
 
void RememberSyncRequest (const FileTag *ftag, SyncRequestType type)
 
bool RegisterSyncRequest (const FileTag *ftag, SyncRequestType type, bool retryOnError)
 
void EnableSyncRequestForwarding (void)
 

Variables

static MemoryContext pendingOpsCxt
 
static HTAB * pendingOps = NULL
 
static List * pendingUnlinks = NIL
 
static CycleCtr sync_cycle_ctr = 0
 
static CycleCtr checkpoint_cycle_ctr = 0
 
static const SyncOps syncsw []
 

Macro Definition Documentation

◆ FSYNCS_PER_ABSORB

#define FSYNCS_PER_ABSORB   10

Definition at line 82 of file sync.c.

Referenced by ProcessSyncRequests().

◆ UNLINKS_PER_ABSORB

#define UNLINKS_PER_ABSORB   10

Definition at line 83 of file sync.c.

Referenced by SyncPostCheckpoint().

Typedef Documentation

◆ CycleCtr

typedef uint16 CycleCtr

Definition at line 59 of file sync.c.

◆ SyncOps

typedef struct SyncOps SyncOps

Function Documentation

◆ EnableSyncRequestForwarding()

void EnableSyncRequestForwarding ( void  )

Definition at line 601 of file sync.c.

References Assert, hash_destroy(), NIL, and ProcessSyncRequests().

Referenced by StartupXLOG().

602 {
603  /* Perform any pending fsyncs we may have queued up, then drop table */
604  if (pendingOps)
605  {
608  }
609  pendingOps = NULL;
610 
611  /*
612  * We should not have any pending unlink requests, since mdunlink doesn't
613  * queue unlink requests when isRedo.
614  */
616 }
void ProcessSyncRequests(void)
Definition: sync.c:258
#define NIL
Definition: pg_list.h:65
void hash_destroy(HTAB *hashp)
Definition: dynahash.c:827
static List * pendingUnlinks
Definition: sync.c:75
static HTAB * pendingOps
Definition: sync.c:74
#define Assert(condition)
Definition: c.h:746

◆ InitSync()

void InitSync ( void  )

Definition at line 128 of file sync.c.

References ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, AmCheckpointerProcess, AmStartupProcess, HASHCTL::entrysize, HASH_BLOBS, HASH_CONTEXT, hash_create(), HASH_ELEM, HASHCTL::hcxt, IsUnderPostmaster, HASHCTL::keysize, MemoryContextAllowInCriticalSection(), MemSet, NIL, pendingOpsCxt, and TopMemoryContext.

Referenced by BaseInit().

129 {
130  /*
131  * Create pending-operations hashtable if we need it. Currently, we need
132  * it if we are standalone (not under a postmaster) or if we are a startup
133  * or checkpointer auxiliary process.
134  */
136  {
137  HASHCTL hash_ctl;
138 
139  /*
140  * XXX: The checkpointer needs to add entries to the pending ops table
141  * when absorbing fsync requests. That is done within a critical
142  * section, which isn't usually allowed, but we make an exception. It
143  * means that there's a theoretical possibility that you run out of
144  * memory while absorbing fsync requests, which leads to a PANIC.
145  * Fortunately the hash table is small so that's unlikely to happen in
146  * practice.
147  */
149  "Pending ops context",
152 
153  MemSet(&hash_ctl, 0, sizeof(hash_ctl));
154  hash_ctl.keysize = sizeof(FileTag);
155  hash_ctl.entrysize = sizeof(PendingFsyncEntry);
156  hash_ctl.hcxt = pendingOpsCxt;
157  pendingOps = hash_create("Pending Ops Table",
158  100L,
159  &hash_ctl,
162  }
163 
164 }
#define NIL
Definition: pg_list.h:65
#define AmStartupProcess()
Definition: miscadmin.h:431
#define AllocSetContextCreate
Definition: memutils.h:170
#define HASH_CONTEXT
Definition: hsearch.h:91
void MemoryContextAllowInCriticalSection(MemoryContext context, bool allow)
Definition: mcxt.c:412
#define HASH_ELEM
Definition: hsearch.h:85
MemoryContext hcxt
Definition: hsearch.h:77
Size entrysize
Definition: hsearch.h:72
#define MemSet(start, val, len)
Definition: c.h:950
struct FileTag FileTag
static List * pendingUnlinks
Definition: sync.c:75
#define AmCheckpointerProcess()
Definition: miscadmin.h:433
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:192
bool IsUnderPostmaster
Definition: globals.c:109
MemoryContext TopMemoryContext
Definition: mcxt.c:44
#define HASH_BLOBS
Definition: hsearch.h:86
static HTAB * pendingOps
Definition: sync.c:74
HTAB * hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
Definition: dynahash.c:326
Size keysize
Definition: hsearch.h:71
static MemoryContext pendingOpsCxt
Definition: sync.c:38

◆ ProcessSyncRequests()

void ProcessSyncRequests ( void  )

Definition at line 258 of file sync.c.

References AbsorbSyncRequests(), Assert, PendingFsyncEntry::canceled, CheckpointStats, CheckpointStatsData::ckpt_agg_sync_time, CheckpointStatsData::ckpt_longest_sync, CheckpointStatsData::ckpt_sync_rels, PendingFsyncEntry::cycle_ctr, data_sync_elevel(), DEBUG1, elog, enableFsync, ereport, errcode_for_file_access(), errmsg(), ERROR, FILE_POSSIBLY_DELETED, FSYNCS_PER_ABSORB, FileTag::handler, HASH_REMOVE, hash_search(), hash_seq_init(), hash_seq_search(), INSTR_TIME_GET_MICROSEC, INSTR_TIME_SET_CURRENT, INSTR_TIME_SUBTRACT, log_checkpoints, longest(), MAXPGPATH, sync_cycle_ctr, SyncOps::sync_syncfiletag, and PendingFsyncEntry::tag.

Referenced by CheckPointGuts(), and EnableSyncRequestForwarding().

259 {
260  static bool sync_in_progress = false;
261 
262  HASH_SEQ_STATUS hstat;
263  PendingFsyncEntry *entry;
264  int absorb_counter;
265 
266  /* Statistics on sync times */
267  int processed = 0;
268  instr_time sync_start,
269  sync_end,
270  sync_diff;
271  uint64 elapsed;
272  uint64 longest = 0;
273  uint64 total_elapsed = 0;
274 
275  /*
276  * This is only called during checkpoints, and checkpoints should only
277  * occur in processes that have created a pendingOps.
278  */
279  if (!pendingOps)
280  elog(ERROR, "cannot sync without a pendingOps table");
281 
282  /*
283  * If we are in the checkpointer, the sync had better include all fsync
284  * requests that were queued by backends up to this point. The tightest
285  * race condition that could occur is that a buffer that must be written
286  * and fsync'd for the checkpoint could have been dumped by a backend just
287  * before it was visited by BufferSync(). We know the backend will have
288  * queued an fsync request before clearing the buffer's dirtybit, so we
289  * are safe as long as we do an Absorb after completing BufferSync().
290  */
292 
293  /*
294  * To avoid excess fsync'ing (in the worst case, maybe a never-terminating
295  * checkpoint), we want to ignore fsync requests that are entered into the
296  * hashtable after this point --- they should be processed next time,
297  * instead. We use sync_cycle_ctr to tell old entries apart from new
298  * ones: new ones will have cycle_ctr equal to the incremented value of
299  * sync_cycle_ctr.
300  *
301  * In normal circumstances, all entries present in the table at this point
302  * will have cycle_ctr exactly equal to the current (about to be old)
303  * value of sync_cycle_ctr. However, if we fail partway through the
304  * fsync'ing loop, then older values of cycle_ctr might remain when we
305  * come back here to try again. Repeated checkpoint failures would
306  * eventually wrap the counter around to the point where an old entry
307  * might appear new, causing us to skip it, possibly allowing a checkpoint
308  * to succeed that should not have. To forestall wraparound, any time the
309  * previous ProcessSyncRequests() failed to complete, run through the
310  * table and forcibly set cycle_ctr = sync_cycle_ctr.
311  *
312  * Think not to merge this loop with the main loop, as the problem is
313  * exactly that that loop may fail before having visited all the entries.
314  * From a performance point of view it doesn't matter anyway, as this path
315  * will never be taken in a system that's functioning normally.
316  */
317  if (sync_in_progress)
318  {
319  /* prior try failed, so update any stale cycle_ctr values */
320  hash_seq_init(&hstat, pendingOps);
321  while ((entry = (PendingFsyncEntry *) hash_seq_search(&hstat)) != NULL)
322  {
323  entry->cycle_ctr = sync_cycle_ctr;
324  }
325  }
326 
327  /* Advance counter so that new hashtable entries are distinguishable */
328  sync_cycle_ctr++;
329 
330  /* Set flag to detect failure if we don't reach the end of the loop */
331  sync_in_progress = true;
332 
333  /* Now scan the hashtable for fsync requests to process */
334  absorb_counter = FSYNCS_PER_ABSORB;
335  hash_seq_init(&hstat, pendingOps);
336  while ((entry = (PendingFsyncEntry *) hash_seq_search(&hstat)) != NULL)
337  {
338  int failures;
339 
340  /*
341  * If the entry is new then don't process it this time; it is new.
342  * Note "continue" bypasses the hash-remove call at the bottom of the
343  * loop.
344  */
345  if (entry->cycle_ctr == sync_cycle_ctr)
346  continue;
347 
348  /* Else assert we haven't missed it */
349  Assert((CycleCtr) (entry->cycle_ctr + 1) == sync_cycle_ctr);
350 
351  /*
352  * If fsync is off then we don't have to bother opening the file at
353  * all. (We delay checking until this point so that changing fsync on
354  * the fly behaves sensibly.)
355  */
356  if (enableFsync)
357  {
358  /*
359  * If in checkpointer, we want to absorb pending requests every so
360  * often to prevent overflow of the fsync request queue. It is
361  * unspecified whether newly-added entries will be visited by
362  * hash_seq_search, but we don't care since we don't need to
363  * process them anyway.
364  */
365  if (--absorb_counter <= 0)
366  {
368  absorb_counter = FSYNCS_PER_ABSORB;
369  }
370 
371  /*
372  * The fsync table could contain requests to fsync segments that
373  * have been deleted (unlinked) by the time we get to them. Rather
374  * than just hoping an ENOENT (or EACCES on Windows) error can be
375  * ignored, what we do on error is absorb pending requests and
376  * then retry. Since mdunlink() queues a "cancel" message before
377  * actually unlinking, the fsync request is guaranteed to be
378  * marked canceled after the absorb if it really was this case.
379  * DROP DATABASE likewise has to tell us to forget fsync requests
380  * before it starts deletions.
381  */
382  for (failures = 0; !entry->canceled; failures++)
383  {
384  char path[MAXPGPATH];
385 
386  INSTR_TIME_SET_CURRENT(sync_start);
387  if (syncsw[entry->tag.handler].sync_syncfiletag(&entry->tag,
388  path) == 0)
389  {
390  /* Success; update statistics about sync timing */
391  INSTR_TIME_SET_CURRENT(sync_end);
392  sync_diff = sync_end;
393  INSTR_TIME_SUBTRACT(sync_diff, sync_start);
394  elapsed = INSTR_TIME_GET_MICROSEC(sync_diff);
395  if (elapsed > longest)
396  longest = elapsed;
397  total_elapsed += elapsed;
398  processed++;
399 
400  if (log_checkpoints)
401  elog(DEBUG1, "checkpoint sync: number=%d file=%s time=%.3f ms",
402  processed,
403  path,
404  (double) elapsed / 1000);
405 
406  break; /* out of retry loop */
407  }
408 
409  /*
410  * It is possible that the relation has been dropped or
411  * truncated since the fsync request was entered. Therefore,
412  * allow ENOENT, but only if we didn't fail already on this
413  * file.
414  */
415  if (!FILE_POSSIBLY_DELETED(errno) || failures > 0)
418  errmsg("could not fsync file \"%s\": %m",
419  path)));
420  else
421  ereport(DEBUG1,
423  errmsg("could not fsync file \"%s\" but retrying: %m",
424  path)));
425 
426  /*
427  * Absorb incoming requests and check to see if a cancel
428  * arrived for this relation fork.
429  */
431  absorb_counter = FSYNCS_PER_ABSORB; /* might as well... */
432  } /* end retry loop */
433  }
434 
435  /* We are done with this entry, remove it */
436  if (hash_search(pendingOps, &entry->tag, HASH_REMOVE, NULL) == NULL)
437  elog(ERROR, "pendingOps corrupted");
438  } /* end loop over hashtable entries */
439 
440  /* Return sync performance metrics for report at checkpoint end */
441  CheckpointStats.ckpt_sync_rels = processed;
443  CheckpointStats.ckpt_agg_sync_time = total_elapsed;
444 
445  /* Flag successful completion of ProcessSyncRequests */
446  sync_in_progress = false;
447 }
uint64 ckpt_agg_sync_time
Definition: xlog.h:260
bool log_checkpoints
Definition: xlog.c:105
int ckpt_sync_rels
Definition: xlog.h:258
#define DEBUG1
Definition: elog.h:25
FileTag tag
Definition: sync.c:63
struct timeval instr_time
Definition: instr_time.h:150
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:919
uint16 CycleCtr
Definition: sync.c:59
void AbsorbSyncRequests(void)
#define ERROR
Definition: elog.h:43
#define INSTR_TIME_SUBTRACT(x, y)
Definition: instr_time.h:170
#define MAXPGPATH
int errcode_for_file_access(void)
Definition: elog.c:633
CycleCtr cycle_ctr
Definition: sync.c:64
static HTAB * pendingOps
Definition: sync.c:74
int data_sync_elevel(int elevel)
Definition: fd.c:3603
#define FSYNCS_PER_ABSORB
Definition: sync.c:82
#define ereport(elevel,...)
Definition: elog.h:144
#define Assert(condition)
Definition: c.h:746
#define INSTR_TIME_GET_MICROSEC(t)
Definition: instr_time.h:205
CheckpointStatsData CheckpointStats
Definition: xlog.c:186
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1401
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1391
#define INSTR_TIME_SET_CURRENT(t)
Definition: instr_time.h:156
static chr * longest(struct vars *v, struct dfa *d, chr *start, chr *stop, int *hitstopp)
Definition: rege_dfa.c:42
bool enableFsync
Definition: globals.c:119
int errmsg(const char *fmt,...)
Definition: elog.c:821
#define elog(elevel,...)
Definition: elog.h:214
bool canceled
Definition: sync.c:65
int16 handler
Definition: sync.h:52
static const SyncOps syncsw[]
Definition: sync.c:99
static CycleCtr sync_cycle_ctr
Definition: sync.c:78
int(* sync_syncfiletag)(const FileTag *ftag, char *path)
Definition: sync.c:90
#define FILE_POSSIBLY_DELETED(err)
Definition: fd.h:69
uint64 ckpt_longest_sync
Definition: xlog.h:259

◆ RegisterSyncRequest()

bool RegisterSyncRequest ( const FileTag * ftag,
SyncRequestType  type,
bool  retryOnError 
)

Definition at line 554 of file sync.c.

References ForwardSyncRequest(), pg_usleep(), and RememberSyncRequest().

Referenced by ForgetDatabaseSyncRequests(), register_dirty_segment(), register_forget_request(), register_unlink_segment(), SlruDeleteSegment(), and SlruPhysicalWritePage().

556 {
557  bool ret;
558 
559  if (pendingOps != NULL)
560  {
561  /* standalone backend or startup process: fsync state is local */
562  RememberSyncRequest(ftag, type);
563  return true;
564  }
565 
566  for (;;)
567  {
568  /*
569  * Notify the checkpointer about it. If we fail to queue a message in
570  * retryOnError mode, we have to sleep and try again ... ugly, but
571  * hopefully won't happen often.
572  *
573  * XXX should we CHECK_FOR_INTERRUPTS in this loop? Escaping with an
574  * error in the case of SYNC_UNLINK_REQUEST would leave the
575  * no-longer-used file still present on disk, which would be bad, so
576  * I'm inclined to assume that the checkpointer will always empty the
577  * queue soon.
578  */
579  ret = ForwardSyncRequest(ftag, type);
580 
581  /*
582  * If we are successful in queueing the request, or we failed and were
583  * instructed not to retry on error, break.
584  */
585  if (ret || (!ret && !retryOnError))
586  break;
587 
588  pg_usleep(10000L);
589  }
590 
591  return ret;
592 }
void RememberSyncRequest(const FileTag *ftag, SyncRequestType type)
Definition: sync.c:459
void pg_usleep(long microsec)
Definition: signal.c:53
bool ForwardSyncRequest(const FileTag *ftag, SyncRequestType type)
static HTAB * pendingOps
Definition: sync.c:74

◆ RememberSyncRequest()

void RememberSyncRequest ( const FileTag * ftag,
SyncRequestType  type 
)

Definition at line 459 of file sync.c.

References Assert, PendingFsyncEntry::canceled, checkpoint_cycle_ctr, PendingFsyncEntry::cycle_ctr, PendingUnlinkEntry::cycle_ctr, foreach_delete_current, FileTag::handler, HASH_ENTER, HASH_FIND, hash_search(), hash_seq_init(), hash_seq_search(), lappend(), lfirst, MemoryContextSwitchTo(), palloc(), pfree(), sync_cycle_ctr, SyncOps::sync_filetagmatches, SYNC_FILTER_REQUEST, SYNC_FORGET_REQUEST, SYNC_REQUEST, SYNC_UNLINK_REQUEST, PendingFsyncEntry::tag, and PendingUnlinkEntry::tag.

Referenced by AbsorbSyncRequests(), and RegisterSyncRequest().

460 {
462 
463  if (type == SYNC_FORGET_REQUEST)
464  {
465  PendingFsyncEntry *entry;
466 
467  /* Cancel previously entered request */
469  (void *) ftag,
470  HASH_FIND,
471  NULL);
472  if (entry != NULL)
473  entry->canceled = true;
474  }
475  else if (type == SYNC_FILTER_REQUEST)
476  {
477  HASH_SEQ_STATUS hstat;
478  PendingFsyncEntry *entry;
479  ListCell *cell;
480 
481  /* Cancel matching fsync requests */
482  hash_seq_init(&hstat, pendingOps);
483  while ((entry = (PendingFsyncEntry *) hash_seq_search(&hstat)) != NULL)
484  {
485  if (entry->tag.handler == ftag->handler &&
486  syncsw[ftag->handler].sync_filetagmatches(ftag, &entry->tag))
487  entry->canceled = true;
488  }
489 
490  /* Remove matching unlink requests */
491  foreach(cell, pendingUnlinks)
492  {
493  PendingUnlinkEntry *entry = (PendingUnlinkEntry *) lfirst(cell);
494 
495  if (entry->tag.handler == ftag->handler &&
496  syncsw[ftag->handler].sync_filetagmatches(ftag, &entry->tag))
497  {
499  pfree(entry);
500  }
501  }
502  }
503  else if (type == SYNC_UNLINK_REQUEST)
504  {
505  /* Unlink request: put it in the linked list */
507  PendingUnlinkEntry *entry;
508 
509  entry = palloc(sizeof(PendingUnlinkEntry));
510  entry->tag = *ftag;
512 
514 
515  MemoryContextSwitchTo(oldcxt);
516  }
517  else
518  {
519  /* Normal case: enter a request to fsync this segment */
521  PendingFsyncEntry *entry;
522  bool found;
523 
525 
527  (void *) ftag,
528  HASH_ENTER,
529  &found);
530  /* if new entry, or was previously canceled, initialize it */
531  if (!found || entry->canceled)
532  {
533  entry->cycle_ctr = sync_cycle_ctr;
534  entry->canceled = false;
535  }
536 
537  /*
538  * NB: it's intentional that we don't change cycle_ctr if the entry
539  * already exists. The cycle_ctr must represent the oldest fsync
540  * request that could be in the entry.
541  */
542 
543  MemoryContextSwitchTo(oldcxt);
544  }
545 }
CycleCtr cycle_ctr
Definition: sync.c:71
FileTag tag
Definition: sync.c:63
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:109
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:919
static CycleCtr checkpoint_cycle_ctr
Definition: sync.c:79
#define foreach_delete_current(lst, cell)
Definition: pg_list.h:357
void pfree(void *pointer)
Definition: mcxt.c:1057
static List * pendingUnlinks
Definition: sync.c:75
FileTag tag
Definition: sync.c:70
List * lappend(List *list, void *datum)
Definition: list.c:321
CycleCtr cycle_ctr
Definition: sync.c:64
bool(* sync_filetagmatches)(const FileTag *ftag, const FileTag *candidate)
Definition: sync.c:92
static HTAB * pendingOps
Definition: sync.c:74
#define Assert(condition)
Definition: c.h:746
#define lfirst(lc)
Definition: pg_list.h:169
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1401
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1391
void * palloc(Size size)
Definition: mcxt.c:950
bool canceled
Definition: sync.c:65
int16 handler
Definition: sync.h:52
static const SyncOps syncsw[]
Definition: sync.c:99
static CycleCtr sync_cycle_ctr
Definition: sync.c:78
static MemoryContext pendingOpsCxt
Definition: sync.c:38

◆ SyncPostCheckpoint()

void SyncPostCheckpoint ( void  )

Definition at line 196 of file sync.c.

References AbsorbSyncRequests(), checkpoint_cycle_ctr, PendingUnlinkEntry::cycle_ctr, ereport, errcode_for_file_access(), errmsg(), FileTag::handler, linitial, list_delete_first(), MAXPGPATH, NIL, pfree(), SyncOps::sync_unlinkfiletag, PendingUnlinkEntry::tag, UNLINKS_PER_ABSORB, and WARNING.

Referenced by CreateCheckPoint().

197 {
198  int absorb_counter;
199 
200  absorb_counter = UNLINKS_PER_ABSORB;
201  while (pendingUnlinks != NIL)
202  {
204  char path[MAXPGPATH];
205 
206  /*
207  * New entries are appended to the end, so if the entry is new we've
208  * reached the end of old entries.
209  *
210  * Note: if just the right number of consecutive checkpoints fail, we
211  * could be fooled here by cycle_ctr wraparound. However, the only
212  * consequence is that we'd delay unlinking for one more checkpoint,
213  * which is perfectly tolerable.
214  */
215  if (entry->cycle_ctr == checkpoint_cycle_ctr)
216  break;
217 
218  /* Unlink the file */
219  if (syncsw[entry->tag.handler].sync_unlinkfiletag(&entry->tag,
220  path) < 0)
221  {
222  /*
223  * There's a race condition, when the database is dropped at the
224  * same time that we process the pending unlink requests. If the
225  * DROP DATABASE deletes the file before we do, we will get ENOENT
226  * here. rmtree() also has to ignore ENOENT errors, to deal with
227  * the possibility that we delete the file first.
228  */
229  if (errno != ENOENT)
232  errmsg("could not remove file \"%s\": %m", path)));
233  }
234 
235  /* And remove the list entry */
237  pfree(entry);
238 
239  /*
240  * As in ProcessSyncRequests, we don't want to stop absorbing fsync
241  * requests for a long time when there are many deletions to be done.
242  * We can safely call AbsorbSyncRequests() at this point in the loop
243  * (note it might try to delete list entries).
244  */
245  if (--absorb_counter <= 0)
246  {
248  absorb_counter = UNLINKS_PER_ABSORB;
249  }
250  }
251 }
#define NIL
Definition: pg_list.h:65
CycleCtr cycle_ctr
Definition: sync.c:71
static CycleCtr checkpoint_cycle_ctr
Definition: sync.c:79
void AbsorbSyncRequests(void)
void pfree(void *pointer)
Definition: mcxt.c:1057
#define linitial(l)
Definition: pg_list.h:174
static List * pendingUnlinks
Definition: sync.c:75
#define MAXPGPATH
int errcode_for_file_access(void)
Definition: elog.c:633
FileTag tag
Definition: sync.c:70
#define WARNING
Definition: elog.h:40
#define UNLINKS_PER_ABSORB
Definition: sync.c:83
#define ereport(elevel,...)
Definition: elog.h:144
int errmsg(const char *fmt,...)
Definition: elog.c:821
int16 handler
Definition: sync.h:52
static const SyncOps syncsw[]
Definition: sync.c:99
int(* sync_unlinkfiletag)(const FileTag *ftag, char *path)
Definition: sync.c:91
List * list_delete_first(List *list)
Definition: list.c:860

◆ SyncPreCheckpoint()

void SyncPreCheckpoint ( void  )

Definition at line 181 of file sync.c.

References checkpoint_cycle_ctr.

Referenced by CreateCheckPoint().

182 {
183  /*
184  * Any unlink requests arriving after this point will be assigned the next
185  * cycle counter, and won't be unlinked until next checkpoint.
186  */
188 }
static CycleCtr checkpoint_cycle_ctr
Definition: sync.c:79

Variable Documentation

◆ checkpoint_cycle_ctr

CycleCtr checkpoint_cycle_ctr = 0
static

Definition at line 79 of file sync.c.

Referenced by RememberSyncRequest(), SyncPostCheckpoint(), and SyncPreCheckpoint().

◆ pendingOps

HTAB* pendingOps = NULL
static

Definition at line 74 of file sync.c.

◆ pendingOpsCxt

static MemoryContext pendingOpsCxt
static

Definition at line 38 of file sync.c.

Referenced by InitSync().

◆ pendingUnlinks

List* pendingUnlinks = NIL
static

Definition at line 75 of file sync.c.

◆ sync_cycle_ctr

CycleCtr sync_cycle_ctr = 0
static

Definition at line 78 of file sync.c.

Referenced by ProcessSyncRequests(), and RememberSyncRequest().

◆ syncsw

const SyncOps syncsw[]
static
Initial value:
= {
.sync_syncfiletag = mdsyncfiletag,
.sync_unlinkfiletag = mdunlinkfiletag,
.sync_filetagmatches = mdfiletagmatches
},
.sync_syncfiletag = clogsyncfiletag
},
.sync_syncfiletag = committssyncfiletag
},
.sync_syncfiletag = multixactoffsetssyncfiletag
},
.sync_syncfiletag = multixactmemberssyncfiletag
}
}
int committssyncfiletag(const FileTag *ftag, char *path)
Definition: commit_ts.c:1074
int multixactoffsetssyncfiletag(const FileTag *ftag, char *path)
Definition: multixact.c:3402
int clogsyncfiletag(const FileTag *ftag, char *path)
Definition: clog.c:1021
int mdunlinkfiletag(const FileTag *ftag, char *path)
Definition: md.c:1353
bool mdfiletagmatches(const FileTag *ftag, const FileTag *candidate)
Definition: md.c:1372
int mdsyncfiletag(const FileTag *ftag, char *path)
Definition: md.c:1306
int multixactmemberssyncfiletag(const FileTag *ftag, char *path)
Definition: multixact.c:3411

Definition at line 99 of file sync.c.