PostgreSQL Source Code  git master
sync.h File Reference
Include dependency graph for sync.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Data Structures

struct  FileTag
 

Typedefs

typedef enum SyncRequestType SyncRequestType
 
typedef enum SyncRequestHandler SyncRequestHandler
 
typedef struct FileTag FileTag
 

Enumerations

enum  SyncRequestType { SYNC_REQUEST , SYNC_UNLINK_REQUEST , SYNC_FORGET_REQUEST , SYNC_FILTER_REQUEST }
 
enum  SyncRequestHandler {
  SYNC_HANDLER_MD = 0 , SYNC_HANDLER_CLOG , SYNC_HANDLER_COMMIT_TS , SYNC_HANDLER_MULTIXACT_OFFSET ,
  SYNC_HANDLER_MULTIXACT_MEMBER , SYNC_HANDLER_NONE
}
 

Functions

void InitSync (void)
 
void SyncPreCheckpoint (void)
 
void SyncPostCheckpoint (void)
 
void ProcessSyncRequests (void)
 
void RememberSyncRequest (const FileTag *ftag, SyncRequestType type)
 
bool RegisterSyncRequest (const FileTag *ftag, SyncRequestType type, bool retryOnError)
 

Typedef Documentation

◆ FileTag

typedef struct FileTag FileTag

◆ SyncRequestHandler

◆ SyncRequestType

Enumeration Type Documentation

◆ SyncRequestHandler

Enumerator
SYNC_HANDLER_MD 
SYNC_HANDLER_CLOG 
SYNC_HANDLER_COMMIT_TS 
SYNC_HANDLER_MULTIXACT_OFFSET 
SYNC_HANDLER_MULTIXACT_MEMBER 
SYNC_HANDLER_NONE 

Definition at line 35 of file sync.h.

36 {
37  SYNC_HANDLER_MD = 0,
SyncRequestHandler
Definition: sync.h:36
@ SYNC_HANDLER_MD
Definition: sync.h:37
@ SYNC_HANDLER_COMMIT_TS
Definition: sync.h:39
@ SYNC_HANDLER_MULTIXACT_MEMBER
Definition: sync.h:41
@ SYNC_HANDLER_CLOG
Definition: sync.h:38
@ SYNC_HANDLER_NONE
Definition: sync.h:42
@ SYNC_HANDLER_MULTIXACT_OFFSET
Definition: sync.h:40

◆ SyncRequestType

Enumerator
SYNC_REQUEST 
SYNC_UNLINK_REQUEST 
SYNC_FORGET_REQUEST 
SYNC_FILTER_REQUEST 

Definition at line 23 of file sync.h.

24 {
25  SYNC_REQUEST, /* schedule a call of sync function */
26  SYNC_UNLINK_REQUEST, /* schedule a call of unlink function */
27  SYNC_FORGET_REQUEST, /* forget all calls for a tag */
28  SYNC_FILTER_REQUEST /* forget all calls satisfying match fn */
SyncRequestType
Definition: sync.h:24
@ SYNC_FILTER_REQUEST
Definition: sync.h:28
@ SYNC_FORGET_REQUEST
Definition: sync.h:27
@ SYNC_UNLINK_REQUEST
Definition: sync.h:26
@ SYNC_REQUEST
Definition: sync.h:25

Function Documentation

◆ InitSync()

void InitSync ( void  )

Definition at line 129 of file sync.c.

130 {
131  /*
132  * Create pending-operations hashtable if we need it. Currently, we need
133  * it if we are standalone (not under a postmaster) or if we are a
134  * checkpointer auxiliary process.
135  */
137  {
138  HASHCTL hash_ctl;
139 
140  /*
141  * XXX: The checkpointer needs to add entries to the pending ops table
142  * when absorbing fsync requests. That is done within a critical
143  * section, which isn't usually allowed, but we make an exception. It
144  * means that there's a theoretical possibility that you run out of
145  * memory while absorbing fsync requests, which leads to a PANIC.
146  * Fortunately the hash table is small so that's unlikely to happen in
147  * practice.
148  */
150  "Pending ops context",
153 
154  hash_ctl.keysize = sizeof(FileTag);
155  hash_ctl.entrysize = sizeof(PendingFsyncEntry);
156  hash_ctl.hcxt = pendingOpsCxt;
157  pendingOps = hash_create("Pending Ops Table",
158  100L,
159  &hash_ctl,
162  }
163 
164 }
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:349
bool IsUnderPostmaster
Definition: globals.c:112
#define HASH_CONTEXT
Definition: hsearch.h:102
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_BLOBS
Definition: hsearch.h:97
MemoryContext TopMemoryContext
Definition: mcxt.c:48
void MemoryContextAllowInCriticalSection(MemoryContext context, bool allow)
Definition: mcxt.c:418
#define AllocSetContextCreate
Definition: memutils.h:173
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:195
#define AmCheckpointerProcess()
Definition: miscadmin.h:447
#define NIL
Definition: pg_list.h:65
Size keysize
Definition: hsearch.h:75
Size entrysize
Definition: hsearch.h:76
MemoryContext hcxt
Definition: hsearch.h:86
static List * pendingUnlinks
Definition: sync.c:76
static HTAB * pendingOps
Definition: sync.c:75
static MemoryContext pendingOpsCxt
Definition: sync.c:38
struct FileTag FileTag

References ALLOCSET_DEFAULT_SIZES, AllocSetContextCreate, AmCheckpointerProcess, HASHCTL::entrysize, HASH_BLOBS, HASH_CONTEXT, hash_create(), HASH_ELEM, HASHCTL::hcxt, IsUnderPostmaster, HASHCTL::keysize, MemoryContextAllowInCriticalSection(), NIL, pendingOps, pendingOpsCxt, pendingUnlinks, and TopMemoryContext.

Referenced by BaseInit().

◆ ProcessSyncRequests()

void ProcessSyncRequests ( void  )

Definition at line 281 of file sync.c.

282 {
283  static bool sync_in_progress = false;
284 
285  HASH_SEQ_STATUS hstat;
286  PendingFsyncEntry *entry;
287  int absorb_counter;
288 
289  /* Statistics on sync times */
290  int processed = 0;
291  instr_time sync_start,
292  sync_end,
293  sync_diff;
294  uint64 elapsed;
295  uint64 longest = 0;
296  uint64 total_elapsed = 0;
297 
298  /*
299  * This is only called during checkpoints, and checkpoints should only
300  * occur in processes that have created a pendingOps.
301  */
302  if (!pendingOps)
303  elog(ERROR, "cannot sync without a pendingOps table");
304 
305  /*
306  * If we are in the checkpointer, the sync had better include all fsync
307  * requests that were queued by backends up to this point. The tightest
308  * race condition that could occur is that a buffer that must be written
309  * and fsync'd for the checkpoint could have been dumped by a backend just
310  * before it was visited by BufferSync(). We know the backend will have
311  * queued an fsync request before clearing the buffer's dirtybit, so we
312  * are safe as long as we do an Absorb after completing BufferSync().
313  */
315 
316  /*
317  * To avoid excess fsync'ing (in the worst case, maybe a never-terminating
318  * checkpoint), we want to ignore fsync requests that are entered into the
319  * hashtable after this point --- they should be processed next time,
320  * instead. We use sync_cycle_ctr to tell old entries apart from new
321  * ones: new ones will have cycle_ctr equal to the incremented value of
322  * sync_cycle_ctr.
323  *
324  * In normal circumstances, all entries present in the table at this point
325  * will have cycle_ctr exactly equal to the current (about to be old)
326  * value of sync_cycle_ctr. However, if we fail partway through the
327  * fsync'ing loop, then older values of cycle_ctr might remain when we
328  * come back here to try again. Repeated checkpoint failures would
329  * eventually wrap the counter around to the point where an old entry
330  * might appear new, causing us to skip it, possibly allowing a checkpoint
331  * to succeed that should not have. To forestall wraparound, any time the
332  * previous ProcessSyncRequests() failed to complete, run through the
333  * table and forcibly set cycle_ctr = sync_cycle_ctr.
334  *
335  * Think not to merge this loop with the main loop, as the problem is
336  * exactly that that loop may fail before having visited all the entries.
337  * From a performance point of view it doesn't matter anyway, as this path
338  * will never be taken in a system that's functioning normally.
339  */
340  if (sync_in_progress)
341  {
342  /* prior try failed, so update any stale cycle_ctr values */
343  hash_seq_init(&hstat, pendingOps);
344  while ((entry = (PendingFsyncEntry *) hash_seq_search(&hstat)) != NULL)
345  {
346  entry->cycle_ctr = sync_cycle_ctr;
347  }
348  }
349 
350  /* Advance counter so that new hashtable entries are distinguishable */
351  sync_cycle_ctr++;
352 
353  /* Set flag to detect failure if we don't reach the end of the loop */
354  sync_in_progress = true;
355 
356  /* Now scan the hashtable for fsync requests to process */
357  absorb_counter = FSYNCS_PER_ABSORB;
358  hash_seq_init(&hstat, pendingOps);
359  while ((entry = (PendingFsyncEntry *) hash_seq_search(&hstat)) != NULL)
360  {
361  int failures;
362 
363  /*
364  * If the entry is new then don't process it this time; it is new.
365  * Note "continue" bypasses the hash-remove call at the bottom of the
366  * loop.
367  */
368  if (entry->cycle_ctr == sync_cycle_ctr)
369  continue;
370 
371  /* Else assert we haven't missed it */
372  Assert((CycleCtr) (entry->cycle_ctr + 1) == sync_cycle_ctr);
373 
374  /*
375  * If fsync is off then we don't have to bother opening the file at
376  * all. (We delay checking until this point so that changing fsync on
377  * the fly behaves sensibly.)
378  */
379  if (enableFsync)
380  {
381  /*
382  * If in checkpointer, we want to absorb pending requests every so
383  * often to prevent overflow of the fsync request queue. It is
384  * unspecified whether newly-added entries will be visited by
385  * hash_seq_search, but we don't care since we don't need to
386  * process them anyway.
387  */
388  if (--absorb_counter <= 0)
389  {
391  absorb_counter = FSYNCS_PER_ABSORB;
392  }
393 
394  /*
395  * The fsync table could contain requests to fsync segments that
396  * have been deleted (unlinked) by the time we get to them. Rather
397  * than just hoping an ENOENT (or EACCES on Windows) error can be
398  * ignored, what we do on error is absorb pending requests and
399  * then retry. Since mdunlink() queues a "cancel" message before
400  * actually unlinking, the fsync request is guaranteed to be
401  * marked canceled after the absorb if it really was this case.
402  * DROP DATABASE likewise has to tell us to forget fsync requests
403  * before it starts deletions.
404  */
405  for (failures = 0; !entry->canceled; failures++)
406  {
407  char path[MAXPGPATH];
408 
409  INSTR_TIME_SET_CURRENT(sync_start);
410  if (syncsw[entry->tag.handler].sync_syncfiletag(&entry->tag,
411  path) == 0)
412  {
413  /* Success; update statistics about sync timing */
414  INSTR_TIME_SET_CURRENT(sync_end);
415  sync_diff = sync_end;
416  INSTR_TIME_SUBTRACT(sync_diff, sync_start);
417  elapsed = INSTR_TIME_GET_MICROSEC(sync_diff);
418  if (elapsed > longest)
419  longest = elapsed;
420  total_elapsed += elapsed;
421  processed++;
422 
423  if (log_checkpoints)
424  elog(DEBUG1, "checkpoint sync: number=%d file=%s time=%.3f ms",
425  processed,
426  path,
427  (double) elapsed / 1000);
428 
429  break; /* out of retry loop */
430  }
431 
432  /*
433  * It is possible that the relation has been dropped or
434  * truncated since the fsync request was entered. Therefore,
435  * allow ENOENT, but only if we didn't fail already on this
436  * file.
437  */
438  if (!FILE_POSSIBLY_DELETED(errno) || failures > 0)
441  errmsg("could not fsync file \"%s\": %m",
442  path)));
443  else
444  ereport(DEBUG1,
446  errmsg_internal("could not fsync file \"%s\" but retrying: %m",
447  path)));
448 
449  /*
450  * Absorb incoming requests and check to see if a cancel
451  * arrived for this relation fork.
452  */
454  absorb_counter = FSYNCS_PER_ABSORB; /* might as well... */
455  } /* end retry loop */
456  }
457 
458  /* We are done with this entry, remove it */
459  if (hash_search(pendingOps, &entry->tag, HASH_REMOVE, NULL) == NULL)
460  elog(ERROR, "pendingOps corrupted");
461  } /* end loop over hashtable entries */
462 
463  /* Return sync performance metrics for report at checkpoint end */
464  CheckpointStats.ckpt_sync_rels = processed;
466  CheckpointStats.ckpt_agg_sync_time = total_elapsed;
467 
468  /* Flag successful completion of ProcessSyncRequests */
469  sync_in_progress = false;
470 }
void AbsorbSyncRequests(void)
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:954
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1436
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1426
int errmsg_internal(const char *fmt,...)
Definition: elog.c:996
int errcode_for_file_access(void)
Definition: elog.c:721
int errmsg(const char *fmt,...)
Definition: elog.c:909
#define DEBUG1
Definition: elog.h:24
#define ERROR
Definition: elog.h:33
#define elog(elevel,...)
Definition: elog.h:218
#define ereport(elevel,...)
Definition: elog.h:143
int data_sync_elevel(int elevel)
Definition: fd.c:3826
#define FILE_POSSIBLY_DELETED(err)
Definition: fd.h:77
bool enableFsync
Definition: globals.c:122
@ HASH_REMOVE
Definition: hsearch.h:115
#define INSTR_TIME_SET_CURRENT(t)
Definition: instr_time.h:156
#define INSTR_TIME_SUBTRACT(x, y)
Definition: instr_time.h:170
struct timeval instr_time
Definition: instr_time.h:150
#define INSTR_TIME_GET_MICROSEC(t)
Definition: instr_time.h:205
Assert(fmt[strlen(fmt) - 1] !='\n')
#define MAXPGPATH
static chr * longest(struct vars *v, struct dfa *d, chr *start, chr *stop, int *hitstopp)
Definition: rege_dfa.c:42
uint64 ckpt_agg_sync_time
Definition: xlog.h:230
uint64 ckpt_longest_sync
Definition: xlog.h:229
int ckpt_sync_rels
Definition: xlog.h:228
int16 handler
Definition: sync.h:52
FileTag tag
Definition: sync.c:63
CycleCtr cycle_ctr
Definition: sync.c:64
bool canceled
Definition: sync.c:65
int(* sync_syncfiletag)(const FileTag *ftag, char *path)
Definition: sync.c:91
static const SyncOps syncsw[]
Definition: sync.c:100
static CycleCtr sync_cycle_ctr
Definition: sync.c:79
#define FSYNCS_PER_ABSORB
Definition: sync.c:83
uint16 CycleCtr
Definition: sync.c:59
bool log_checkpoints
Definition: xlog.c:110
CheckpointStatsData CheckpointStats
Definition: xlog.c:192

References AbsorbSyncRequests(), Assert(), PendingFsyncEntry::canceled, CheckpointStats, CheckpointStatsData::ckpt_agg_sync_time, CheckpointStatsData::ckpt_longest_sync, CheckpointStatsData::ckpt_sync_rels, PendingFsyncEntry::cycle_ctr, data_sync_elevel(), DEBUG1, elog, enableFsync, ereport, errcode_for_file_access(), errmsg(), errmsg_internal(), ERROR, FILE_POSSIBLY_DELETED, FSYNCS_PER_ABSORB, FileTag::handler, HASH_REMOVE, hash_search(), hash_seq_init(), hash_seq_search(), INSTR_TIME_GET_MICROSEC, INSTR_TIME_SET_CURRENT, INSTR_TIME_SUBTRACT, log_checkpoints, longest(), MAXPGPATH, pendingOps, sync_cycle_ctr, SyncOps::sync_syncfiletag, syncsw, and PendingFsyncEntry::tag.

Referenced by CheckPointGuts().

◆ RegisterSyncRequest()

bool RegisterSyncRequest ( const FileTag *ftag,
SyncRequestType  type,
bool  retryOnError 
)

Definition at line 575 of file sync.c.

577 {
578  bool ret;
579 
580  if (pendingOps != NULL)
581  {
582  /* standalone backend or startup process: fsync state is local */
583  RememberSyncRequest(ftag, type);
584  return true;
585  }
586 
587  for (;;)
588  {
589  /*
590  * Notify the checkpointer about it. If we fail to queue a message in
591  * retryOnError mode, we have to sleep and try again ... ugly, but
592  * hopefully won't happen often.
593  *
594  * XXX should we CHECK_FOR_INTERRUPTS in this loop? Escaping with an
595  * error in the case of SYNC_UNLINK_REQUEST would leave the
596  * no-longer-used file still present on disk, which would be bad, so
597  * I'm inclined to assume that the checkpointer will always empty the
598  * queue soon.
599  */
600  ret = ForwardSyncRequest(ftag, type);
601 
602  /*
603  * If we are successful in queueing the request, or we failed and were
604  * instructed not to retry on error, break.
605  */
606  if (ret || (!ret && !retryOnError))
607  break;
608 
609  pg_usleep(10000L);
610  }
611 
612  return ret;
613 }
bool ForwardSyncRequest(const FileTag *ftag, SyncRequestType type)
void pg_usleep(long microsec)
Definition: signal.c:53
void RememberSyncRequest(const FileTag *ftag, SyncRequestType type)
Definition: sync.c:482

References ForwardSyncRequest(), pendingOps, pg_usleep(), RememberSyncRequest(), and generate_unaccent_rules::type.

Referenced by ForgetDatabaseSyncRequests(), register_dirty_segment(), register_forget_request(), register_unlink_segment(), SlruInternalDeleteSegment(), and SlruPhysicalWritePage().

◆ RememberSyncRequest()

void RememberSyncRequest ( const FileTag *ftag,
SyncRequestType  type 
)

Definition at line 482 of file sync.c.

483 {
485 
486  if (type == SYNC_FORGET_REQUEST)
487  {
488  PendingFsyncEntry *entry;
489 
490  /* Cancel previously entered request */
492  (void *) ftag,
493  HASH_FIND,
494  NULL);
495  if (entry != NULL)
496  entry->canceled = true;
497  }
498  else if (type == SYNC_FILTER_REQUEST)
499  {
500  HASH_SEQ_STATUS hstat;
501  PendingFsyncEntry *entry;
502  ListCell *cell;
503 
504  /* Cancel matching fsync requests */
505  hash_seq_init(&hstat, pendingOps);
506  while ((entry = (PendingFsyncEntry *) hash_seq_search(&hstat)) != NULL)
507  {
508  if (entry->tag.handler == ftag->handler &&
509  syncsw[ftag->handler].sync_filetagmatches(ftag, &entry->tag))
510  entry->canceled = true;
511  }
512 
513  /* Cancel matching unlink requests */
514  foreach(cell, pendingUnlinks)
515  {
516  PendingUnlinkEntry *entry = (PendingUnlinkEntry *) lfirst(cell);
517 
518  if (entry->tag.handler == ftag->handler &&
519  syncsw[ftag->handler].sync_filetagmatches(ftag, &entry->tag))
520  entry->canceled = true;
521  }
522  }
523  else if (type == SYNC_UNLINK_REQUEST)
524  {
525  /* Unlink request: put it in the linked list */
527  PendingUnlinkEntry *entry;
528 
529  entry = palloc(sizeof(PendingUnlinkEntry));
530  entry->tag = *ftag;
532  entry->canceled = false;
533 
535 
536  MemoryContextSwitchTo(oldcxt);
537  }
538  else
539  {
540  /* Normal case: enter a request to fsync this segment */
542  PendingFsyncEntry *entry;
543  bool found;
544 
546 
548  (void *) ftag,
549  HASH_ENTER,
550  &found);
551  /* if new entry, or was previously canceled, initialize it */
552  if (!found || entry->canceled)
553  {
554  entry->cycle_ctr = sync_cycle_ctr;
555  entry->canceled = false;
556  }
557 
558  /*
559  * NB: it's intentional that we don't change cycle_ctr if the entry
560  * already exists. The cycle_ctr must represent the oldest fsync
561  * request that could be in the entry.
562  */
563 
564  MemoryContextSwitchTo(oldcxt);
565  }
566 }
@ HASH_FIND
Definition: hsearch.h:113
@ HASH_ENTER
Definition: hsearch.h:114
List * lappend(List *list, void *datum)
Definition: list.c:336
void * palloc(Size size)
Definition: mcxt.c:1062
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:109
#define lfirst(lc)
Definition: pg_list.h:169
FileTag tag
Definition: sync.c:70
CycleCtr cycle_ctr
Definition: sync.c:71
bool canceled
Definition: sync.c:72
bool(* sync_filetagmatches)(const FileTag *ftag, const FileTag *candidate)
Definition: sync.c:93
static CycleCtr checkpoint_cycle_ctr
Definition: sync.c:80

References Assert(), PendingFsyncEntry::canceled, PendingUnlinkEntry::canceled, checkpoint_cycle_ctr, PendingFsyncEntry::cycle_ctr, PendingUnlinkEntry::cycle_ctr, FileTag::handler, HASH_ENTER, HASH_FIND, hash_search(), hash_seq_init(), hash_seq_search(), lappend(), lfirst, MemoryContextSwitchTo(), palloc(), pendingOps, pendingOpsCxt, pendingUnlinks, sync_cycle_ctr, SyncOps::sync_filetagmatches, SYNC_FILTER_REQUEST, SYNC_FORGET_REQUEST, SYNC_REQUEST, SYNC_UNLINK_REQUEST, syncsw, PendingFsyncEntry::tag, PendingUnlinkEntry::tag, and generate_unaccent_rules::type.

Referenced by AbsorbSyncRequests(), and RegisterSyncRequest().

◆ SyncPostCheckpoint()

void SyncPostCheckpoint ( void  )

Definition at line 196 of file sync.c.

197 {
198  int absorb_counter;
199  ListCell *lc;
200 
201  absorb_counter = UNLINKS_PER_ABSORB;
202  foreach(lc, pendingUnlinks)
203  {
205  char path[MAXPGPATH];
206 
207  /* Skip over any canceled entries */
208  if (entry->canceled)
209  continue;
210 
211  /*
212  * New entries are appended to the end, so if the entry is new we've
213  * reached the end of old entries.
214  *
215  * Note: if just the right number of consecutive checkpoints fail, we
216  * could be fooled here by cycle_ctr wraparound. However, the only
217  * consequence is that we'd delay unlinking for one more checkpoint,
218  * which is perfectly tolerable.
219  */
220  if (entry->cycle_ctr == checkpoint_cycle_ctr)
221  break;
222 
223  /* Unlink the file */
224  if (syncsw[entry->tag.handler].sync_unlinkfiletag(&entry->tag,
225  path) < 0)
226  {
227  /*
228  * There's a race condition, when the database is dropped at the
229  * same time that we process the pending unlink requests. If the
230  * DROP DATABASE deletes the file before we do, we will get ENOENT
231  * here. rmtree() also has to ignore ENOENT errors, to deal with
232  * the possibility that we delete the file first.
233  */
234  if (errno != ENOENT)
237  errmsg("could not remove file \"%s\": %m", path)));
238  }
239 
240  /* Mark the list entry as canceled, just in case */
241  entry->canceled = true;
242 
243  /*
244  * As in ProcessSyncRequests, we don't want to stop absorbing fsync
245  * requests for a long time when there are many deletions to be done.
246  * We can safely call AbsorbSyncRequests() at this point in the loop.
247  */
248  if (--absorb_counter <= 0)
249  {
251  absorb_counter = UNLINKS_PER_ABSORB;
252  }
253  }
254 
255  /*
256  * If we reached the end of the list, we can just remove the whole list
257  * (remembering to pfree all the PendingUnlinkEntry objects). Otherwise,
258  * we must keep the entries at or after "lc".
259  */
260  if (lc == NULL)
261  {
264  }
265  else
266  {
267  int ntodelete = list_cell_number(pendingUnlinks, lc);
268 
269  for (int i = 0; i < ntodelete; i++)
271 
273  }
274 }
#define WARNING
Definition: elog.h:30
int i
Definition: isn.c:73
List * list_delete_first_n(List *list, int n)
Definition: list.c:942
void list_free_deep(List *list)
Definition: list.c:1519
void pfree(void *pointer)
Definition: mcxt.c:1169
static void * list_nth(const List *list, int n)
Definition: pg_list.h:278
static int list_cell_number(const List *l, const ListCell *c)
Definition: pg_list.h:312
int(* sync_unlinkfiletag)(const FileTag *ftag, char *path)
Definition: sync.c:92
#define UNLINKS_PER_ABSORB
Definition: sync.c:84

References AbsorbSyncRequests(), PendingUnlinkEntry::canceled, checkpoint_cycle_ctr, PendingUnlinkEntry::cycle_ctr, ereport, errcode_for_file_access(), errmsg(), FileTag::handler, i, lfirst, list_cell_number(), list_delete_first_n(), list_free_deep(), list_nth(), MAXPGPATH, NIL, pendingUnlinks, pfree(), SyncOps::sync_unlinkfiletag, syncsw, PendingUnlinkEntry::tag, UNLINKS_PER_ABSORB, and WARNING.

Referenced by CreateCheckPoint().

◆ SyncPreCheckpoint()

void SyncPreCheckpoint ( void  )

Definition at line 181 of file sync.c.

182 {
183  /*
184  * Any unlink requests arriving after this point will be assigned the next
185  * cycle counter, and won't be unlinked until next checkpoint.
186  */
188 }

References checkpoint_cycle_ctr.

Referenced by CreateCheckPoint().