PostgreSQL Source Code  git master
timeout.c
1 /*-------------------------------------------------------------------------
2  *
3  * timeout.c
4  * Routines to multiplex SIGALRM interrupts for multiple timeout reasons.
5  *
6  * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  * src/backend/utils/misc/timeout.c
12  *
13  *-------------------------------------------------------------------------
14  */
15 #include "postgres.h"
16 
17 #include <sys/time.h>
18 
19 #include "miscadmin.h"
20 #include "storage/proc.h"
21 #include "utils/timeout.h"
22 #include "utils/timestamp.h"
23 
24 
25 /* Data about any one timeout reason */
26 typedef struct timeout_params
27 {
28  TimeoutId index; /* identifier of timeout reason */
29 
30  /* volatile because these may be changed from the signal handler */
31  volatile bool active; /* true if timeout is in active_timeouts[] */
32  volatile bool indicator; /* true if timeout has occurred */
33 
34  /* callback function for timeout, or NULL if timeout not registered */
35  timeout_handler_proc timeout_handler;
36 
37  TimestampTz start_time; /* time that timeout was last activated */
38  TimestampTz fin_time; /* time it is, or was last, due to fire */
39  int interval_in_ms; /* time between firings, or 0 if just once */
40 } timeout_params;
41 
42 /*
43  * List of possible timeout reasons in the order of enum TimeoutId.
44  */
45 static timeout_params all_timeouts[MAX_TIMEOUTS];
46 static bool all_timeouts_initialized = false;
47 
48 /*
49  * List of active timeouts ordered by their fin_time and priority.
50  * This list is subject to change by the interrupt handler, so it's volatile.
51  */
52 static volatile int num_active_timeouts = 0;
53 static timeout_params *volatile active_timeouts[MAX_TIMEOUTS];
54 
55 /*
56  * Flag controlling whether the signal handler is allowed to do anything.
57  * This is useful to avoid race conditions with the handler. Note in
58  * particular that this lets us make changes in the data structures without
59  * tediously disabling and re-enabling the timer signal. Most of the time,
60  * no interrupt would happen anyway during such critical sections, but if
61  * one does, this rule ensures it's safe. Leaving the signal enabled across
62  * multiple operations can greatly reduce the number of kernel calls we make,
63  * too. See comments in schedule_alarm() about that.
64  *
65  * We leave this "false" when we're not expecting interrupts, just in case.
66  */
67 static volatile sig_atomic_t alarm_enabled = false;
68 
69 #define disable_alarm() (alarm_enabled = false)
70 #define enable_alarm() (alarm_enabled = true)
71 
72 /*
73  * State recording if and when we next expect the interrupt to fire.
74  * Note that the signal handler will unconditionally reset signal_pending to
75  * false, so that can change asynchronously even when alarm_enabled is false.
76  */
77 static volatile sig_atomic_t signal_pending = false;
78 static TimestampTz signal_due_at = 0; /* valid only when signal_pending */
79 
80 
81 /*****************************************************************************
82  * Internal helper functions
83  *
84  * For all of these, it is caller's responsibility to protect them from
85  * interruption by the signal handler. Generally, call disable_alarm()
86  * first to prevent interruption, then update state, and last call
87  * schedule_alarm(), which will re-enable the signal handler if needed.
88  *****************************************************************************/
89 
90 /*
91  * Find the index of a given timeout reason in the active array.
92  * If it's not there, return -1.
93  */
94 static int
95 find_active_timeout(TimeoutId id)
96 {
97  int i;
98 
99  for (i = 0; i < num_active_timeouts; i++)
100  {
101  if (active_timeouts[i]->index == id)
102  return i;
103  }
104 
105  return -1;
106 }
107 
108 /*
109  * Insert specified timeout reason into the list of active timeouts
110  * at the given index.
111  */
112 static void
113 insert_timeout(TimeoutId id, int index)
114 {
115  int i;
116 
117  if (index < 0 || index > num_active_timeouts)
118  elog(FATAL, "timeout index %d out of range 0..%d", index,
119  num_active_timeouts);
120 
121  Assert(!all_timeouts[id].active);
122  all_timeouts[id].active = true;
123 
124  for (i = num_active_timeouts - 1; i >= index; i--)
125  active_timeouts[i + 1] = active_timeouts[i];
126 
127  active_timeouts[index] = &all_timeouts[id];
128 
129  num_active_timeouts++;
130 }
131 
132 /*
133  * Remove the index'th element from the timeout list.
134  */
135 static void
136 remove_timeout_index(int index)
137 {
138  int i;
139 
140  if (index < 0 || index >= num_active_timeouts)
141  elog(FATAL, "timeout index %d out of range 0..%d", index,
142  num_active_timeouts - 1);
143 
144  Assert(active_timeouts[index]->active);
145  active_timeouts[index]->active = false;
146 
147  for (i = index + 1; i < num_active_timeouts; i++)
148  active_timeouts[i - 1] = active_timeouts[i];
149 
150  num_active_timeouts--;
151 }
152 
153 /*
154  * Enable the specified timeout reason
155  */
156 static void
157 enable_timeout(TimeoutId id, TimestampTz now, TimestampTz fin_time,
158  int interval_in_ms)
159 {
160  int i;
161 
162  /* Assert request is sane */
163  Assert(all_timeouts_initialized);
164  Assert(all_timeouts[id].timeout_handler != NULL);
165 
166  /*
167  * If this timeout was already active, momentarily disable it. We
168  * interpret the call as a directive to reschedule the timeout.
169  */
170  if (all_timeouts[id].active)
171  remove_timeout_index(find_active_timeout(id));
172 
173  /*
174  * Find out the index where to insert the new timeout. We sort by
175  * fin_time, and for equal fin_time by priority.
176  */
177  for (i = 0; i < num_active_timeouts; i++)
178  {
179  timeout_params *old_timeout = active_timeouts[i];
180 
181  if (fin_time < old_timeout->fin_time)
182  break;
183  if (fin_time == old_timeout->fin_time && id < old_timeout->index)
184  break;
185  }
186 
187  /*
188  * Mark the timeout active, and insert it into the active list.
189  */
190  all_timeouts[id].indicator = false;
191  all_timeouts[id].start_time = now;
192  all_timeouts[id].fin_time = fin_time;
193  all_timeouts[id].interval_in_ms = interval_in_ms;
194 
195  insert_timeout(id, i);
196 }
197 
198 /*
199  * Schedule alarm for the next active timeout, if any
200  *
201  * We assume the caller has obtained the current time, or a close-enough
202  * approximation. (It's okay if a tick or two has passed since "now", or
203  * if a little more time elapses before we reach the kernel call; that will
204  * cause us to ask for an interrupt a tick or two later than the nearest
205  * timeout, which is no big deal. Passing a "now" value that's in the future
206  * would be bad though.)
207  */
208 static void
209 schedule_alarm(TimestampTz now)
210 {
211  if (num_active_timeouts > 0)
212  {
213  struct itimerval timeval;
214  TimestampTz nearest_timeout;
215  long secs;
216  int usecs;
217 
218  MemSet(&timeval, 0, sizeof(struct itimerval));
219 
220  /*
221  * Get the time remaining till the nearest pending timeout. If it is
222  * negative, assume that we somehow missed an interrupt, and force
223  * signal_pending off. This gives us a chance to recover if the
224  * kernel drops a timeout request for some reason.
225  */
226  nearest_timeout = active_timeouts[0]->fin_time;
227  if (now > nearest_timeout)
228  {
229  signal_pending = false;
230  /* force an interrupt as soon as possible */
231  secs = 0;
232  usecs = 1;
233  }
234  else
235  {
236  TimestampDifference(now, nearest_timeout,
237  &secs, &usecs);
238 
239  /*
240  * It's possible that the difference is less than a microsecond;
241  * ensure we don't cancel, rather than set, the interrupt.
242  */
243  if (secs == 0 && usecs == 0)
244  usecs = 1;
245  }
246 
247  timeval.it_value.tv_sec = secs;
248  timeval.it_value.tv_usec = usecs;
249 
250  /*
251  * We must enable the signal handler before calling setitimer(); if we
252  * did it in the other order, we'd have a race condition wherein the
253  * interrupt could occur before we can set alarm_enabled, so that the
254  * signal handler would fail to do anything.
255  *
256  * Because we didn't bother to disable the timer in disable_alarm(),
257  * it's possible that a previously-set interrupt will fire between
258  * enable_alarm() and setitimer(). This is safe, however. There are
259  * two possible outcomes:
260  *
261  * 1. The signal handler finds nothing to do (because the nearest
262  * timeout event is still in the future). It will re-set the timer
263  * and return. Then we'll overwrite the timer value with a new one.
264  * This will mean that the timer fires a little later than we
265  * intended, but only by the amount of time it takes for the signal
266  * handler to do nothing useful, which shouldn't be much.
267  *
268  * 2. The signal handler executes and removes one or more timeout
269  * events. When it returns, either the queue is now empty or the
270  * frontmost event is later than the one we looked at above. So we'll
271  * overwrite the timer value with one that is too soon (plus or minus
272  * the signal handler's execution time), causing a useless interrupt
273  * to occur. But the handler will then re-set the timer and
274  * everything will still work as expected.
275  *
276  * Since these cases are of very low probability (the window here
277  * being quite narrow), it's not worth adding cycles to the mainline
278  * code to prevent occasional wasted interrupts.
279  */
280  enable_alarm();
281 
282  /*
283  * If there is already an interrupt pending that's at or before the
284  * needed time, we need not do anything more. The signal handler will
285  * do the right thing in the first case, and re-schedule the interrupt
286  * for later in the second case. It might seem that the extra
287  * interrupt is wasted work, but it's not terribly much work, and this
288  * method has very significant advantages in the common use-case where
289  * we repeatedly set a timeout that we don't expect to reach and then
290  * cancel it. Instead of invoking setitimer() every time the timeout
291  * is set or canceled, we perform one interrupt and a re-scheduling
292  * setitimer() call at intervals roughly equal to the timeout delay.
293  * For example, with statement_timeout = 1s and a throughput of
294  * thousands of queries per second, this method requires an interrupt
295  * and setitimer() call roughly once a second, rather than thousands
296  * of setitimer() calls per second.
297  *
298  * Because of the possible passage of time between when we obtained
299  * "now" and when we reach setitimer(), the kernel's opinion of when
300  * to trigger the interrupt is likely to be a bit later than
301  * signal_due_at. That's fine, for the same reasons described above.
302  */
303  if (signal_pending && nearest_timeout >= signal_due_at)
304  return;
305 
306  /*
307  * As with calling enable_alarm(), we must set signal_pending *before*
308  * calling setitimer(); if we did it after, the signal handler could
309  * trigger before we set it, leaving us with a false opinion that a
310  * signal is still coming.
311  *
312  * Other race conditions involved with setting/checking signal_pending
313  * are okay, for the reasons described above. One additional point is
314  * that the signal handler could fire after we set signal_due_at, but
315  * still before the setitimer() call. Then the handler could
316  * overwrite signal_due_at with a value it computes, which will be the
317  * same as or perhaps later than what we just computed. After we
318  * perform setitimer(), the net effect would be that signal_due_at
319  * gives a time later than when the interrupt will really happen;
320  * which is a safe situation.
321  */
322  signal_due_at = nearest_timeout;
323  signal_pending = true;
324 
325  /* Set the alarm timer */
326  if (setitimer(ITIMER_REAL, &timeval, NULL) != 0)
327  {
328  /*
329  * Clearing signal_pending here is a bit pro forma, but not
330  * entirely so, since something in the FATAL exit path could try
331  * to use timeout facilities.
332  */
333  signal_pending = false;
334  elog(FATAL, "could not enable SIGALRM timer: %m");
335  }
336  }
337 }
338 
339 
340 /*****************************************************************************
341  * Signal handler
342  *****************************************************************************/
343 
344 /*
345  * Signal handler for SIGALRM
346  *
347  * Process any active timeout reasons and then reschedule the interrupt
348  * as needed.
349  */
350 static void
351 handle_sig_alarm(SIGNAL_ARGS)
352 {
353  int save_errno = errno;
354 
355  /*
356  * Bump the holdoff counter, to make sure nothing we call will process
357  * interrupts directly. No timeout handler should do that, but these
358  * failures are hard to debug, so better be sure.
359  */
360  HOLD_INTERRUPTS();
361 
362  /*
363  * SIGALRM is always cause for waking anything waiting on the process
364  * latch.
365  */
366  SetLatch(MyLatch);
367 
368  /*
369  * Always reset signal_pending, even if !alarm_enabled, since indeed no
370  * signal is now pending.
371  */
372  signal_pending = false;
373 
374  /*
375  * Fire any pending timeouts, but only if we're enabled to do so.
376  */
377  if (alarm_enabled)
378  {
379  /*
380  * Disable alarms, just in case this platform allows signal handlers
381  * to interrupt themselves. schedule_alarm() will re-enable if
382  * appropriate.
383  */
384  disable_alarm();
385 
386  if (num_active_timeouts > 0)
387  {
388  TimestampTz now = GetCurrentTimestamp();
389 
390  /* While the first pending timeout has been reached ... */
391  while (num_active_timeouts > 0 &&
392  now >= active_timeouts[0]->fin_time)
393  {
394  timeout_params *this_timeout = active_timeouts[0];
395 
396  /* Remove it from the active list */
397  remove_timeout_index(0);
398 
399  /* Mark it as fired */
400  this_timeout->indicator = true;
401 
402  /* And call its handler function */
403  this_timeout->timeout_handler();
404 
405  /* If it should fire repeatedly, re-enable it. */
406  if (this_timeout->interval_in_ms > 0)
407  {
408  TimestampTz new_fin_time;
409 
410  /*
411  * To guard against drift, schedule the next instance of
412  * the timeout based on the intended firing time rather
413  * than the actual firing time. But if the timeout was so
414  * late that we missed an entire cycle, fall back to
415  * scheduling based on the actual firing time.
416  */
417  new_fin_time =
418  TimestampTzPlusMilliseconds(this_timeout->fin_time,
419  this_timeout->interval_in_ms);
420  if (new_fin_time < now)
421  new_fin_time =
422  TimestampTzPlusMilliseconds(now,
423  this_timeout->interval_in_ms);
424  enable_timeout(this_timeout->index, now, new_fin_time,
425  this_timeout->interval_in_ms);
426  }
427 
428  /*
429  * The handler might not take negligible time (CheckDeadLock
430  * for instance isn't too cheap), so let's update our idea of
431  * "now" after each one.
432  */
433  now = GetCurrentTimestamp();
434  }
435 
436  /* Done firing timeouts, so reschedule next interrupt if any */
437  schedule_alarm(now);
438  }
439  }
440 
441  RESUME_INTERRUPTS();
442 
443  errno = save_errno;
444 }
445 
446 
447 /*****************************************************************************
448  * Public API
449  *****************************************************************************/
450 
451 /*
452  * Initialize timeout module.
453  *
454  * This must be called in every process that wants to use timeouts.
455  *
456  * If the process was forked from another one that was also using this
457  * module, be sure to call this before re-enabling signals; else handlers
458  * meant to run in the parent process might get invoked in this one.
459  */
460 void
461 InitializeTimeouts(void)
462 {
463  int i;
464 
465  /* Initialize, or re-initialize, all local state */
466  disable_alarm();
467 
468  num_active_timeouts = 0;
469 
470  for (i = 0; i < MAX_TIMEOUTS; i++)
471  {
472  all_timeouts[i].index = i;
473  all_timeouts[i].active = false;
474  all_timeouts[i].indicator = false;
475  all_timeouts[i].timeout_handler = NULL;
476  all_timeouts[i].start_time = 0;
477  all_timeouts[i].fin_time = 0;
478  all_timeouts[i].interval_in_ms = 0;
479  }
480 
481  all_timeouts_initialized = true;
482 
483  /* Now establish the signal handler */
484  pqsignal(SIGALRM, handle_sig_alarm);
485 }
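/*
 * Illustrative usage sketch, not part of timeout.c: a process forked from a
 * parent that also uses this module should re-initialize before re-enabling
 * signals, as the comment above warns.  The function name ChildProcessMain
 * is hypothetical; assumes the usual includes (postgres.h, utils/timeout.h).
 */
static void
ChildProcessMain(void)
{
	/* Forget timeout state (and handlers) inherited from the parent */
	InitializeTimeouts();

	/* ... only after this point should the child unblock signals ... */
}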
486 
487 /*
488  * Register a timeout reason
489  *
490  * For predefined timeouts, this just registers the callback function.
491  *
492  * For user-defined timeouts, pass id == USER_TIMEOUT; we then allocate and
493  * return a timeout ID.
494  */
495 TimeoutId
496 RegisterTimeout(TimeoutId id, timeout_handler_proc handler)
497 {
498  Assert(all_timeouts_initialized);
499 
500  /* There's no need to disable the signal handler here. */
501 
502  if (id >= USER_TIMEOUT)
503  {
504  /* Allocate a user-defined timeout reason */
505  for (id = USER_TIMEOUT; id < MAX_TIMEOUTS; id++)
506  if (all_timeouts[id].timeout_handler == NULL)
507  break;
508  if (id >= MAX_TIMEOUTS)
509  ereport(FATAL,
510  (errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
511  errmsg("cannot add more timeout reasons")));
512  }
513 
514  Assert(all_timeouts[id].timeout_handler == NULL);
515 
516  all_timeouts[id].timeout_handler = handler;
517 
518  return id;
519 }
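/*
 * Illustrative usage sketch, not part of timeout.c: registering a
 * user-defined timeout reason.  The names my_timeout_id, my_timeout_fired
 * and my_timeout_handler are hypothetical.  Since the callback runs inside
 * handle_sig_alarm(), it should restrict itself to async-signal-safe work,
 * such as setting a flag; handle_sig_alarm() has already set the process
 * latch by the time the callback runs.
 */
static volatile sig_atomic_t my_timeout_fired = false;
static TimeoutId my_timeout_id;

static void
my_timeout_handler(void)
{
	my_timeout_fired = true;
}

static void
register_my_timeout(void)
{
	/* Allocates a free user-defined TimeoutId and installs the callback */
	my_timeout_id = RegisterTimeout(USER_TIMEOUT, my_timeout_handler);
}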
520 
521 /*
522  * Reschedule any pending SIGALRM interrupt.
523  *
524  * This can be used during error recovery in case query cancel resulted in loss
525  * of a SIGALRM event (due to longjmp'ing out of handle_sig_alarm before it
526  * could do anything). But note it's not necessary if any of the public
527  * enable_ or disable_timeout functions are called in the same area, since
528  * those all do schedule_alarm() internally if needed.
529  */
530 void
531 reschedule_timeouts(void)
532 {
533  /* For flexibility, allow this to be called before we're initialized. */
534  if (!all_timeouts_initialized)
535  return;
536 
537  /* Disable timeout interrupts for safety. */
538  disable_alarm();
539 
540  /* Reschedule the interrupt, if any timeouts remain active. */
541  if (num_active_timeouts > 0)
542  schedule_alarm(GetCurrentTimestamp());
543 }
544 
545 /*
546  * Enable the specified timeout to fire after the specified delay.
547  *
548  * Delay is given in milliseconds.
549  */
550 void
551 enable_timeout_after(TimeoutId id, int delay_ms)
552 {
553  TimestampTz now;
554  TimestampTz fin_time;
555 
556  /* Disable timeout interrupts for safety. */
557  disable_alarm();
558 
559  /* Queue the timeout at the appropriate time. */
560  now = GetCurrentTimestamp();
561  fin_time = TimestampTzPlusMilliseconds(now, delay_ms);
562  enable_timeout(id, now, fin_time, 0);
563 
564  /* Set the timer interrupt. */
565  schedule_alarm(now);
566 }
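/*
 * Illustrative usage sketch, not part of timeout.c: arming a one-shot
 * timeout around an operation and disarming it afterwards.  my_timeout_id is
 * the hypothetical user-defined timeout registered above; 5000 ms is an
 * arbitrary delay.
 */
static void
run_guarded_operation(void)
{
	enable_timeout_after(my_timeout_id, 5000);

	/* ... perform the operation being guarded ... */

	/* false => also clear the I've-been-fired indicator */
	disable_timeout(my_timeout_id, false);
}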
567 
568 /*
569  * Enable the specified timeout to fire periodically, with the specified
570  * delay as the time between firings.
571  *
572  * Delay is given in milliseconds.
573  */
574 void
575 enable_timeout_every(TimeoutId id, TimestampTz fin_time, int delay_ms)
576 {
577  TimestampTz now;
578 
579  /* Disable timeout interrupts for safety. */
580  disable_alarm();
581 
582  /* Queue the timeout at the appropriate time. */
583  now = GetCurrentTimestamp();
584  enable_timeout(id, now, fin_time, delay_ms);
585 
586  /* Set the timer interrupt. */
587  schedule_alarm(now);
588 }
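/*
 * Illustrative usage sketch, not part of timeout.c: starting a periodic
 * timeout whose first firing is one interval from now.  interval_ms and
 * my_timeout_id are hypothetical.
 */
static void
start_periodic_timeout(int interval_ms)
{
	TimestampTz first_fire =
		TimestampTzPlusMilliseconds(GetCurrentTimestamp(), interval_ms);

	enable_timeout_every(my_timeout_id, first_fire, interval_ms);
}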
589 
590 /*
591  * Enable the specified timeout to fire at the specified time.
592  *
593  * This is provided to support cases where there's a reason to calculate
594  * the timeout by reference to some point other than "now". If there isn't,
595  * use enable_timeout_after(), to avoid calling GetCurrentTimestamp() twice.
596  */
597 void
598 enable_timeout_at(TimeoutId id, TimestampTz fin_time)
599 {
600  TimestampTz now;
601 
602  /* Disable timeout interrupts for safety. */
603  disable_alarm();
604 
605  /* Queue the timeout at the appropriate time. */
606  now = GetCurrentTimestamp();
607  enable_timeout(id, now, fin_time, 0);
608 
609  /* Set the timer interrupt. */
610  schedule_alarm(now);
611 }
612 
613 /*
614  * Enable multiple timeouts at once.
615  *
616  * This works like calling enable_timeout_after() and/or enable_timeout_at()
617  * multiple times. Use this to reduce the number of GetCurrentTimestamp()
618  * and setitimer() calls needed to establish multiple timeouts.
619  */
620 void
621 enable_timeouts(const EnableTimeoutParams *timeouts, int count)
622 {
623  TimestampTz now;
624  int i;
625 
626  /* Disable timeout interrupts for safety. */
627  disable_alarm();
628 
629  /* Queue the timeout(s) at the appropriate times. */
630  now = GetCurrentTimestamp();
631 
632  for (i = 0; i < count; i++)
633  {
634  TimeoutId id = timeouts[i].id;
635  TimestampTz fin_time;
636 
637  switch (timeouts[i].type)
638  {
639  case TMPARAM_AFTER:
640  fin_time = TimestampTzPlusMilliseconds(now,
641  timeouts[i].delay_ms);
642  enable_timeout(id, now, fin_time, 0);
643  break;
644 
645  case TMPARAM_AT:
646  enable_timeout(id, now, timeouts[i].fin_time, 0);
647  break;
648 
649  case TMPARAM_EVERY:
650  fin_time = TimestampTzPlusMilliseconds(now,
651  timeouts[i].delay_ms);
652  enable_timeout(id, now, fin_time, timeouts[i].delay_ms);
653  break;
654 
655  default:
656  elog(ERROR, "unrecognized timeout type %d",
657  (int) timeouts[i].type);
658  break;
659  }
660  }
661 
662  /* Set the timer interrupt. */
663  schedule_alarm(now);
664 }
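/*
 * Illustrative usage sketch, not part of timeout.c: arming two timeouts with
 * a single GetCurrentTimestamp() and (at most) one setitimer() call.  The
 * choice of STATEMENT_TIMEOUT and LOCK_TIMEOUT and the delays shown are for
 * illustration only.
 */
static void
arm_statement_and_lock_timeouts(void)
{
	EnableTimeoutParams timeouts[2];

	timeouts[0].id = STATEMENT_TIMEOUT;
	timeouts[0].type = TMPARAM_AFTER;
	timeouts[0].delay_ms = 30000;

	timeouts[1].id = LOCK_TIMEOUT;
	timeouts[1].type = TMPARAM_AFTER;
	timeouts[1].delay_ms = 10000;

	enable_timeouts(timeouts, 2);
}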
665 
666 /*
667  * Cancel the specified timeout.
668  *
669  * The timeout's I've-been-fired indicator is reset,
670  * unless keep_indicator is true.
671  *
672  * When a timeout is canceled, any other active timeout remains in force.
673  * It's not an error to disable a timeout that is not enabled.
674  */
675 void
676 disable_timeout(TimeoutId id, bool keep_indicator)
677 {
678  /* Assert request is sane */
679  Assert(all_timeouts_initialized);
680  Assert(all_timeouts[id].timeout_handler != NULL);
681 
682  /* Disable timeout interrupts for safety. */
683  disable_alarm();
684 
685  /* Find the timeout and remove it from the active list. */
686  if (all_timeouts[id].active)
687  remove_timeout_index(find_active_timeout(id));
688 
689  /* Mark it inactive, whether it was active or not. */
690  if (!keep_indicator)
691  all_timeouts[id].indicator = false;
692 
693  /* Reschedule the interrupt, if any timeouts remain active. */
694  if (num_active_timeouts > 0)
695  schedule_alarm(GetCurrentTimestamp());
696 }
697 
698 /*
699  * Cancel multiple timeouts at once.
700  *
701  * The timeouts' I've-been-fired indicators are reset,
702  * unless timeouts[i].keep_indicator is true.
703  *
704  * This works like calling disable_timeout() multiple times.
705  * Use this to reduce the number of GetCurrentTimestamp()
706  * and setitimer() calls needed to cancel multiple timeouts.
707  */
708 void
709 disable_timeouts(const DisableTimeoutParams *timeouts, int count)
710 {
711  int i;
712 
713  Assert(all_timeouts_initialized);
714 
715  /* Disable timeout interrupts for safety. */
716  disable_alarm();
717 
718  /* Cancel the timeout(s). */
719  for (i = 0; i < count; i++)
720  {
721  TimeoutId id = timeouts[i].id;
722 
723  Assert(all_timeouts[id].timeout_handler != NULL);
724 
725  if (all_timeouts[id].active)
726  remove_timeout_index(find_active_timeout(id));
727 
728  if (!timeouts[i].keep_indicator)
729  all_timeouts[id].indicator = false;
730  }
731 
732  /* Reschedule the interrupt, if any timeouts remain active. */
733  if (num_active_timeouts > 0)
734  schedule_alarm(GetCurrentTimestamp());
735 }
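/*
 * Illustrative usage sketch, not part of timeout.c: cancelling the same two
 * timeouts in one call.  Keeping the LOCK_TIMEOUT indicator lets a timeout
 * that already fired still be reported afterwards via
 * get_timeout_indicator().
 */
static void
disarm_statement_and_lock_timeouts(void)
{
	DisableTimeoutParams timeouts[2];

	timeouts[0].id = STATEMENT_TIMEOUT;
	timeouts[0].keep_indicator = false;

	timeouts[1].id = LOCK_TIMEOUT;
	timeouts[1].keep_indicator = true;

	disable_timeouts(timeouts, 2);
}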
736 
737 /*
738  * Disable the signal handler, remove all timeouts from the active list,
739  * and optionally reset their timeout indicators.
740  */
741 void
742 disable_all_timeouts(bool keep_indicators)
743 {
744  int i;
745 
746  disable_alarm();
747 
748  /*
749  * We used to disable the timer interrupt here, but in common usage
750  * patterns it's cheaper to leave it enabled; that may save us from having
751  * to enable it again shortly. See comments in schedule_alarm().
752  */
753 
754  num_active_timeouts = 0;
755 
756  for (i = 0; i < MAX_TIMEOUTS; i++)
757  {
758  all_timeouts[i].active = false;
759  if (!keep_indicators)
760  all_timeouts[i].indicator = false;
761  }
762 }
763 
764 /*
765  * Return true if the timeout is active (enabled and not yet fired)
766  *
767  * This is, of course, subject to race conditions, as the timeout could fire
768  * immediately after we look.
769  */
770 bool
771 get_timeout_active(TimeoutId id)
772 {
773  return all_timeouts[id].active;
774 }
775 
776 /*
777  * Return the timeout's I've-been-fired indicator
778  *
779  * If reset_indicator is true, reset the indicator when returning true.
780  * To avoid missing timeouts due to race conditions, we are careful not to
781  * reset the indicator when returning false.
782  */
783 bool
784 get_timeout_indicator(TimeoutId id, bool reset_indicator)
785 {
786  if (all_timeouts[id].indicator)
787  {
788  if (reset_indicator)
789  all_timeouts[id].indicator = false;
790  return true;
791  }
792  return false;
793 }
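/*
 * Illustrative usage sketch, not part of timeout.c: waiting for a timeout to
 * fire from a normal (non-signal) context.  Because handle_sig_alarm()
 * always sets the process latch, a loop waiting on MyLatch wakes promptly
 * when the timeout fires.  my_timeout_id is the hypothetical user-defined
 * timeout from the earlier sketches; the zero wait_event_info is a
 * placeholder; assumes storage/latch.h and miscadmin.h are included.
 */
static void
wait_for_my_timeout(void)
{
	for (;;)
	{
		/* true => reset the indicator now that we have consumed it */
		if (get_timeout_indicator(my_timeout_id, true))
			break;

		(void) WaitLatch(MyLatch, WL_LATCH_SET | WL_EXIT_ON_PM_DEATH,
						 -1L, 0);
		ResetLatch(MyLatch);
		CHECK_FOR_INTERRUPTS();
	}
}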
794 
795 /*
796  * Return the time when the timeout was most recently activated
797  *
798  * Note: will return 0 if timeout has never been activated in this process.
799  * However, we do *not* reset the start_time when a timeout occurs, so as
800  * not to create a race condition if SIGALRM fires just as some code is
801  * about to fetch the value.
802  */
803 TimestampTz
804 get_timeout_start_time(TimeoutId id)
805 {
806  return all_timeouts[id].start_time;
807 }
808 
809 /*
810  * Return the time when the timeout is, or most recently was, due to fire
811  *
812  * Note: will return 0 if timeout has never been activated in this process.
813  * However, we do *not* reset the fin_time when a timeout occurs, so as
814  * not to create a race condition if SIGALRM fires just as some code is
815  * about to fetch the value.
816  */
817 TimestampTz
818 get_timeout_finish_time(TimeoutId id)
819 {
820  return all_timeouts[id].fin_time;
821 }