PostgreSQL Source Code  git master
atomics.c File Reference
#include "postgres.h"
#include "miscadmin.h"
#include "port/atomics.h"
#include "storage/spin.h"
#include <signal.h>
Include dependency graph for atomics.c:

Go to the source code of this file.

Functions

void pg_spinlock_barrier (void)
 
void pg_extern_compiler_barrier (void)
 
void pg_atomic_init_flag_impl (volatile pg_atomic_flag *ptr)
 
bool pg_atomic_test_set_flag_impl (volatile pg_atomic_flag *ptr)
 
void pg_atomic_clear_flag_impl (volatile pg_atomic_flag *ptr)
 
bool pg_atomic_unlocked_test_flag_impl (volatile pg_atomic_flag *ptr)
 
void pg_atomic_init_u32_impl (volatile pg_atomic_uint32 *ptr, uint32 val_)
 
void pg_atomic_write_u32_impl (volatile pg_atomic_uint32 *ptr, uint32 val)
 
bool pg_atomic_compare_exchange_u32_impl (volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
 
uint32 pg_atomic_fetch_add_u32_impl (volatile pg_atomic_uint32 *ptr, int32 add_)
 
void pg_atomic_init_u64_impl (volatile pg_atomic_uint64 *ptr, uint64 val_)
 
bool pg_atomic_compare_exchange_u64_impl (volatile pg_atomic_uint64 *ptr, uint64 *expected, uint64 newval)
 
uint64 pg_atomic_fetch_add_u64_impl (volatile pg_atomic_uint64 *ptr, int64 add_)
 

Function Documentation

◆ pg_atomic_clear_flag_impl()

void pg_atomic_clear_flag_impl ( volatile pg_atomic_flag * ptr)

Definition at line 89 of file atomics.c.

90 {
91  SpinLockAcquire((slock_t *) &ptr->sema);
92  ptr->value = false;
93  SpinLockRelease((slock_t *) &ptr->sema);
94 }

References pg_atomic_flag::sema, SpinLockAcquire, SpinLockRelease, and pg_atomic_flag::value.

Referenced by pg_atomic_clear_flag().

◆ pg_atomic_compare_exchange_u32_impl()

bool pg_atomic_compare_exchange_u32_impl ( volatile pg_atomic_uint32 * ptr,
uint32 *  expected,
uint32  newval 
)

Definition at line 137 of file atomics.c.

139 {
140  bool ret;
141 
142  /*
143  * Do atomic op under a spinlock. It might look like we could just skip
144  * the cmpxchg if the lock isn't available, but that'd just emulate a
145  * 'weak' compare and swap. I.e. one that allows spurious failures. Since
146  * several algorithms rely on a strong variant and that is efficiently
147  * implementable on most major architectures let's emulate it here as
148  * well.
149  */
150  SpinLockAcquire((slock_t *) &ptr->sema);
151 
152  /* perform compare/exchange logic */
153  ret = ptr->value == *expected;
154  *expected = ptr->value;
155  if (ret)
156  ptr->value = newval;
157 
158  /* and release lock */
159  SpinLockRelease((slock_t *) &ptr->sema);
160 
161  return ret;
162 }

References newval, pg_atomic_uint32::sema, SpinLockAcquire, SpinLockRelease, and pg_atomic_uint32::value.

Referenced by pg_atomic_compare_exchange_u32().
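
Because the emulation always performs the exchange under the spinlock, callers get the same "strong" semantics as the native implementations: on failure, *expected is overwritten with the value currently stored, so a retry loop needs no separate re-read. A minimal caller-side sketch using the public wrapper pg_atomic_compare_exchange_u32(); the function and variable names below are illustrative, not part of atomics.c:

#include "postgres.h"
#include "port/atomics.h"

/* Illustrative: bump a shared counter, but never past a limit. */
static void
bump_if_below_limit(volatile pg_atomic_uint32 *counter, uint32 limit)
{
    uint32      cur = pg_atomic_read_u32(counter);

    while (cur < limit)
    {
        /*
         * Strong CAS: if it fails, 'cur' has been refreshed with the value
         * another backend installed, so simply loop and re-check.
         */
        if (pg_atomic_compare_exchange_u32(counter, &cur, cur + 1))
            break;              /* we installed cur + 1 */
    }
}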

◆ pg_atomic_compare_exchange_u64_impl()

bool pg_atomic_compare_exchange_u64_impl ( volatile pg_atomic_uint64 * ptr,
uint64 *  expected,
uint64  newval 
)

Definition at line 200 of file atomics.c.

202 {
203  bool ret;
204 
205  /*
206  * Do atomic op under a spinlock. It might look like we could just skip
207  * the cmpxchg if the lock isn't available, but that'd just emulate a
208  * 'weak' compare and swap. I.e. one that allows spurious failures. Since
209  * several algorithms rely on a strong variant and that is efficiently
210  * implementable on most major architectures let's emulate it here as
211  * well.
212  */
213  SpinLockAcquire((slock_t *) &ptr->sema);
214 
215  /* perform compare/exchange logic */
216  ret = ptr->value == *expected;
217  *expected = ptr->value;
218  if (ret)
219  ptr->value = newval;
220 
221  /* and release lock */
222  SpinLockRelease((slock_t *) &ptr->sema);
223 
224  return ret;
225 }

References newval, pg_atomic_uint64::sema, SpinLockAcquire, SpinLockRelease, and pg_atomic_uint64::value.

Referenced by pg_atomic_compare_exchange_u64(), pg_atomic_monotonic_advance_u64(), and pg_atomic_read_u64_impl().
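
The Referenced-by list shows pg_atomic_monotonic_advance_u64() building on this primitive. A hedged sketch of that kind of retry loop follows; it illustrates the pattern only and is not the actual implementation shipped in port/atomics.h:

#include "postgres.h"
#include "port/atomics.h"

/*
 * Advance a shared 64-bit position, never moving it backwards.
 * Illustrative only; PostgreSQL provides pg_atomic_monotonic_advance_u64()
 * for this purpose.
 */
static uint64
advance_position(volatile pg_atomic_uint64 *pos, uint64 target)
{
    uint64      cur = pg_atomic_read_u64(pos);

    while (cur < target)
    {
        /* On failure 'cur' is refreshed; re-check whether we still need to move. */
        if (pg_atomic_compare_exchange_u64(pos, &cur, target))
            return target;
    }
    return cur;                 /* someone else already advanced past target */
}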

◆ pg_atomic_fetch_add_u32_impl()

uint32 pg_atomic_fetch_add_u32_impl ( volatile pg_atomic_uint32 * ptr,
int32  add_ 
)

Definition at line 165 of file atomics.c.

166 {
167  uint32 oldval;
168 
169  SpinLockAcquire((slock_t *) &ptr->sema);
170  oldval = ptr->value;
171  ptr->value += add_;
172  SpinLockRelease((slock_t *) &ptr->sema);
173  return oldval;
174 }

References pg_atomic_uint32::sema, SpinLockAcquire, SpinLockRelease, and pg_atomic_uint32::value.

Referenced by pg_atomic_fetch_add_u32().
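
The public wrapper pg_atomic_fetch_add_u32() returns the value held before the addition, which makes it convenient for handing out unique slots or tickets. A minimal sketch; the shared struct and function names are invented for the example:

#include "postgres.h"
#include "port/atomics.h"

/* Hypothetical shared state: a cursor handing out work-item indexes. */
typedef struct WorkQueueShared
{
    pg_atomic_uint32 next_item;
} WorkQueueShared;

static uint32
claim_next_item(WorkQueueShared *shared)
{
    /* Returns the pre-increment value, so each caller gets a distinct index. */
    return pg_atomic_fetch_add_u32(&shared->next_item, 1);
}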

◆ pg_atomic_fetch_add_u64_impl()

uint64 pg_atomic_fetch_add_u64_impl ( volatile pg_atomic_uint64 * ptr,
int64  add_ 
)

Definition at line 228 of file atomics.c.

229 {
230  uint64 oldval;
231 
232  SpinLockAcquire((slock_t *) &ptr->sema);
233  oldval = ptr->value;
234  ptr->value += add_;
235  SpinLockRelease((slock_t *) &ptr->sema);
236  return oldval;
237 }

References pg_atomic_uint64::sema, SpinLockAcquire, SpinLockRelease, and pg_atomic_uint64::value.

Referenced by pg_atomic_fetch_add_u64().

◆ pg_atomic_init_flag_impl()

void pg_atomic_init_flag_impl ( volatile pg_atomic_flag * ptr)

Definition at line 55 of file atomics.c.

56 {
57  StaticAssertDecl(sizeof(ptr->sema) >= sizeof(slock_t),
58  "size mismatch of atomic_flag vs slock_t");
59 
60 #ifndef HAVE_SPINLOCKS
61 
62  /*
63  * NB: If we're using semaphore based TAS emulation, be careful to use a
64  * separate set of semaphores. Otherwise we'd get in trouble if an atomic
65  * var would be manipulated while spinlock is held.
66  */
67  s_init_lock_sema((slock_t *) &ptr->sema, true);
68 #else
69  SpinLockInit((slock_t *) &ptr->sema);
70 #endif
71 
72  ptr->value = false;
73 }

References s_init_lock_sema(), pg_atomic_flag::sema, SpinLockInit, StaticAssertDecl, and pg_atomic_flag::value.

Referenced by pg_atomic_init_flag().
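
Atomic variables must be initialized exactly once, by a single process, before any concurrent access; the init functions set up the backing semaphore/spinlock as shown above and then perform a plain store. A sketch of the usual pattern in a shared-memory startup path; the struct and function names are invented for the example:

#include "postgres.h"
#include "port/atomics.h"

/* Hypothetical shared state updated with atomics. */
typedef struct MySharedState
{
    pg_atomic_flag   busy;
    pg_atomic_uint32 counter;
} MySharedState;

static void
my_shared_state_init(MySharedState *state)
{
    /* Single-process initialization, e.g. from a shmem startup hook. */
    pg_atomic_init_flag(&state->busy);
    pg_atomic_init_u32(&state->counter, 0);
}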

◆ pg_atomic_init_u32_impl()

void pg_atomic_init_u32_impl ( volatile pg_atomic_uint32 * ptr,
uint32  val_ 
)

Definition at line 106 of file atomics.c.

107 {
108  StaticAssertDecl(sizeof(ptr->sema) >= sizeof(slock_t),
109  "size mismatch of atomic_uint32 vs slock_t");
110 
111  /*
112  * If we're using semaphore based atomic flags, be careful about nested
113  * usage of atomics while a spinlock is held.
114  */
115 #ifndef HAVE_SPINLOCKS
116  s_init_lock_sema((slock_t *) &ptr->sema, true);
117 #else
118  SpinLockInit((slock_t *) &ptr->sema);
119 #endif
120  ptr->value = val_;
121 }

References s_init_lock_sema(), pg_atomic_uint32::sema, SpinLockInit, StaticAssertDecl, and pg_atomic_uint32::value.

Referenced by pg_atomic_init_u32().

◆ pg_atomic_init_u64_impl()

void pg_atomic_init_u64_impl ( volatile pg_atomic_uint64 * ptr,
uint64  val_ 
)

Definition at line 182 of file atomics.c.

183 {
184  StaticAssertDecl(sizeof(ptr->sema) >= sizeof(slock_t),
185  "size mismatch of atomic_uint64 vs slock_t");
186 
187  /*
188  * If we're using semaphore based atomic flags, be careful about nested
189  * usage of atomics while a spinlock is held.
190  */
191 #ifndef HAVE_SPINLOCKS
192  s_init_lock_sema((slock_t *) &ptr->sema, true);
193 #else
194  SpinLockInit((slock_t *) &ptr->sema);
195 #endif
196  ptr->value = val_;
197 }

References s_init_lock_sema(), pg_atomic_uint64::sema, SpinLockInit, StaticAssertDecl, and pg_atomic_uint64::value.

Referenced by pg_atomic_init_u64().

◆ pg_atomic_test_set_flag_impl()

bool pg_atomic_test_set_flag_impl ( volatile pg_atomic_flag * ptr)

Definition at line 76 of file atomics.c.

77 {
78  uint32 oldval;
79 
80  SpinLockAcquire((slock_t *) &ptr->sema);
81  oldval = ptr->value;
82  ptr->value = true;
83  SpinLockRelease((slock_t *) &ptr->sema);
84 
85  return oldval == 0;
86 }

References pg_atomic_flag::sema, SpinLockAcquire, SpinLockRelease, and pg_atomic_flag::value.

Referenced by pg_atomic_test_set_flag().
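
pg_atomic_test_set_flag() returns true only if the caller changed the flag from clear to set, so a pg_atomic_flag can act as a minimal test-and-set latch; pg_atomic_unlocked_test_flag() (below) provides a cheap hint while spinning, and pg_atomic_clear_flag() releases it. A hedged sketch; real backend code would add backoff or interrupt checks, or simply use a spinlock or LWLock:

#include "postgres.h"
#include "port/atomics.h"

/* Illustrative busy-wait latch around a pg_atomic_flag (names hypothetical). */
static void
acquire_flag(volatile pg_atomic_flag *flag)
{
    while (!pg_atomic_test_set_flag(flag))
    {
        /* Spin cheaply until the flag looks clear again, then retry the TAS. */
        while (!pg_atomic_unlocked_test_flag(flag))
            ;
    }
}

static void
release_flag(volatile pg_atomic_flag *flag)
{
    pg_atomic_clear_flag(flag);
}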

◆ pg_atomic_unlocked_test_flag_impl()

bool pg_atomic_unlocked_test_flag_impl ( volatile pg_atomic_flag * ptr)

Definition at line 97 of file atomics.c.

98 {
99  return ptr->value == 0;
100 }

References pg_atomic_flag::value.

Referenced by pg_atomic_unlocked_test_flag().

◆ pg_atomic_write_u32_impl()

void pg_atomic_write_u32_impl ( volatile pg_atomic_uint32 * ptr,
uint32  val 
)

Definition at line 124 of file atomics.c.

125 {
126  /*
127  * One might think that an unlocked write doesn't need to acquire the
128  * spinlock, but one would be wrong. Even an unlocked write has to cause a
129  * concurrent pg_atomic_compare_exchange_u32() (et al) to fail.
130  */
131  SpinLockAcquire((slock_t *) &ptr->sema);
132  ptr->value = val;
133  SpinLockRelease((slock_t *) &ptr->sema);
134 }

References pg_atomic_uint32::sema, SpinLockAcquire, SpinLockRelease, val, and pg_atomic_uint32::value.

Referenced by pg_atomic_write_u32().
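
Because even an "unlocked" write goes through the spinlock here, a writer that resets a counter still serializes correctly against concurrent compare-exchange or fetch-add callers. A short sketch using the public wrapper; the function name is illustrative:

#include "postgres.h"
#include "port/atomics.h"

/* Illustrative: reset a shared counter that other backends update atomically. */
static void
reset_counter(volatile pg_atomic_uint32 *counter)
{
    /*
     * Plain atomic write: concurrent compare-exchange callers will observe
     * the new value (their CAS fails and reloads), per the comment above.
     * If the previous value is also needed, pg_atomic_exchange_u32() can be
     * used instead.
     */
    pg_atomic_write_u32(counter, 0);
}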

◆ pg_extern_compiler_barrier()

void pg_extern_compiler_barrier ( void  )

Definition at line 45 of file atomics.c.

46 {
47  /* do nothing */
48 }

◆ pg_spinlock_barrier()

void pg_spinlock_barrier ( void  )

Definition at line 29 of file atomics.c.

30 {
31  /*
32  * NB: we have to be reentrant here, some barriers are placed in signal
33  * handlers.
34  *
35  * We use kill(0) for the fallback barrier as we assume that kernels on
36  * systems old enough to require fallback barrier support will include an
37  * appropriate barrier while checking the existence of the postmaster pid.
38  */
39  (void) kill(PostmasterPid, 0);
40 }

References kill, and PostmasterPid.
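
This function backs the generic memory barrier only on platforms that have to fall back to it; ordinary code never calls it directly and instead uses the pg_memory_barrier(), pg_read_barrier(), and pg_write_barrier() macros from port/atomics.h. A hedged sketch of the usual publish/consume pairing; the variables are stand-ins for fields that would really live in shared memory:

#include "postgres.h"
#include "port/atomics.h"

/* Illustrative shared data published behind a ready flag (names hypothetical). */
static int  shared_payload;
static int  shared_ready;

static void
publish(int value)
{
    shared_payload = value;
    pg_write_barrier();         /* payload must be visible before the flag */
    shared_ready = 1;
}

static bool
consume(int *value)
{
    if (!shared_ready)
        return false;
    pg_read_barrier();          /* read the flag before reading the payload */
    *value = shared_payload;
    return true;
}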