atomics.c
/*-------------------------------------------------------------------------
 *
 * atomics.c
 *	   Non-Inline parts of the atomics implementation
 *
 * Portions Copyright (c) 2013-2017, PostgreSQL Global Development Group
 *
 *
 * IDENTIFICATION
 *	  src/backend/port/atomics.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "miscadmin.h"
#include "port/atomics.h"
#include "storage/spin.h"

#ifdef PG_HAVE_MEMORY_BARRIER_EMULATION
#ifdef WIN32
#error "barriers are required (and provided) on WIN32 platforms"
#endif
#include <signal.h>
#endif

#ifdef PG_HAVE_MEMORY_BARRIER_EMULATION
void
pg_spinlock_barrier(void)
{
	/*
	 * NB: we have to be reentrant here; some barriers are placed in signal
	 * handlers.
	 *
	 * We use kill(0) for the fallback barrier, as we assume that kernels on
	 * systems old enough to require fallback barrier support will include an
	 * appropriate barrier while checking the existence of the postmaster pid.
	 */
	(void) kill(PostmasterPid, 0);
}
#endif
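
/*
 * A minimal sketch of what the fallback above ultimately backs (hypothetical
 * names, under a made-up PG_ATOMICS_EXAMPLES guard): pg_memory_barrier()
 * ordering a data store before a flag store, so a reader that observes the
 * flag also observes the data.
 */
#ifdef PG_ATOMICS_EXAMPLES
static int	example_payload;		/* hypothetical shared data */
static volatile int example_ready;	/* hypothetical ready flag */

static void
example_publish(void)
{
	example_payload = 42;
	pg_memory_barrier();		/* data store must become visible first */
	example_ready = 1;
}

static int
example_consume(void)
{
	if (example_ready)
	{
		pg_memory_barrier();	/* flag load must happen before data load */
		return example_payload;
	}
	return -1;					/* not yet published */
}
#endif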

#ifdef PG_HAVE_COMPILER_BARRIER_EMULATION
void
pg_extern_compiler_barrier(void)
{
	/* do nothing */
}
#endif
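
/*
 * The empty body above is deliberate: since pg_extern_compiler_barrier() is
 * an out-of-line function, the compiler must (absent whole-program
 * optimization) assume the call could read or write any global state, and so
 * cannot reorder or cache memory accesses across it. A minimal sketch of
 * typical compiler-barrier use (hypothetical names, made-up
 * PG_ATOMICS_EXAMPLES guard; a real signal-handler flag should be a
 * volatile sig_atomic_t):
 */
#ifdef PG_ATOMICS_EXAMPLES
static int	example_stop;		/* hypothetical flag, set elsewhere */

static void
example_wait_until_stopped(void)
{
	while (!example_stop)
		pg_compiler_barrier();	/* forces example_stop to be re-read */
}
#endif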

#ifdef PG_HAVE_ATOMIC_FLAG_SIMULATION

void
pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
{
	StaticAssertStmt(sizeof(ptr->sema) >= sizeof(slock_t),
					 "size mismatch of atomic_flag vs slock_t");

#ifndef HAVE_SPINLOCKS

	/*
	 * NB: If we're using semaphore-based TAS emulation, be careful to use a
	 * separate set of semaphores. Otherwise we'd get in trouble if an atomic
	 * var were manipulated while a spinlock is held.
	 */
	s_init_lock_sema((slock_t *) &ptr->sema, true);
#else
	SpinLockInit((slock_t *) &ptr->sema);
#endif
}

bool
pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
{
	/* TAS() returns zero on success, but we must return true on success */
	return TAS((slock_t *) &ptr->sema) == 0;
}

void
pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
{
	S_UNLOCK((slock_t *) &ptr->sema);
}

#endif							/* PG_HAVE_ATOMIC_FLAG_SIMULATION */
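
/*
 * A minimal sketch of typical pg_atomic_flag use (hypothetical names,
 * made-up PG_ATOMICS_EXAMPLES guard): the flag serves as a once-only latch,
 * since pg_atomic_test_set_flag() returns true for exactly one caller until
 * the flag is cleared again.
 */
#ifdef PG_ATOMICS_EXAMPLES
static pg_atomic_flag example_once; /* pg_atomic_init_flag()'d at startup */

static void
example_run_once(void)
{
	if (pg_atomic_test_set_flag(&example_once))
	{
		/* only the caller that won the race performs the work */
	}
}
#endif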

#ifdef PG_HAVE_ATOMIC_U32_SIMULATION
void
pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
{
	StaticAssertStmt(sizeof(ptr->sema) >= sizeof(slock_t),
					 "size mismatch of atomic_uint32 vs slock_t");

	/*
	 * If we're using semaphore-based atomic flags, be careful about nested
	 * usage of atomics while a spinlock is held.
	 */
#ifndef HAVE_SPINLOCKS
	s_init_lock_sema((slock_t *) &ptr->sema, true);
#else
	SpinLockInit((slock_t *) &ptr->sema);
#endif
	ptr->value = val_;
}

void
pg_atomic_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
{
	/*
	 * One might think that an unlocked write doesn't need to acquire the
	 * spinlock, but one would be wrong. Even an unlocked write has to cause a
	 * concurrent pg_atomic_compare_exchange_u32() (et al) to fail.
	 */
	SpinLockAcquire((slock_t *) &ptr->sema);
	ptr->value = val;
	SpinLockRelease((slock_t *) &ptr->sema);
}
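
/*
 * A minimal sketch of the interplay described above (hypothetical names,
 * made-up PG_ATOMICS_EXAMPLES guard): because the plain write takes the same
 * spinlock as the compare-exchange, a concurrent CAS can never succeed
 * against a value the write has already replaced.
 */
#ifdef PG_ATOMICS_EXAMPLES
static pg_atomic_uint32 example_state;	/* hypothetical shared state word */

/* unconditionally reset the state; serializes with any concurrent CAS */
static void
example_reset_state(void)
{
	pg_atomic_write_u32(&example_state, 0);
}

/* move the state from 'from' to 'to' only if nobody changed it meanwhile */
static bool
example_try_transition(uint32 from, uint32 to)
{
	uint32		expected = from;

	return pg_atomic_compare_exchange_u32(&example_state, &expected, to);
}
#endif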

bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
									uint32 *expected, uint32 newval)
{
	bool		ret;

	/*
	 * Do the atomic op under a spinlock. It might look like we could just
	 * skip the cmpxchg if the lock isn't available, but that would only
	 * emulate a 'weak' compare-and-swap, i.e. one that allows spurious
	 * failures. Since several algorithms rely on the strong variant, and it
	 * is efficiently implementable on most major architectures, emulate it
	 * here as well.
	 */
	SpinLockAcquire((slock_t *) &ptr->sema);

	/* perform compare/exchange logic */
	ret = ptr->value == *expected;
	*expected = ptr->value;
	if (ret)
		ptr->value = newval;

	/* and release lock */
	SpinLockRelease((slock_t *) &ptr->sema);

	return ret;
}
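
/*
 * The strong semantics above are what make the classic read-modify-CAS retry
 * loop work without a separate re-read: on failure, *expected is updated to
 * the value currently stored. A minimal sketch (hypothetical helper, made-up
 * PG_ATOMICS_EXAMPLES guard) that atomically raises a value to a new
 * maximum:
 */
#ifdef PG_ATOMICS_EXAMPLES
static uint32
example_fetch_max(volatile pg_atomic_uint32 *ptr, uint32 newmax)
{
	uint32		old = pg_atomic_read_u32(ptr);

	while (old < newmax)
	{
		/* on failure, 'old' is refreshed from the current stored value */
		if (pg_atomic_compare_exchange_u32(ptr, &old, newmax))
			break;
	}

	return old;
}
#endif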

uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
	uint32		oldval;

	SpinLockAcquire((slock_t *) &ptr->sema);
	oldval = ptr->value;
	ptr->value += add_;
	SpinLockRelease((slock_t *) &ptr->sema);
	return oldval;
}

#endif							/* PG_HAVE_ATOMIC_U32_SIMULATION */
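
/*
 * A minimal sketch of fetch-add as a shared counter (hypothetical names,
 * made-up PG_ATOMICS_EXAMPLES guard): the return value is the counter's
 * value before the addition, so concurrent callers each obtain a distinct
 * id.
 */
#ifdef PG_ATOMICS_EXAMPLES
static pg_atomic_uint32 example_counter;	/* pg_atomic_init_u32()'d at startup */

static uint32
example_next_id(void)
{
	return pg_atomic_fetch_add_u32(&example_counter, 1);
}
#endif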