PostgreSQL Source Code  git master
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros
atomics.c
Go to the documentation of this file.
1 /*-------------------------------------------------------------------------
2  *
3  * atomics.c
4  * Non-Inline parts of the atomics implementation
5  *
6  * Portions Copyright (c) 2013-2017, PostgreSQL Global Development Group
7  *
8  *
9  * IDENTIFICATION
10  * src/backend/port/atomics.c
11  *
12  *-------------------------------------------------------------------------
13  */
14 #include "postgres.h"
15 
16 #include "miscadmin.h"
17 #include "port/atomics.h"
18 #include "storage/spin.h"
19 
20 #ifdef PG_HAVE_MEMORY_BARRIER_EMULATION
21 #ifdef WIN32
22 #error "barriers are required (and provided) on WIN32 platforms"
23 #endif
24 #include <sys/types.h>
25 #include <signal.h>
26 #endif
27 
28 #ifdef PG_HAVE_MEMORY_BARRIER_EMULATION
29 void
31 {
32  /*
33  * NB: we have to be reentrant here, some barriers are placed in signal
34  * handlers.
35  *
36  * We use kill(0) for the fallback barrier as we assume that kernels on
37  * systems old enough to require fallback barrier support will include an
38  * appropriate barrier while checking the existence of the postmaster pid.
39  */
40  (void) kill(PostmasterPid, 0);
41 }
42 #endif
43 
44 #ifdef PG_HAVE_COMPILER_BARRIER_EMULATION
/*
 * Out-of-line compiler barrier: the mere existence of the external function
 * call prevents the compiler from reordering memory accesses across it.
 * (Signature line restored; it was lost in the documentation extraction.)
 */
void
pg_extern_compiler_barrier(void)
{
	/* do nothing */
}
50 #endif
51 
52 
53 #ifdef PG_HAVE_ATOMIC_FLAG_SIMULATION
54 
55 void
57 {
58  StaticAssertStmt(sizeof(ptr->sema) >= sizeof(slock_t),
59  "size mismatch of atomic_flag vs slock_t");
60 
61 #ifndef HAVE_SPINLOCKS
62 
63  /*
64  * NB: If we're using semaphore based TAS emulation, be careful to use a
65  * separate set of semaphores. Otherwise we'd get in trouble if an atomic
66  * var would be manipulated while spinlock is held.
67  */
68  s_init_lock_sema((slock_t *) &ptr->sema, true);
69 #else
70  SpinLockInit((slock_t *) &ptr->sema);
71 #endif
72 }
73 
74 bool
76 {
77  return TAS((slock_t *) &ptr->sema);
78 }
79 
80 void
82 {
83  S_UNLOCK((slock_t *) &ptr->sema);
84 }
85 
86 #endif /* PG_HAVE_ATOMIC_FLAG_SIMULATION */
87 
88 #ifdef PG_HAVE_ATOMIC_U32_SIMULATION
89 void
91 {
92  StaticAssertStmt(sizeof(ptr->sema) >= sizeof(slock_t),
93  "size mismatch of atomic_flag vs slock_t");
94 
95  /*
96  * If we're using semaphore based atomic flags, be careful about nested
97  * usage of atomics while a spinlock is held.
98  */
99 #ifndef HAVE_SPINLOCKS
100  s_init_lock_sema((slock_t *) &ptr->sema, true);
101 #else
102  SpinLockInit((slock_t *) &ptr->sema);
103 #endif
104  ptr->value = val_;
105 }
106 
107 void
109 {
110  /*
111  * One might think that an unlocked write doesn't need to acquire the
112  * spinlock, but one would be wrong. Even an unlocked write has to cause a
113  * concurrent pg_atomic_compare_exchange_u32() (et al) to fail.
114  */
115  SpinLockAcquire((slock_t *) &ptr->sema);
116  ptr->value = val;
117  SpinLockRelease((slock_t *) &ptr->sema);
118 }
119 
120 bool
122  uint32 *expected, uint32 newval)
123 {
124  bool ret;
125 
126  /*
127  * Do atomic op under a spinlock. It might look like we could just skip
128  * the cmpxchg if the lock isn't available, but that'd just emulate a
129  * 'weak' compare and swap. I.e. one that allows spurious failures. Since
130  * several algorithms rely on a strong variant and that is efficiently
131  * implementable on most major architectures let's emulate it here as
132  * well.
133  */
134  SpinLockAcquire((slock_t *) &ptr->sema);
135 
136  /* perform compare/exchange logic */
137  ret = ptr->value == *expected;
138  *expected = ptr->value;
139  if (ret)
140  ptr->value = newval;
141 
142  /* and release lock */
143  SpinLockRelease((slock_t *) &ptr->sema);
144 
145  return ret;
146 }
147 
148 uint32
150 {
151  uint32 oldval;
152 
153  SpinLockAcquire((slock_t *) &ptr->sema);
154  oldval = ptr->value;
155  ptr->value += add_;
156  SpinLockRelease((slock_t *) &ptr->sema);
157  return oldval;
158 }
159 
160 #endif /* PG_HAVE_ATOMIC_U32_SIMULATION */
int slock_t
Definition: s_lock.h:888
#define TAS(lock)
Definition: s_lock.h:898
void pg_atomic_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
Definition: atomics.c:108
#define SpinLockInit(lock)
Definition: spin.h:60
void pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
Definition: atomics.c:81
bool pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
Definition: atomics.c:121
volatile uint32 value
Definition: fallback.h:100
void pg_spinlock_barrier(void)
Definition: atomics.c:30
signed int int32
Definition: c.h:253
uint32 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
Definition: atomics.c:149
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:752
#define SpinLockAcquire(lock)
Definition: spin.h:62
unsigned int uint32
Definition: c.h:265
void pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
Definition: atomics.c:90
void pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
Definition: atomics.c:56
void s_init_lock_sema(volatile slock_t *lock, bool nested)
Definition: spin.c:107
pid_t PostmasterPid
Definition: globals.c:86
#define SpinLockRelease(lock)
Definition: spin.h:64
#define newval
#define S_UNLOCK(lock)
Definition: s_lock.h:896
bool pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
Definition: atomics.c:75
void pg_extern_compiler_barrier(void)
Definition: atomics.c:46
long val
Definition: informix.c:689