/*-------------------------------------------------------------------------
 *
 * s_lock.h
 *	   Hardware-dependent implementation of spinlocks.
 *
 * NOTE: none of the macros in this file are intended to be called directly.
 * Call them through the hardware-independent macros in spin.h.
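 *
 * For example, backend code normally manipulates spinlocks only through
 * spin.h, along these lines (a sketch; "MyShared" and its fields are
 * hypothetical, while SpinLockInit/SpinLockAcquire/SpinLockRelease are the
 * spin.h macros):
 *
 *		volatile MyShared *shared = ...;
 *
 *		SpinLockInit(&shared->mutex);		(once, when creating the struct)
 *		...
 *		SpinLockAcquire(&shared->mutex);
 *		shared->counter++;			(keep critical sections short)
 *		SpinLockRelease(&shared->mutex);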
 *
 * The following hardware-dependent macros must be provided for each
 * supported platform:
 *
 * void S_INIT_LOCK(slock_t *lock)
 *		Initialize a spinlock (to the unlocked state).
 *
 * int S_LOCK(slock_t *lock)
 *		Acquire a spinlock, waiting if necessary.
 *		Time out and abort() if unable to acquire the lock in a
 *		"reasonable" amount of time --- typically ~ 1 minute.
 *		Should return number of "delays"; see s_lock.c
 *
 * void S_UNLOCK(slock_t *lock)
 *		Unlock a previously acquired lock.
 *
 * bool S_LOCK_FREE(slock_t *lock)
 *		Tests if the lock is free. Returns true if free, false if locked.
 *		This does *not* change the state of the lock.
 *
 * void SPIN_DELAY(void)
 *		Delay operation to occur inside spinlock wait loop.
 *
 * Note to implementors: there are default implementations for all these
 * macros at the bottom of the file.  Check if your platform can use
 * these or needs to override them.
 *
 * Usually, S_LOCK() is implemented in terms of even lower-level macros
 * TAS() and TAS_SPIN():
 *
 * int TAS(slock_t *lock)
 *		Atomic test-and-set instruction.  Attempt to acquire the lock,
 *		but do *not* wait.  Returns 0 if successful, nonzero if unable
 *		to acquire the lock.
 *
 * int TAS_SPIN(slock_t *lock)
 *		Like TAS(), but this version is used when waiting for a lock
 *		previously found to be contended.  By default, this is the
 *		same as TAS(), but on some architectures it's better to poll a
 *		contended lock using an unlocked instruction and retry the
 *		atomic test-and-set only when it appears free.
 *
 * TAS() and TAS_SPIN() are NOT part of the API, and should never be called
 * directly.
 *
 * CAUTION: on some platforms TAS() and/or TAS_SPIN() may sometimes report
 * failure to acquire a lock even when the lock is not locked.  For example,
 * on Alpha TAS() will "fail" if interrupted.  Therefore a retry loop must
 * always be used, even if you are certain the lock is free.
 *
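 * In outline, the retry loop used by the out-of-line slow path is simply
 * (a sketch of s_lock(); see s_lock.c and the SpinDelayStatus support at
 * the bottom of this file):
 *
 *		SpinDelayStatus delayStatus;
 *
 *		init_spin_delay(&delayStatus, file, line, func);
 *		while (TAS_SPIN(lock))
 *			perform_spin_delay(&delayStatus);
 *		finish_spin_delay(&delayStatus);
 *		return delayStatus.delays;
 *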
 * It is the responsibility of these macros to make sure that the compiler
 * does not re-order accesses to shared memory to precede the actual lock
 * acquisition, or follow the lock release.  Prior to PostgreSQL 9.5, this
 * was the caller's responsibility, which meant that callers had to use
 * volatile-qualified pointers to refer to both the spinlock itself and the
 * shared data being accessed within the spinlocked critical section.  This
 * was notationally awkward, easy to forget (and thus error-prone), and
 * prevented some useful compiler optimizations.  For these reasons, we
 * now require that the macros themselves prevent compiler re-ordering,
 * so that the caller doesn't need to take special precautions.
 *
 * On platforms with weak memory ordering, the TAS(), TAS_SPIN(), and
 * S_UNLOCK() macros must further include hardware-level memory fence
 * instructions to prevent similar re-ordering at the hardware level.
 * TAS() and TAS_SPIN() must guarantee that loads and stores issued after
 * the macro are not executed until the lock has been obtained.  Conversely,
 * S_UNLOCK() must guarantee that loads and stores issued before the macro
 * have been executed before the lock is released.
 *
 * On most supported platforms, TAS() uses a tas() function written
 * in assembly language to execute a hardware atomic-test-and-set
 * instruction.  Equivalent OS-supplied mutex routines could be used too.
 *
 * If no system-specific TAS() is available (ie, HAVE_SPINLOCKS is not
 * defined), then we fall back on an emulation that uses SysV semaphores
 * (see spin.c).  This emulation will be MUCH MUCH slower than a proper TAS()
 * implementation, because of the cost of a kernel call per lock or unlock.
 * An old report is that Postgres spends around 40% of its time in semop(2)
 * when using the SysV semaphore code.
 *
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *	  src/include/storage/s_lock.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef S_LOCK_H
#define S_LOCK_H

#ifdef FRONTEND
#error "s_lock.h may not be included from frontend code"
#endif

#ifdef HAVE_SPINLOCKS	/* skip spinlocks if requested */

#if defined(__GNUC__) || defined(__INTEL_COMPILER)
/*************************************************************************
 * All the gcc inlines
 * Gcc consistently defines the CPU as __cpu__.
 * Other compilers use __cpu or __cpu__ so we test for both in those cases.
 */

/*----------
 * Standard gcc asm format (assuming "volatile slock_t *lock"):

	__asm__ __volatile__(
		"	instruction	\n"
		"	instruction	\n"
		"	instruction	\n"
:		"=r"(_res), "+m"(*lock)		// return register, in/out lock value
:		"r"(lock)			// lock pointer, in input register
:		"memory", "cc");		// show clobbered registers here

 * The output-operands list (after first colon) should always include
 * "+m"(*lock), whether or not the asm code actually refers to this
 * operand directly.  This ensures that gcc believes the value in the
 * lock variable is used and set by the asm code.  Also, the clobbers
 * list (after third colon) should always include "memory"; this prevents
 * gcc from thinking it can cache the values of shared-memory fields
 * across the asm code.  Add "cc" if your asm code changes the condition
 * code register, and also list any temp registers the code uses.
 *----------
 */


#ifdef __i386__			/* 32-bit i386 */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res = 1;

	/*
	 * Use a non-locking test before asserting the bus lock.  Note that the
	 * extra test appears to be a small loss on some x86 platforms and a small
	 * win on others; it's by no means clear that we should keep it.
	 *
	 * When this was last tested, we didn't have separate TAS() and TAS_SPIN()
	 * macros.  Nowadays it probably would be better to do a non-locking test
	 * in TAS_SPIN() but not in TAS(), like on x86_64, but no-one's done the
	 * testing to verify that.  Without some empirical evidence, better to
	 * leave it alone.
	 */
	__asm__ __volatile__(
		"	cmpb	$0,%1	\n"
		"	jne	1f	\n"
		"	lock		\n"
		"	xchgb	%0,%1	\n"
		"1: \n"
:		"+q"(_res), "+m"(*lock)
:		/* no inputs */
:		"memory", "cc");
	return (int) _res;
}

#define SPIN_DELAY() spin_delay()

static __inline__ void
spin_delay(void)
{
	/*
	 * This sequence is equivalent to the PAUSE instruction ("rep" is
	 * ignored by old IA32 processors if the following instruction is
	 * not a string operation); the IA-32 Architecture Software
	 * Developer's Manual, Vol. 3, Section 7.7.2 describes why using
	 * PAUSE in the inner loop of a spin lock is necessary for good
	 * performance:
	 *
	 *     The PAUSE instruction improves the performance of IA-32
	 *     processors supporting Hyper-Threading Technology when
	 *     executing spin-wait loops and other routines where one
	 *     thread is accessing a shared lock or semaphore in a tight
	 *     polling loop.  When executing a spin-wait loop, the
	 *     processor can suffer a severe performance penalty when
	 *     exiting the loop because it detects a possible memory order
	 *     violation and flushes the core processor's pipeline.  The
	 *     PAUSE instruction provides a hint to the processor that the
	 *     code sequence is a spin-wait loop.  The processor uses this
	 *     hint to avoid the memory order violation and prevent the
	 *     pipeline flush.  In addition, the PAUSE instruction
	 *     de-pipelines the spin-wait loop to prevent it from
	 *     consuming execution resources excessively.
	 */
	__asm__ __volatile__(
		" rep; nop	\n");
}

#endif	 /* __i386__ */


#ifdef __x86_64__		/* AMD Opteron, Intel EM64T */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

/*
 * On Intel EM64T, it's a win to use a non-locking test before the xchg proper,
 * but only when spinning.
 *
 * See also Implementing Scalable Atomic Locks for Multi-Core Intel(tm) EM64T
 * and IA32, by Michael Chynoweth and Mary R. Lee.  As of this writing, it is
 * available at:
 * http://software.intel.com/en-us/articles/implementing-scalable-atomic-locks-for-multi-core-intel-em64t-and-ia32-architectures
 */
#define TAS_SPIN(lock)	(*(lock) ? 1 : TAS(lock))

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res = 1;

	__asm__ __volatile__(
		"	lock		\n"
		"	xchgb	%0,%1	\n"
:		"+q"(_res), "+m"(*lock)
:		/* no inputs */
:		"memory", "cc");
	return (int) _res;
}

#define SPIN_DELAY() spin_delay()

static __inline__ void
spin_delay(void)
{
	/*
	 * Adding a PAUSE in the spin delay loop is demonstrably a no-op on
	 * Opteron, but it may be of some use on EM64T, so we keep it.
	 */
	__asm__ __volatile__(
		" rep; nop	\n");
}

#endif	 /* __x86_64__ */


#if defined(__ia64__) || defined(__ia64)
/*
 * Intel Itanium, gcc or Intel's compiler.
 *
 * Itanium has weak memory ordering, but we rely on the compiler to enforce
 * strict ordering of accesses to volatile data.  In particular, while the
 * xchg instruction implicitly acts as a memory barrier with 'acquire'
 * semantics, we do not have an explicit memory fence instruction in the
 * S_UNLOCK macro.  We use a regular assignment to clear the spinlock, and
 * trust that the compiler marks the generated store instruction with the
 * ".rel" opcode.
 *
 * Testing shows that assumption to hold on gcc, although I could not find
 * any explicit statement on that in the gcc manual.  In Intel's compiler,
 * the -m[no-]serialize-volatile option controls that, and testing shows that
 * it is enabled by default.
 *
 * While icc accepts gcc asm blocks on x86[_64], this is not true on ia64
 * (at least not in icc versions before 12.x).  So we have to carry a separate
 * compiler-intrinsic-based implementation for it.
 */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

/* On IA64, it's a win to use a non-locking test before the xchg proper */
#define TAS_SPIN(lock)	(*(lock) ? 1 : TAS(lock))

#ifndef __INTEL_COMPILER

static __inline__ int
tas(volatile slock_t *lock)
{
	long int	ret;

	__asm__ __volatile__(
		"	xchg4	%0=%1,%2	\n"
:		"=r"(ret), "+m"(*lock)
:		"r"(1)
:		"memory");
	return (int) ret;
}

#else							/* __INTEL_COMPILER */

static __inline__ int
tas(volatile slock_t *lock)
{
	int		ret;

	ret = _InterlockedExchange(lock, 1);	/* this is a xchg asm macro */

	return ret;
}

/* icc can't use the regular gcc S_UNLOCK() macro either in this case */
#define S_UNLOCK(lock)	\
	do { __memory_barrier(); *(lock) = 0; } while (0)

#endif   /* __INTEL_COMPILER */
#endif	 /* __ia64__ || __ia64 */

/*
 * On ARM and ARM64, we use __sync_lock_test_and_set(int *, int) if available.
 *
 * We use the int-width variant of the builtin because it works on more chips
 * than other widths.
 */
#if defined(__arm__) || defined(__arm) || defined(__aarch64__) || defined(__aarch64)
#ifdef HAVE_GCC__SYNC_INT32_TAS
#define HAS_TEST_AND_SET

#define TAS(lock) tas(lock)

typedef int slock_t;

static __inline__ int
tas(volatile slock_t *lock)
{
	return __sync_lock_test_and_set(lock, 1);
}

#define S_UNLOCK(lock) __sync_lock_release(lock)

#endif	 /* HAVE_GCC__SYNC_INT32_TAS */
#endif	 /* __arm__ || __arm || __aarch64__ || __aarch64 */


/* S/390 and S/390x Linux (32- and 64-bit zSeries) */
#if defined(__s390__) || defined(__s390x__)
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock)	tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	int		_res = 0;

	__asm__ __volatile__(
		"	cs	%0,%3,0(%2)	\n"
:		"+d"(_res), "+m"(*lock)
:		"a"(lock), "d"(1)
:		"memory", "cc");
	return _res;
}

#endif	 /* __s390__ || __s390x__ */


#if defined(__sparc__)		/* Sparc */
/*
 * Solaris has always run sparc processors in TSO (total store order) mode,
 * but Linux didn't always, and the *BSDs still don't.  So, be careful about
 * acquire/release semantics.  The CPU will treat superfluous membars as
 * NOPs, so the only cost is code space.
 */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res;

	/*
	 * See comment in /pg/backend/port/tas/solaris_sparc.s for why this
	 * uses "ldstub", and that file uses "cas".  gcc currently generates
	 * sparcv7-targeted binaries, so "cas" use isn't possible.
	 */
	__asm__ __volatile__(
		"	ldstub	[%2], %0	\n"
:		"=r"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory");
#if defined(__sparcv7) || defined(__sparc_v7__)
	/*
	 * No stbar or membar available, luckily no actually produced hardware
	 * requires a barrier.
	 */
#elif defined(__sparcv8) || defined(__sparc_v8__)
	/* stbar is available (and required for both PSO, RMO), membar isn't */
	__asm__ __volatile__ ("stbar	 \n":::"memory");
#else
	/*
	 * #LoadStore (RMO) | #LoadLoad (RMO) together are the appropriate acquire
	 * barrier for sparcv8+ upwards.
	 */
	__asm__ __volatile__ ("membar #LoadStore | #LoadLoad \n":::"memory");
#endif
	return (int) _res;
}

#if defined(__sparcv7) || defined(__sparc_v7__)
/*
 * No stbar or membar available, luckily no actually produced hardware
 * requires a barrier.  We fall through to the default gcc definition of
 * S_UNLOCK in this case.
 */
#elif defined(__sparcv8) || defined(__sparc_v8__)
/* stbar is available (and required for both PSO, RMO), membar isn't */
#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__ ("stbar	 \n":::"memory"); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)
#else
/*
 * #LoadStore (RMO) | #StoreStore (RMO, PSO) together are the appropriate
 * release barrier for sparcv8+ upwards.
 */
#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__ ("membar #LoadStore | #StoreStore \n":::"memory"); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)
#endif

#endif	 /* __sparc__ */


/* PowerPC */
#if defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

/* On PPC, it's a win to use a non-locking test before the lwarx */
#define TAS_SPIN(lock)	(*(lock) ? 1 : TAS(lock))

/*
 * NOTE: per the Enhanced PowerPC Architecture manual, v1.0 dated 7-May-2002,
 * an isync is a sufficient synchronization barrier after a lwarx/stwcx loop.
 * On newer machines, we can use lwsync instead for better performance.
 *
 * Ordinarily, we'd code the branches here using GNU-style local symbols, that
 * is "1f" referencing "1:" and so on.  But some people run gcc on AIX with
 * IBM's assembler as backend, and IBM's assembler doesn't do local symbols.
 * So hand-code the branch offsets; fortunately, all PPC instructions are
 * exactly 4 bytes each, so it's not too hard to count.
 */
static __inline__ int
tas(volatile slock_t *lock)
{
	slock_t		_t;
	int		_res;

	__asm__ __volatile__(
#ifdef USE_PPC_LWARX_MUTEX_HINT
"	lwarx   %0,0,%3,1	\n"
#else
"	lwarx   %0,0,%3		\n"
#endif
"	cmpwi   %0,0		\n"
"	bne     $+16		\n"		/* branch to li %1,1 */
"	addi    %0,%0,1		\n"
"	stwcx.  %0,0,%3		\n"
"	beq     $+12		\n"		/* branch to lwsync/isync */
"	li      %1,1		\n"
"	b       $+12		\n"		/* branch to end of asm sequence */
#ifdef USE_PPC_LWSYNC
"	lwsync			\n"
#else
"	isync			\n"
#endif
"	li      %1,0		\n"

:	"=&r"(_t), "=r"(_res), "+m"(*lock)
:	"r"(lock)
:	"memory", "cc");
	return _res;
}

/*
 * PowerPC S_UNLOCK is almost standard but requires a "sync" instruction.
 * On newer machines, we can use lwsync instead for better performance.
 */
#ifdef USE_PPC_LWSYNC
#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__ ("	lwsync \n" ::: "memory"); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)
#else
#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__ ("	sync \n" ::: "memory"); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)
#endif   /* USE_PPC_LWSYNC */

#endif	 /* powerpc */


/* Linux Motorola 68k */
#if (defined(__mc68000__) || defined(__m68k__)) && defined(__linux__)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock)	tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register int rv;

	__asm__ __volatile__(
		"	clrl	%0	\n"
		"	tas	%1	\n"
		"	sne	%0	\n"
:		"=d"(rv), "+m"(*lock)
:		/* no inputs */
:		"memory", "cc");
	return rv;
}

#endif	 /* (__mc68000__ || __m68k__) && __linux__ */


/* Motorola 88k */
#if defined(__m88k__)
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res = 1;

	__asm__ __volatile__(
		"	xmem	%0, %2, %%r0	\n"
:		"+r"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory");
	return (int) _res;
}

#endif	 /* __m88k__ */


/*
 * VAXen -- even multiprocessor ones
 * (thanks to Tom Ivar Helbekkmo)
 */
#if defined(__vax__)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register int	_res;

	__asm__ __volatile__(
		"	movl	$1, %0		\n"
		"	bbssi	$0, (%2), 1f	\n"
		"	clrl	%0		\n"
		"1: \n"
:		"=&r"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory");
	return _res;
}

#endif	 /* __vax__ */


#if defined(__mips__) && !defined(__sgi)	/* non-SGI MIPS */
/* Note: on SGI we use the OS' mutex ABI, see below */
/* Note: R10000 processors require a separate SYNC */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register volatile slock_t *_l = lock;
	register int _res;
	register int _tmp;

	__asm__ __volatile__(
		"	.set push		\n"
		"	.set mips2		\n"
		"	.set noreorder		\n"
		"	.set nomacro		\n"
		"	ll	%0, %2		\n"
		"	or	%1, %0, 1	\n"
		"	sc	%1, %2		\n"
		"	xori	%1, 1		\n"
		"	or	%0, %0, %1	\n"
		"	sync			\n"
		"	.set pop		  "
:		"=&r" (_res), "=&r" (_tmp), "+R" (*_l)
:		/* no inputs */
:		"memory");
	return _res;
}

/* MIPS S_UNLOCK is almost standard but requires a "sync" instruction */
#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__( \
		"	.set push		\n" \
		"	.set mips2		\n" \
		"	.set noreorder		\n" \
		"	.set nomacro		\n" \
		"	sync			\n" \
		"	.set pop		  " \
:		/* no outputs */ \
:		/* no inputs */	\
:		"memory"); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)

#endif	 /* __mips__ && !__sgi */


#if defined(__m32r__) && defined(HAVE_SYS_TAS_H)	/* Renesas' M32R */
#define HAS_TEST_AND_SET

#include <sys/tas.h>

typedef int slock_t;

#define TAS(lock) tas(lock)

#endif	 /* __m32r__ */


#if defined(__sh__)		/* Renesas' SuperH */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register int _res;

	/*
	 * This asm is coded as if %0 could be any register, but actually SuperH
	 * restricts the target of xor-immediate to be R0.  That's handled by
	 * the "z" constraint on _res.
	 */
	__asm__ __volatile__(
		"	tas.b @%2	\n"
		"	movt  %0	\n"
		"	xor   #1,%0	\n"
:		"=z"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory", "t");
	return _res;
}

#endif	 /* __sh__ */


/* These live in s_lock.c, but only for gcc */


#if defined(__m68k__) && !defined(__linux__)	/* non-Linux Motorola 68k */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;
#endif

/*
 * Default implementation of S_UNLOCK() for gcc/icc.
 *
 * Note that this implementation is unsafe for any platform that can reorder
 * a memory access (either load or store) after a following store.  That
 * happens not to be possible on x86 and most legacy architectures (some are
 * single-processor!), but many modern systems have weaker memory ordering.
 * Those that do must define their own version of S_UNLOCK() rather than
 * relying on this one.
 */
#if !defined(S_UNLOCK)
#define S_UNLOCK(lock)	\
	do { __asm__ __volatile__("" : : : "memory");  *(lock) = 0; } while (0)
#endif

#endif	/* defined(__GNUC__) || defined(__INTEL_COMPILER) */



/*
 * ---------------------------------------------------------------------
 * Platforms that use non-gcc inline assembly:
 * ---------------------------------------------------------------------
 */

#if !defined(HAS_TEST_AND_SET)	/* We didn't trigger above, let's try here */


#if defined(__hppa) || defined(__hppa__)	/* HP PA-RISC, GCC and HP compilers */
/*
 * HP's PA-RISC
 *
 * See src/backend/port/hpux/tas.c.template for details about LDCWX.  Because
 * LDCWX requires a 16-byte-aligned address, we declare slock_t as a 16-byte
 * struct.  The active word in the struct is whichever has the aligned address;
 * the other three words just sit at -1.
 *
 * When using gcc, we can inline the required assembly code.
 */
#define HAS_TEST_AND_SET

typedef struct
{
	int		sema[4];
} slock_t;

#define TAS_ACTIVE_WORD(lock)	((volatile int *) (((uintptr_t) (lock) + 15) & ~15))

#if defined(__GNUC__)

static __inline__ int
tas(volatile slock_t *lock)
{
	volatile int *lockword = TAS_ACTIVE_WORD(lock);
	register int lockval;

	__asm__ __volatile__(
		"	ldcwx	0(0,%2),%0	\n"
:		"=r"(lockval), "+m"(*lockword)
:		"r"(lockword)
:		"memory");
	return (lockval == 0);
}

/*
 * The hppa implementation doesn't follow the rules of this file: it provides
 * a gcc-specific implementation outside of the above defined(__GNUC__) block.
 * It does so to avoid duplication between the HP compiler and gcc.  So
 * undefine the generic fallback S_UNLOCK from above.
 */
#ifdef S_UNLOCK
#undef S_UNLOCK
#endif
#define S_UNLOCK(lock)	\
	do { \
		__asm__ __volatile__("" : : : "memory"); \
		*TAS_ACTIVE_WORD(lock) = -1; \
	} while (0)

#endif	 /* __GNUC__ */

#define S_INIT_LOCK(lock) \
	do { \
		volatile slock_t *lock_ = (lock); \
		lock_->sema[0] = -1; \
		lock_->sema[1] = -1; \
		lock_->sema[2] = -1; \
		lock_->sema[3] = -1; \
	} while (0)

#define S_LOCK_FREE(lock)	(*TAS_ACTIVE_WORD(lock) != 0)

#endif	 /* __hppa || __hppa__ */


#if defined(__hpux) && defined(__ia64) && !defined(__GNUC__)
/*
 * HP-UX on Itanium, non-gcc/icc compiler
 *
 * We assume that the compiler enforces strict ordering of loads/stores on
 * volatile data (see comments on the gcc-version earlier in this file).
 * Note that this assumption does *not* hold if you use the
 * +Ovolatile=__unordered option on the HP-UX compiler, so don't do that.
 *
 * See also Implementing Spinlocks on the Intel Itanium Architecture and
 * PA-RISC, by Tor Ekqvist and David Graves, for more information.  As of
 * this writing, version 1.0 of the manual is available at:
 * http://h21007.www2.hp.com/portal/download/files/unprot/itanium/spinlocks.pdf
 */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#include <ia64/sys/inline.h>
#define TAS(lock) _Asm_xchg(_SZ_W, lock, 1, _LDHINT_NONE)
/* On IA64, it's a win to use a non-locking test before the xchg proper */
#define TAS_SPIN(lock)	(*(lock) ? 1 : TAS(lock))
#define S_UNLOCK(lock)	\
	do { _Asm_mf(); (*(lock)) = 0; } while (0)

#endif	 /* HPUX on IA64, non gcc/icc */

#if defined(_AIX)	/* AIX */
/*
 * AIX (POWER)
 */
#define HAS_TEST_AND_SET

#include <sys/atomic_op.h>

typedef int slock_t;

#define TAS(lock)	_check_lock((slock_t *) (lock), 0, 1)
#define S_UNLOCK(lock)	_clear_lock((slock_t *) (lock), 0)
#endif	 /* _AIX */


/* These are in sunstudio_(sparc|x86).s */

#if defined(__SUNPRO_C) && (defined(__i386) || defined(__x86_64__) || defined(__sparc__) || defined(__sparc))
#define HAS_TEST_AND_SET

#if defined(__i386) || defined(__x86_64__) || defined(__sparcv9) || defined(__sparcv8plus)
typedef unsigned int slock_t;
#else
typedef unsigned char slock_t;
#endif

extern slock_t pg_atomic_cas(volatile slock_t *lock, slock_t with,
			  slock_t cmp);

#define TAS(a) (pg_atomic_cas((a), 1, 0) != 0)
#endif


#ifdef _MSC_VER
typedef LONG slock_t;

#define HAS_TEST_AND_SET
#define TAS(lock) (InterlockedCompareExchange(lock, 1, 0))

#define SPIN_DELAY() spin_delay()

/* If using Visual C++ on Win64, inline assembly is unavailable.
 * Use a _mm_pause intrinsic instead of rep nop.
 */
#if defined(_WIN64)
static __forceinline void
spin_delay(void)
{
	_mm_pause();
}
#else
static __forceinline void
spin_delay(void)
{
	/* See comment for gcc code.  Same code, MASM syntax */
	__asm rep nop;
}
#endif

#include <intrin.h>
#pragma intrinsic(_ReadWriteBarrier)

#define S_UNLOCK(lock)	\
	do { _ReadWriteBarrier(); (*(lock)) = 0; } while (0)

#endif


#endif	/* !defined(HAS_TEST_AND_SET) */


/* Blow up if we didn't have any way to do spinlocks */
#ifndef HAS_TEST_AND_SET
#error PostgreSQL does not have native spinlock support on this platform.  To continue the compilation, rerun configure using --disable-spinlocks.  However, performance will be poor.  Please report this to pgsql-bugs@postgresql.org.
#endif


#else	/* !HAVE_SPINLOCKS */


/*
 * Fake spinlock implementation using semaphores --- slow and prone
 * to fall foul of kernel limits on number of semaphores, so don't use this
 * unless you must!  The subroutines appear in spin.c.
 */
typedef int slock_t;

extern bool s_lock_free_sema(volatile slock_t *lock);
extern void s_unlock_sema(volatile slock_t *lock);
extern void s_init_lock_sema(volatile slock_t *lock, bool nested);
extern int	tas_sema(volatile slock_t *lock);

#define S_LOCK_FREE(lock)	s_lock_free_sema(lock)
#define S_UNLOCK(lock)	 s_unlock_sema(lock)
#define S_INIT_LOCK(lock)	s_init_lock_sema(lock, false)
#define TAS(lock)	tas_sema(lock)
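
/*
 * In outline (see spin.c): each emulated slock_t holds 1 + the index of a
 * pre-allocated semaphore, and TAS() reduces to a non-blocking semaphore
 * acquire.  tas_sema() is roughly (a sketch, eliding the range check on
 * the stored index):
 *
 *		int		lockndx = *lock;
 *
 *		return !PGSemaphoreTryLock(SpinlockSemaArray[lockndx - 1]);
 */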


#endif	 /* HAVE_SPINLOCKS */


/*
 * Default Definitions - override these above as needed.
 */

#if !defined(S_LOCK)
#define S_LOCK(lock) \
	(TAS(lock) ? s_lock((lock), __FILE__, __LINE__, PG_FUNCNAME_MACRO) : 0)
#endif	 /* S_LOCK */

#if !defined(S_LOCK_FREE)
#define S_LOCK_FREE(lock)	(*(lock) == 0)
#endif	 /* S_LOCK_FREE */

#if !defined(S_UNLOCK)
/*
 * Our default implementation of S_UNLOCK is essentially *(lock) = 0.  This
 * is unsafe if the platform can reorder a memory access (either load or
 * store) after a following store; platforms where this is possible must
 * define their own S_UNLOCK.  But CPU reordering is not the only concern:
 * if we simply defined S_UNLOCK() as an inline macro, the compiler might
 * reorder instructions from inside the critical section to occur after the
 * lock release.  Since the compiler probably can't know what the external
 * function s_unlock is doing, putting the same logic there should be adequate.
 * A sufficiently-smart globally optimizing compiler could break that
 * assumption, though, and the cost of a function call for every spinlock
 * release may hurt performance significantly, so we use this implementation
 * only for platforms where we don't know of a suitable intrinsic.  For the
 * most part, those are relatively obscure platform/compiler combinations to
 * which the PostgreSQL project does not have access.
 */
#define USE_DEFAULT_S_UNLOCK
extern void s_unlock(volatile slock_t *lock);
#define S_UNLOCK(lock)	s_unlock(lock)
#endif	 /* S_UNLOCK */
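
/*
 * For reference, the matching out-of-line s_unlock() in s_lock.c is
 * essentially just (a sketch, eliding the PA-RISC special case there):
 *
 *		void
 *		s_unlock(volatile slock_t *lock)
 *		{
 *			*lock = 0;
 *		}
 */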

#if !defined(S_INIT_LOCK)
#define S_INIT_LOCK(lock)	S_UNLOCK(lock)
#endif	 /* S_INIT_LOCK */

#if !defined(SPIN_DELAY)
#define SPIN_DELAY()	((void) 0)
#endif	 /* SPIN_DELAY */

#if !defined(TAS)
extern int	tas(volatile slock_t *lock);	/* in port/.../tas.s, or
						 * s_lock.c */

#define TAS(lock)	tas(lock)
#endif	 /* TAS */

#if !defined(TAS_SPIN)
#define TAS_SPIN(lock)	TAS(lock)
#endif	 /* TAS_SPIN */

extern slock_t dummy_spinlock;

/*
 * Platform-independent out-of-line support routines
 */
extern int s_lock(volatile slock_t *lock, const char *file, int line, const char *func);

/* Support for dynamic adjustment of spins_per_delay */
#define DEFAULT_SPINS_PER_DELAY  100

extern void set_spins_per_delay(int shared_spins_per_delay);
extern int	update_spins_per_delay(int shared_spins_per_delay);

/*
 * Support for spin delay which is useful in various places where
 * spinlock-like procedures take place.
 */
typedef struct
{
	int		spins;
	int		delays;
	int		cur_delay;
	const char *file;
	int		line;
	const char *func;
} SpinDelayStatus;

static inline void
init_spin_delay(SpinDelayStatus *status,
				const char *file, int line, const char *func)
{
	status->spins = 0;
	status->delays = 0;
	status->cur_delay = 0;
	status->file = file;
	status->line = line;
	status->func = func;
}

#define init_local_spin_delay(status) init_spin_delay(status, __FILE__, __LINE__, PG_FUNCNAME_MACRO)
void perform_spin_delay(SpinDelayStatus *status);
void finish_spin_delay(SpinDelayStatus *status);
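
/*
 * Typical use of the spin-delay support in a spinlock-like wait loop
 * (a sketch; condition_reached() stands for a caller-supplied test):
 *
 *		SpinDelayStatus delayStatus;
 *
 *		init_local_spin_delay(&delayStatus);
 *		while (!condition_reached())
 *			perform_spin_delay(&delayStatus);
 *		finish_spin_delay(&delayStatus);
 */
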
#endif	 /* S_LOCK_H */