/*
 * PostgreSQL s_lock.h -- hardware-dependent spinlock primitives.
 *
 * NOTE(review): this text has been mangled by extraction.  The numbers
 * fused into each line (100, 103, 136, ...) are the ORIGINAL file's line
 * numbers, and many intervening lines -- including most of the asm
 * instruction strings -- are missing.  Comments annotate only what is
 * visible; do not treat any fragment here as complete.
 *
 * Section: i386 with GCC or the Intel compiler.  tas() is test-and-set:
 * it atomically stores 1 into *lock and returns the previous value
 * (nonzero => lock was already held).  The exchange instruction itself
 * is not visible in this fragment -- presumably xchgb; confirm against
 * the unmangled source.
 */
100 #error "s_lock.h may not be included from frontend code" 103 #ifdef HAVE_SPINLOCKS 105 #if defined(__GNUC__) || defined(__INTEL_COMPILER) 136 #define HAS_TEST_AND_SET 140 #define TAS(lock) tas(lock) 142 static __inline__
int 158 __asm__ __volatile__(
/*
 * asm operands: _res is read/write in a byte-accessible register ("+q"),
 * *lock is read/write directly in memory ("+m").
 */
164 :
"+q"(_res),
"+m"(*lock)
/*
 * spin_delay(): executed inside the spin loop; its asm body (original
 * lines 198ff) is not visible here.
 */
170 #define SPIN_DELAY() spin_delay() 172 static __inline__
void 198 __asm__ __volatile__(
/*
 * Section: x86_64.  Same test-and-set scheme as i386.  TAS_SPIN first
 * reads the lock non-atomically and only attempts the atomic TAS when
 * it looks free -- this keeps the cache line shared while spinning
 * instead of bouncing it with repeated locked exchanges.
 */
206 #define HAS_TEST_AND_SET 210 #define TAS(lock) tas(lock) 221 #define TAS_SPIN(lock) (*(lock) ? 1 : TAS(lock)) 223 static __inline__
int 224 tas(
volatile slock_t *lock)
/* _res starts at 1; the (not-visible) exchange swaps it with *lock. */
226 register slock_t _res = 1;
228 __asm__ __volatile__(
/* operands: "+q" byte register for _res, "+m" memory for *lock */
231 :
"+q"(_res),
"+m"(*lock)
/* spin_delay() for x86_64; asm body (original lines 246ff) not visible. */
237 #define SPIN_DELAY() spin_delay() 239 static __inline__
void 246 __asm__ __volatile__(
/*
 * Section: Intel Itanium (ia64).  Two tas() variants: inline asm for
 * GCC (#ifndef __INTEL_COMPILER), and the compiler intrinsic
 * _InterlockedExchange for icc.  TAS_SPIN again pre-tests the lock
 * before the atomic operation.
 */
253 #if defined(__ia64__) || defined(__ia64) 274 #define HAS_TEST_AND_SET 278 #define TAS(lock) tas(lock) 281 #define TAS_SPIN(lock) (*(lock) ? 1 : TAS(lock)) 283 #ifndef __INTEL_COMPILER 285 static __inline__
int 286 tas(
volatile slock_t *lock)
/* asm instruction (original lines ~291) not visible -- presumably xchg4. */
290 __asm__ __volatile__(
/* "=r": ret is write-only output; "+m": *lock read/write in memory */
292 :
"=r"(ret),
"+m"(*lock)
/* Intel-compiler variant: intrinsic does the atomic exchange. */
300 static __inline__
int 301 tas(
volatile slock_t *lock)
305 ret = _InterlockedExchange(lock,1);
/*
 * ia64 S_UNLOCK: full memory barrier before the releasing store, so all
 * protected writes are visible before the lock appears free.
 *
 * Section: ARM / AArch64.  Uses the GCC atomic builtins when available:
 * __sync_lock_test_and_set is an acquire-barrier atomic exchange, and
 * __sync_lock_release is the matching release-barrier store of 0.
 */
311 #define S_UNLOCK(lock) \ 312 do { __memory_barrier(); *(lock) = 0; } while (0) 323 #if defined(__arm__) || defined(__arm) || defined(__aarch64__) || defined(__aarch64) 324 #ifdef HAVE_GCC__SYNC_INT32_TAS 325 #define HAS_TEST_AND_SET 327 #define TAS(lock) tas(lock) 331 static __inline__
int 332 tas(
volatile slock_t *lock)
/* returns the previous lock value: nonzero means already locked */
334 return __sync_lock_test_and_set(lock, 1);
337 #define S_UNLOCK(lock) __sync_lock_release(lock) 344 #if defined(__s390__) || defined(__s390x__) 345 #define HAS_TEST_AND_SET 349 #define TAS(lock) tas(lock) 351 static __inline__
/*
 * Section: IBM S/390 (zSeries) tas().  The asm instruction text
 * (original lines ~357) is not visible -- presumably compare-and-swap;
 * confirm against the unmangled source.
 */
int 352 tas(
volatile slock_t *lock)
356 __asm__ __volatile__(
/* "+d": _res read/write in a general (data) register; "+m": *lock in memory */
358 :
"+d"(_res),
"+m"(*lock)
/*
 * Section: SPARC.  tas() uses ldstub (load-store unsigned byte), the
 * classic SPARC atomic test-and-set: it loads *lock into _res and
 * stores all-ones into the byte in one atomic operation.
 *
 * The memory-model handling is version-dependent:
 *   - sparc v7: no barrier instruction available/needed here;
 *   - sparc v8: "stbar" (store barrier);
 *   - v8plus/v9: explicit "membar" with the needed ordering bits.
 * The acquire-side membar after ldstub uses #LoadStore|#LoadLoad; the
 * release-side S_UNLOCK issues the barrier BEFORE clearing the lock so
 * protected accesses cannot drift past the unlock.
 */
367 #if defined(__sparc__) 374 #define HAS_TEST_AND_SET 378 #define TAS(lock) tas(lock) 380 static __inline__
int 381 tas(
volatile slock_t *lock)
383 register slock_t _res;
390 __asm__ __volatile__(
" ldstub [%2], %0 \n" 392 :
/* "=&r": early-clobber output (must not overlap the input address reg) */
"=r"(_res),
"+m"(*lock)
395 #if defined(__sparcv7) || defined(__sparc_v7__) 400 #elif defined(__sparcv8) || defined(__sparc_v8__) 402 __asm__ __volatile__ (
"stbar \n":::
"memory");
408 __asm__ __volatile__ (
"membar #LoadStore | #LoadLoad \n":::
"memory");
/* S_UNLOCK variants: barrier (if the ISA revision needs one), then clear. */
413 #if defined(__sparcv7) || defined(__sparc_v7__) 419 #elif defined(__sparcv8) || defined(__sparc_v8__) 421 #define S_UNLOCK(lock) \ 424 __asm__ __volatile__ ("stbar \n":::"memory"); \ 425 *((volatile slock_t *) (lock)) = 0; \ 432 #define S_UNLOCK(lock) \ 435 __asm__ __volatile__ ("membar #LoadStore | #StoreStore \n":::"memory"); \ 436 *((volatile slock_t *) (lock)) = 0; \ 444 #if defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__) 445 #define HAS_TEST_AND_SET 449 #define TAS(lock) tas(lock) 452 #define TAS_SPIN(lock) (*(lock) ? 1 : TAS(lock)) 470 static __inline__
/*
 * Section: PowerPC tas().  Built on a lwarx/stwcx. (load-reserved /
 * store-conditional) loop; only the lwarx line is visible here.  The
 * ",1" operand on lwarx is the EH mutex hint (USE_PPC_LWARX_MUTEX_HINT),
 * telling newer cores the reservation is for a lock acquisition.
 *
 * S_UNLOCK: barrier then store 0.  "lwsync" (lightweight sync) is used
 * where available (USE_PPC_LWSYNC); otherwise the heavier full "sync".
 */
int 471 tas(
volatile slock_t *lock)
476 __asm__ __volatile__(
477 #ifdef USE_PPC_LWARX_MUTEX_HINT
" lwarx %0,0,%3,1 \n" 489 #ifdef USE_PPC_LWSYNC
/*
 * operands: "=&b" early-clobber base register for the temp (b excludes
 * r0, which lwarx would read as literal zero), "=r" for the result,
 * "+m" for the lock word itself.
 */
496 :
"=&b"(_t),
"=r"(_res),
"+m"(*lock)
506 #ifdef USE_PPC_LWSYNC 507 #define S_UNLOCK(lock) \ 510 __asm__ __volatile__ (" lwsync \n" ::: "memory"); \ 511 *((volatile slock_t *) (lock)) = 0; \ 514 #define S_UNLOCK(lock) \ 517 __asm__ __volatile__ (" sync \n" ::: "memory"); \ 518 *((volatile slock_t *) (lock)) = 0; \ 526 #if (defined(__mc68000__) || defined(__m68k__)) && defined(__linux__) 527 #define HAS_TEST_AND_SET 531 #define TAS(lock) tas(lock) 533 static __inline__
/*
 * Section: Motorola 68k on Linux, tas().  Instruction text (original
 * lines ~539-541) is not visible -- presumably the m68k "tas"
 * instruction; confirm against the unmangled source.
 */
int 534 tas(
volatile slock_t *lock)
538 __asm__ __volatile__(
/* "=d": result in a data register; "+m": *lock read/write in memory */
542 :
"=d"(rv),
"+m"(*lock)
552 #if defined(__m88k__) 553 #define HAS_TEST_AND_SET 557 #define TAS(lock) tas(lock) 559 static __inline__
/*
 * Section: Motorola 88k tas().  "xmem" atomically exchanges the register
 * holding _res (preloaded with 1) with the lock word in memory, so _res
 * ends up holding the previous lock value.
 */
int 560 tas(
volatile slock_t *lock)
562 register slock_t _res = 1;
564 __asm__ __volatile__(
" xmem %0, %2, %%r0 \n" 566 :
"+r"(_res),
"+m"(*lock)
/* Next section start (original ~574-586): platform not identifiable from
 * the visible fragment -- the guard line is missing. */
580 #define HAS_TEST_AND_SET 584 #define TAS(lock) tas(lock) 586 static __inline__
/*
 * Section: VAX tas().  "bbssi" = branch on bit set and set, interlocked:
 * atomically tests and sets bit 0 of *lock, branching to local label 1
 * when the bit was already set (lock held).
 */
int 587 tas(
volatile slock_t *lock)
591 __asm__ __volatile__(
" bbssi $0, (%2), 1f \n" 596 :
/* "=&r": early-clobber result register, must not alias the lock address */
"=&r"(_res),
"+m"(*lock)
605 #if defined(__mips__) && !defined(__sgi) 606 #define HAS_TEST_AND_SET 610 #define TAS(lock) tas(lock) 626 #define MIPS_SET_MIPS2 " .set mips2 \n" 628 #define MIPS_SET_MIPS2 631 static __inline__
/*
 * Section: MIPS (non-SGI) tas().  The asm body (original lines ~639-649)
 * is not visible -- presumably an ll/sc (load-linked/store-conditional)
 * loop; MIPS_SET_MIPS2 above temporarily raises the ISA level so ll/sc
 * assemble even when compiling for MIPS I.  Confirm against the
 * unmangled source.
 */
int 632 tas(
volatile slock_t *lock)
/* local copy of the pointer so the asm can address the word via "R" */
634 register volatile slock_t *_l = lock;
638 __asm__ __volatile__(
/* "=&r" x2: early-clobber result and scratch; "+R": lock word in memory */
650 :
"=&r" (_res),
"=&r" (_tmp),
"+R" (*_l)
int 696 tas(
volatile slock_t *lock)
705 __asm__ __volatile__(
709 :
"=z"(_res),
"+m"(*lock)
721 #if defined(__m68k__) && !defined(__linux__) 722 #define HAS_TEST_AND_SET 737 #if !defined(S_UNLOCK) 738 #define S_UNLOCK(lock) \ 739 do { __asm__ __volatile__("" : : : "memory"); *(lock) = 0; } while (0) 752 #if !defined(HAS_TEST_AND_SET) 755 #if defined(__hppa) || defined(__hppa__) 766 #define HAS_TEST_AND_SET 773 #define TAS_ACTIVE_WORD(lock) ((volatile int *) (((uintptr_t) (lock) + 15) & ~15)) 775 #if defined(__GNUC__) 777 static __inline__
/*
 * Section: HP PA-RISC tas().  PA-RISC's ldcwx (load-and-clear-word,
 * indexed) requires a 16-byte-aligned word: TAS_ACTIVE_WORD rounds the
 * slock_t address up to the next 16-byte boundary.  The lock sense is
 * INVERTED relative to other ports: free is nonzero (-1), held is 0 --
 * visible in S_LOCK_FREE (!= 0), S_UNLOCK (stores -1), S_INIT_LOCK
 * (sets all four candidate words to -1), and "return (lockval == 0)"
 * meaning "TAS failed, lock was held".
 */
int 778 tas(
volatile slock_t *lock)
780 volatile int *lockword = TAS_ACTIVE_WORD(lock);
781 register int lockval;
783 __asm__ __volatile__(
/* ldcwx atomically loads *lockword into lockval and clears the word */
" ldcwx 0(0,%2),%0 \n" 785 :
"=r"(lockval),
"+m"(*lockword)
788 return (lockval == 0);
/*
 * Further sections: HP-UX/ia64 with the HP compiler (_Asm_xchg atomic
 * exchange, _Asm_mf memory fence in S_UNLOCK); AIX (_check_lock /
 * _clear_lock from <sys/atomic_op.h>); Sun Studio on x86/SPARC via an
 * external pg_atomic_cas (declaration continues on the next line).
 */
800 #define S_UNLOCK(lock) \ 802 __asm__ __volatile__("" : : : "memory"); \ 803 *TAS_ACTIVE_WORD(lock) = -1; \ 808 #define S_INIT_LOCK(lock) \ 810 volatile slock_t *lock_ = (lock); \ 811 lock_->sema[0] = -1; \ 812 lock_->sema[1] = -1; \ 813 lock_->sema[2] = -1; \ 814 lock_->sema[3] = -1; \ 817 #define S_LOCK_FREE(lock) (*TAS_ACTIVE_WORD(lock) != 0) 822 #if defined(__hpux) && defined(__ia64) && !defined(__GNUC__) 836 #define HAS_TEST_AND_SET 840 #include <ia64/sys/inline.h> 841 #define TAS(lock) _Asm_xchg(_SZ_W, lock, 1, _LDHINT_NONE) 843 #define TAS_SPIN(lock) (*(lock) ? 1 : TAS(lock)) 844 #define S_UNLOCK(lock) \ 845 do { _Asm_mf(); (*(lock)) = 0; } while (0) 853 #define HAS_TEST_AND_SET 855 #include <sys/atomic_op.h> 859 #define TAS(lock) _check_lock((slock_t *) (lock), 0, 1) 860 #define S_UNLOCK(lock) _clear_lock((slock_t *) (lock), 0) 866 #if defined(__SUNPRO_C) && (defined(__i386) || defined(__x86_64__) || defined(__sparc__) || defined(__sparc)) 867 #define HAS_TEST_AND_SET 869 #if defined(__i386) || defined(__x86_64__) || defined(__sparcv9) || defined(__sparcv8plus) 875 extern slock_t pg_atomic_cas(
/*
 * Sun Studio: TAS succeeds when pg_atomic_cas swaps 0 -> 1 (returns the
 * old value; nonzero means the lock was already held).
 *
 * Win32 (MSVC): TAS via InterlockedCompareExchange; S_UNLOCK uses the
 * _ReadWriteBarrier compiler intrinsic before the clearing store.
 *
 * Then the no-native-spinlock fallback: when HAS_TEST_AND_SET is still
 * undefined, a hard #error directs the user to --disable-spinlocks; the
 * semaphore emulation (tas_sema / s_unlock_sema / s_init_lock_sema,
 * defined in s_lock.c) implements the S_* macros in that mode.
 *
 * Finally the generic defaults: S_LOCK spins via the out-of-line
 * s_lock() only when the inline TAS fails; S_LOCK_FREE defaults to a
 * plain read; platforms without a safe inline release get the
 * out-of-line s_unlock() (USE_DEFAULT_S_UNLOCK).
 */
volatile slock_t *lock, slock_t with,
878 #define TAS(a) (pg_atomic_cas((a), 1, 0) != 0) 885 #define HAS_TEST_AND_SET 886 #define TAS(lock) (InterlockedCompareExchange(lock, 1, 0)) 888 #define SPIN_DELAY() spin_delay() 894 static __forceinline
void 900 static __forceinline
void 909 #pragma intrinsic(_ReadWriteBarrier) 911 #define S_UNLOCK(lock) \ 912 do { _ReadWriteBarrier(); (*(lock)) = 0; } while (0) 921 #ifndef HAS_TEST_AND_SET 922 #error PostgreSQL does not have native spinlock support on this platform. To continue the compilation, rerun configure using --disable-spinlocks. However, performance will be poor. Please report this to pgsql-bugs@lists.postgresql.org. 939 extern int tas_sema(
volatile slock_t *lock);
941 #define S_LOCK_FREE(lock) s_lock_free_sema(lock) 942 #define S_UNLOCK(lock) s_unlock_sema(lock) 943 #define S_INIT_LOCK(lock) s_init_lock_sema(lock, false) 944 #define TAS(lock) tas_sema(lock) 955 #define S_LOCK(lock) \ 956 (TAS(lock) ? s_lock((lock), __FILE__, __LINE__, PG_FUNCNAME_MACRO) : 0) 959 #if !defined(S_LOCK_FREE) 960 #define S_LOCK_FREE(lock) (*(lock) == 0) 963 #if !defined(S_UNLOCK) 980 #define USE_DEFAULT_S_UNLOCK 981 extern void s_unlock(
volatile slock_t *lock);
982 #define S_UNLOCK(lock) s_unlock(lock)
volatile slock_t *lock);
997 #define TAS(lock) tas(lock) 1000 #if !defined(TAS_SPIN) 1001 #define TAS_SPIN(lock) TAS(lock) 1009 extern int s_lock(
volatile slock_t *lock,
const char *file,
int line,
const char *func);
1012 #define DEFAULT_SPINS_PER_DELAY 100 1033 const char *file,
int line,
const char *func)
1038 status->
file = file;
1039 status->
line = line;
1040 status->
func = func;
1043 #define init_local_spin_delay(status) init_spin_delay(status, __FILE__, __LINE__, PG_FUNCNAME_MACRO)
/*
 * NOTE(review): the lines below are not part of s_lock.h proper -- they
 * look like an extraction-generated index of function signatures from
 * s_lock.c (and, for the last two, from unrelated files).  They are not
 * valid C as written (no semicolons/bodies).
 *
 * NOTE(review): the "status" line is visibly garbled -- "static void"
 * is duplicated and the pg_attribute_printf(1 attribute is truncated;
 * recover it from the original source rather than guessing here.
 * In the "cmp" line, "chr" is presumably the regex engine's character
 * type, not a typo for "char" -- verify against the source it came from.
 */
void s_init_lock_sema(volatile slock_t *lock, bool nested)
int update_spins_per_delay(int shared_spins_per_delay)
void perform_spin_delay(SpinDelayStatus *status)
int s_lock(volatile slock_t *lock, const char *file, int line, const char *func)
void finish_spin_delay(SpinDelayStatus *status)
int tas_sema(volatile slock_t *lock)
static void init_spin_delay(SpinDelayStatus *status, const char *file, int line, const char *func)
void s_unlock_sema(volatile slock_t *lock)
bool s_lock_free_sema(volatile slock_t *lock)
void set_spins_per_delay(int shared_spins_per_delay)
static void static void status(const char *fmt,...) pg_attribute_printf(1
static int cmp(const chr *x, const chr *y, size_t len)