glwthread_spin_init (lock);
}
-void
-asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
- const sigset_t *mask, sigset_t *saved_mask)
+static inline void
+do_lock (asyncsafe_spinlock_t *lock)
{
- sigprocmask (SIG_BLOCK, mask, saved_mask);
glwthread_spin_lock (lock);
}
-void
-asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+static inline void
+do_unlock (asyncsafe_spinlock_t *lock)
{
  if (glwthread_spin_unlock (lock))
    abort ();
- sigprocmask (SIG_SETMASK, saved_mask, NULL);
}
void
asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
{
  glwthread_spin_destroy (lock);
}
-#elif HAVE_PTHREAD_H
+#else
+
+# if HAVE_PTHREAD_H
/* Use POSIX threads. */
/* We don't use semaphores (although sem_post() is allowed in signal handlers),
   because it would require to link with -lrt on HP-UX 11, OSF/1, Solaris 10,
   and also because on macOS only named semaphores work.

   We don't use the C11 <stdatomic.h> (available in GCC >= 4.9) because it would
   require to link with -latomic. */
-# if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)) && !defined __ibmxl__
+#  if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)) && !defined __ibmxl__
/* Use GCC built-ins (available in GCC >= 4.7) that operate on the first byte of
the lock.
Documentation:
<https://gcc.gnu.org/onlinedocs/gcc-4.7.0/gcc/_005f_005fatomic-Builtins.html>
*/
-#  if 1
+#   if 1
/* An implementation that verifies the unlocks. */
void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  __atomic_store_n (lock, 0, __ATOMIC_SEQ_CST);
}
-void
-asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
- const sigset_t *mask, sigset_t *saved_mask)
+static inline void
+do_lock (asyncsafe_spinlock_t *lock)
{
- sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
-
  /* Wait until *lock becomes 0, then replace it with 1. */
  asyncsafe_spinlock_t zero;
  while (!(zero = 0,
           __atomic_compare_exchange_n (lock, &zero, 1, false,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)))
    ;
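  /* Note: on failure, __atomic_compare_exchange_n writes the value it
     observed into ZERO; the "zero = 0" in the loop condition above therefore
     resets the expected value before every retry.  */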
}
-void
-asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+static inline void
+do_unlock (asyncsafe_spinlock_t *lock)
{
  /* If *lock is 1, then replace it with 0. */
  asyncsafe_spinlock_t one = 1;
  if (!__atomic_compare_exchange_n (lock, &one, 0, false,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
    abort ();
-
- sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
}
-#  else
+#   else
/* An implementation that is a little bit more optimized, but does not verify
the unlocks. */
void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  __atomic_clear (lock, __ATOMIC_SEQ_CST);
}
-void
-asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
- const sigset_t *mask, sigset_t *saved_mask)
+static inline void
+do_lock (asyncsafe_spinlock_t *lock)
{
- sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
-
  while (__atomic_test_and_set (lock, __ATOMIC_SEQ_CST))
    ;
}
-void
-asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+static inline void
+do_unlock (asyncsafe_spinlock_t *lock)
{
__atomic_clear (lock, __ATOMIC_SEQ_CST);
-
- sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
}
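As background for this branch, a minimal standalone sketch of what the two
builtins do (illustrative only; this demo program is not part of the file):
__atomic_test_and_set atomically sets a byte and returns its previous state,
and __atomic_clear atomically stores 0.

  #include <stdbool.h>
  #include <stdio.h>

  int
  main (void)
  {
    unsigned char flag = 0;
    bool was_set = __atomic_test_and_set (&flag, __ATOMIC_SEQ_CST);
    printf ("%d\n", (int) was_set);            /* 0: flag was clear, now owned */
    was_set = __atomic_test_and_set (&flag, __ATOMIC_SEQ_CST);
    printf ("%d\n", (int) was_set);            /* 1: already set, a lock would spin */
    __atomic_clear (&flag, __ATOMIC_SEQ_CST);  /* release: atomically store 0 */
    return 0;
  }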
-#  endif
-
-void
-asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
-{
-}
+#   endif
-# elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) && !defined __ibmxl__
+#  elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) && !defined __ibmxl__
/* Use GCC built-ins (available in GCC >= 4.1).
Documentation:
<https://gcc.gnu.org/onlinedocs/gcc-4.1.2/gcc/Atomic-Builtins.html> */
void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  volatile asyncsafe_spinlock_t *vp = lock;
  *vp = 0;
  __sync_synchronize ();
}
-void
-asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
- const sigset_t *mask, sigset_t *saved_mask)
+static inline void
+do_lock (asyncsafe_spinlock_t *lock)
{
- sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
-
  /* Wait until *lock becomes 0, then replace it with 1. */
  while (__sync_val_compare_and_swap (lock, 0, 1) != 0)
    ;
}
-void
-asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+static inline void
+do_unlock (asyncsafe_spinlock_t *lock)
{
  /* If *lock is 1, then replace it with 0. */
  if (__sync_val_compare_and_swap (lock, 1, 0) != 1)
    abort ();
-
- sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
-}
-
-void
-asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
-{
}
-# elif defined _AIX
+#  elif defined _AIX
/* AIX */
void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  atomic_p vp = (int *) lock;
  _clear_lock (vp, 0);
}
-void
-asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
- const sigset_t *mask, sigset_t *saved_mask)
+static inline void
+do_lock (asyncsafe_spinlock_t *lock)
{
- sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
-
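  /* _check_lock atomically compares *vp with the second argument and, if they
     match, stores the third; it returns false exactly when the swap happened,
     i.e. when the lock was acquired.  */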
  atomic_p vp = (int *) lock;
  while (_check_lock (vp, 0, 1))
    ;
}
-void
-asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+static inline void
+do_unlock (asyncsafe_spinlock_t *lock)
{
  atomic_p vp = (int *) lock;
  if (_check_lock (vp, 1, 0))
    abort ();
-
- sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
}
-void
-asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
-{
-}
-
-# elif (defined __GNUC__ || defined __SUNPRO_C) && (defined __sparc || defined __i386 || defined __x86_64__)
+#  elif (defined __GNUC__ || defined __SUNPRO_C) && (defined __sparc || defined __i386 || defined __x86_64__)
/* For older versions of GCC, use inline assembly.
GCC and the Oracle Studio C compiler understand GCC's extended asm syntax,
but the plain Solaris cc understands only simple asm. */
static void
memory_barrier (void)
{
-#  if defined __GNUC__
-#   if defined __i386 || defined __x86_64__
+#   if defined __GNUC__
+#    if defined __i386 || defined __x86_64__
asm volatile ("mfence");
-#   endif
-#   if defined __sparc
+#    endif
+#    if defined __sparc
asm volatile ("membar 2");
-#   endif
-#  else
-#   if defined __i386 || defined __x86_64__
+#    endif
+#   else
+#    if defined __i386 || defined __x86_64__
asm ("mfence");
-#   endif
-#   if defined __sparc
+#    endif
+#    if defined __sparc
asm ("membar 2");
+#    endif
#   endif
-#  endif
}
/* Store NEWVAL in *VP if the old value *VP is == CMP.
   Return the old value. */
static unsigned int
atomic_compare_and_swap (volatile unsigned int *vp, unsigned int cmp,
                         unsigned int newval)
{
-#  if defined __GNUC__
+#   if defined __GNUC__
unsigned int oldval;
-#   if defined __i386 || defined __x86_64__
+#    if defined __i386 || defined __x86_64__
asm volatile (" lock\n cmpxchgl %3,(%1)"
: "=a" (oldval) : "r" (vp), "a" (cmp), "r" (newval) : "memory");
-#   endif
-#   if defined __sparc
+#    endif
+#    if defined __sparc
asm volatile (" cas [%1],%2,%3\n"
" mov %3,%0"
: "=r" (oldval) : "r" (vp), "r" (cmp), "r" (newval) : "memory");
-#   endif
+#    endif
return oldval;
-#  else /* __SUNPRO_C */
-#   if defined __x86_64__
+#   else /* __SUNPRO_C */
+#    if defined __x86_64__
asm (" movl %esi,%eax\n"
" lock\n cmpxchgl %edx,(%rdi)");
-#   elif defined __i386
+#    elif defined __i386
asm (" movl 16(%ebp),%ecx\n"
" movl 12(%ebp),%eax\n"
" movl 8(%ebp),%edx\n"
" lock\n cmpxchgl %ecx,(%edx)");
-#   endif
-#   if defined __sparc
+#    endif
+#    if defined __sparc
asm (" cas [%i0],%i1,%i2\n"
" mov %i2,%i0");
+#    endif
#   endif
-#  endif
}
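/* Note: in the __SUNPRO_C branches above, the asm statements have no outputs
   and the function has no explicit return; the code relies on the calling
   convention, under which the arguments arrive in %rdi/%rsi/%rdx (x86_64),
   on the stack (i386), or in %i0..%i2 (SPARC), and the value left in %eax
   resp. %i0 becomes the return value.  */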
void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
  memory_barrier ();
}
-void
-asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
- const sigset_t *mask, sigset_t *saved_mask)
+static inline void
+do_lock (asyncsafe_spinlock_t *lock)
{
- sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
-
  volatile unsigned int *vp = lock;
  while (atomic_compare_and_swap (vp, 0, 1) != 0)
    ;
}
-void
-asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+static inline void
+do_unlock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  if (atomic_compare_and_swap (vp, 1, 0) != 1)
    abort ();
-
- sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
-}
-
-void
-asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
-{
}
-# else
+#  else
/* Fallback code. It has some race conditions: the test of *vp and the
   subsequent store in do_lock below are not atomic, so two contenders can
   both observe 0 and both believe they hold the lock. */
void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
}
-void
-asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
- const sigset_t *mask, sigset_t *saved_mask)
+static inline void
+do_lock (asyncsafe_spinlock_t *lock)
{
- sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
-
  volatile unsigned int *vp = lock;
  while (*vp)
    ;
  *vp = 1;
}
-void
-asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+static inline void
+do_unlock (asyncsafe_spinlock_t *lock)
{
  volatile unsigned int *vp = lock;
  *vp = 0;
-
- sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
-}
-
-void
-asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
-{
}
-# endif
+#  endif
-#else
+# else
/* Provide a dummy implementation for single-threaded applications. */
void
asyncsafe_spin_init (asyncsafe_spinlock_t *lock)
{
}
-void
-asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
- const sigset_t *mask, sigset_t *saved_mask)
+static inline void
+do_lock (asyncsafe_spinlock_t *lock)
{
- sigprocmask (SIG_BLOCK, mask, saved_mask);
}
-void
-asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+static inline void
+do_unlock (asyncsafe_spinlock_t *lock)
{
- sigprocmask (SIG_SETMASK, saved_mask, NULL);
}
+# endif
+
void
asyncsafe_spin_destroy (asyncsafe_spinlock_t *lock)
{
}
#endif
+
+void
+asyncsafe_spin_lock (asyncsafe_spinlock_t *lock,
+                     const sigset_t *mask, sigset_t *saved_mask)
+{
+  sigprocmask (SIG_BLOCK, mask, saved_mask); /* equivalent to pthread_sigmask */
+  do_lock (lock);
+}
+
+void
+asyncsafe_spin_unlock (asyncsafe_spinlock_t *lock, const sigset_t *saved_mask)
+{
+  do_unlock (lock);
+  sigprocmask (SIG_SETMASK, saved_mask, NULL); /* equivalent to pthread_sigmask */
+}
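
For context, a minimal usage sketch of the resulting API (a hypothetical
caller: the handler, the shared counter, and the choice of SIGUSR1 are
illustrative only, and the declarations are assumed to come from
asyncsafe-spin.h). The mask passed to asyncsafe_spin_lock names the signals
whose handlers also take the lock; blocking them while the lock is held
ensures such a handler cannot interrupt the owning thread and then spin
forever on a lock that thread already holds.

  #include <signal.h>
  #include "asyncsafe-spin.h"

  static asyncsafe_spinlock_t lock;
  static volatile sig_atomic_t event_count;  /* state shared with the handler */

  static void
  handler (int sig)
  {
    sigset_t none, saved;
    (void) sig;
    sigemptyset (&none);
    /* SIGUSR1 is already blocked while its own handler runs, so no further
       signals need to be blocked here.  */
    asyncsafe_spin_lock (&lock, &none, &saved);
    event_count++;
    asyncsafe_spin_unlock (&lock, &saved);
  }

  int
  main (void)
  {
    sigset_t mask, saved;

    sigemptyset (&mask);
    sigaddset (&mask, SIGUSR1);
    asyncsafe_spin_init (&lock);
    signal (SIGUSR1, handler);

    /* Block SIGUSR1 around the critical section: if the handler could run
       here, it would try to acquire a lock this thread already holds.  */
    asyncsafe_spin_lock (&lock, &mask, &saved);
    event_count = 0;
    asyncsafe_spin_unlock (&lock, &saved);

    asyncsafe_spin_destroy (&lock);
    return 0;
  }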