#include <urcu/compiler.h>
#include <urcu/system.h>
-#ifndef __SIZEOF_LONG__
-#if defined(__x86_64__) || defined(__amd64__)
-#define __SIZEOF_LONG__ 8
-#else
-#define __SIZEOF_LONG__ 4
-#endif
-#endif
+#define UATOMIC_HAS_ATOMIC_BYTE
+#define UATOMIC_HAS_ATOMIC_SHORT
-#ifndef BITS_PER_LONG
-#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
-#endif
+#ifdef __cplusplus
+extern "C" {
+#endif
/*
* Derived from AO_compare_and_swap() and AO_test_and_set_full().
};
#define __hp(x) ((struct __uatomic_dummy *)(x))
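A note on the __hp() helper that every asm block below relies on: casting the target address to a pointer to an oversized dummy struct lets the "m"/"=m" operand constraints describe a memory location wide enough for any of the 1-, 2-, 4- or 8-byte accesses, so the compiler neither caches the value nor assumes a narrower access. In the upstream header the dummy type (its declaration is trimmed from the hunk context above) is essentially:

	struct __uatomic_dummy {
		unsigned long v[10];
	};
	#define __hp(x)	((struct __uatomic_dummy *)(x))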
-#define _uatomic_set(addr, v) STORE_SHARED(*(addr), (v))
-#define _uatomic_read(addr) LOAD_SHARED(*(addr))
+#define _uatomic_set(addr, v) CMM_STORE_SHARED(*(addr), (v))
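For readers tracking the rename: STORE_SHARED()/LOAD_SHARED() became CMM_STORE_SHARED()/CMM_LOAD_SHARED() as part of the cmm_/caa_ prefix cleanup, and _uatomic_read disappears here because the generic definition in <urcu/uatomic_generic.h> (included at the bottom of this file) now provides uatomic_read. As a rough sketch of what these primitives amount to, assuming the usual volatile-access idiom (the library's real macros carry additional memory-model annotations):

	/* Sketch only; not the library's exact definitions. */
	#define ACCESS_ONCE_SKETCH(x)	(*(volatile __typeof__(x) *)&(x))
	#define STORE_SHARED_SKETCH(x, v)	(ACCESS_ONCE_SKETCH(x) = (v))
	#define LOAD_SHARED_SKETCH(x)	ACCESS_ONCE_SKETCH(x)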
/* cmpxchg */
: "memory");
return result;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
unsigned long result = old;
: "memory");
return result;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
unsigned long result;
((__typeof__(*(addr))) __uatomic_exchange((addr), (unsigned long)(v), \
sizeof(*(addr))))
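The pattern just above repeats for every operation in this file: the public macro feeds sizeof(*(addr)) into an always_inline helper whose switch (len) collapses at compile time, leaving a single instruction per call site. For example, on a 64-bit build (hypothetical caller):

	unsigned int flags = 0;
	/* expands to __uatomic_exchange(&flags, 1UL, 4), i.e. one xchgl,
	 * which is implicitly locked on x86 */
	unsigned int old = uatomic_xchg(&flags, 1);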
-/* uatomic_add_return, uatomic_sub_return */
+/* uatomic_add_return */
static inline __attribute__((always_inline))
unsigned long __uatomic_add_return(void *addr, unsigned long val,
: "memory");
return result + (unsigned int)val;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
unsigned long result = val;
(unsigned long)(v), \
sizeof(*(addr))))
-#define _uatomic_sub_return(addr, v) _uatomic_add_return((addr), -(v))
-/* uatomic_add, uatomic_sub */
+/* uatomic_and */
+
+static inline __attribute__((always_inline))
+void __uatomic_and(void *addr, unsigned long val, int len)
+{
+ switch (len) {
+ case 1:
+ {
+ __asm__ __volatile__(
+ "lock; andb %1, %0"
+ : "=m"(*__hp(addr))
+ : "iq" ((unsigned char)val)
+ : "memory");
+ return;
+ }
+ case 2:
+ {
+ __asm__ __volatile__(
+ "lock; andw %1, %0"
+ : "=m"(*__hp(addr))
+ : "ir" ((unsigned short)val)
+ : "memory");
+ return;
+ }
+ case 4:
+ {
+ __asm__ __volatile__(
+ "lock; andl %1, %0"
+ : "=m"(*__hp(addr))
+ : "ir" ((unsigned int)val)
+ : "memory");
+ return;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ __asm__ __volatile__(
+ "lock; andq %1, %0"
+ : "=m"(*__hp(addr))
+ : "er" ((unsigned long)val)
+ : "memory");
+ return;
+ }
+#endif
+ }
+	/*
+	 * Generate an illegal instruction: unsupported operand sizes
+	 * cannot be caught at link time when optimizations are
+	 * disabled, so trap at run time instead.
+	 */
+ __asm__ __volatile__("ud2");
+ return;
+}
+
+#define _uatomic_and(addr, v) \
+ (__uatomic_and((addr), (unsigned long)(v), sizeof(*(addr))))
+
+/* uatomic_or */
+
+static inline __attribute__((always_inline))
+void __uatomic_or(void *addr, unsigned long val, int len)
+{
+ switch (len) {
+ case 1:
+ {
+ __asm__ __volatile__(
+ "lock; orb %1, %0"
+ : "=m"(*__hp(addr))
+ : "iq" ((unsigned char)val)
+ : "memory");
+ return;
+ }
+ case 2:
+ {
+ __asm__ __volatile__(
+ "lock; orw %1, %0"
+ : "=m"(*__hp(addr))
+ : "ir" ((unsigned short)val)
+ : "memory");
+ return;
+ }
+ case 4:
+ {
+ __asm__ __volatile__(
+ "lock; orl %1, %0"
+ : "=m"(*__hp(addr))
+ : "ir" ((unsigned int)val)
+ : "memory");
+ return;
+ }
+#if (CAA_BITS_PER_LONG == 64)
+ case 8:
+ {
+ __asm__ __volatile__(
+ "lock; orq %1, %0"
+ : "=m"(*__hp(addr))
+ : "er" ((unsigned long)val)
+ : "memory");
+ return;
+ }
+#endif
+ }
+	/*
+	 * Generate an illegal instruction: unsupported operand sizes
+	 * cannot be caught at link time when optimizations are
+	 * disabled, so trap at run time instead.
+	 */
+ __asm__ __volatile__("ud2");
+ return;
+}
+
+#define _uatomic_or(addr, v) \
+ (__uatomic_or((addr), (unsigned long)(v), sizeof(*(addr))))
+
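The new uatomic_and()/uatomic_or() provide atomic bitwise read-modify-write for the case where the old value is not needed, which is exactly what a plain lock and / lock or can do without a cmpxchg loop. The operand constraints differ by width: "iq" because 8-bit operands need a byte-addressable register on i386, "ir" for the 16- and 32-bit cases, and "er" because 64-bit instructions only accept sign-extended 32-bit immediates. A minimal usage sketch, with hypothetical flag names:

	#define FLAG_DIRTY	(1U << 0)

	static unsigned int obj_flags;

	static void mark_dirty(void)
	{
		uatomic_or(&obj_flags, FLAG_DIRTY);	/* atomically set the bit */
	}

	static void clear_dirty(void)
	{
		uatomic_and(&obj_flags, ~FLAG_DIRTY);	/* atomically clear the bit */
	}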
+/* uatomic_add */
static inline __attribute__((always_inline))
void __uatomic_add(void *addr, unsigned long val, int len)
: "memory");
return;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
__asm__ __volatile__(
#define _uatomic_add(addr, v) \
(__uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
-#define _uatomic_sub(addr, v) _uatomic_add((addr), -(v))
-
/* uatomic_inc */
: "memory");
return;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
__asm__ __volatile__(
: "memory");
return;
}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
case 8:
{
__asm__ __volatile__(
#define _uatomic_dec(addr) (__uatomic_dec((addr), sizeof(*(addr))))
-#if ((BITS_PER_LONG != 64) && defined(CONFIG_URCU_COMPAT_ARCH))
-extern int __urcu_cas_avail;
-extern int __urcu_cas_init(void);
+#if ((CAA_BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
+extern int __rcu_cas_avail;
+extern int __rcu_cas_init(void);
#define UATOMIC_COMPAT(insn) \
- ((likely(__urcu_cas_avail > 0)) \
+ ((likely(__rcu_cas_avail > 0)) \
? (_uatomic_##insn) \
- : ((unlikely(__urcu_cas_avail < 0) \
- ? ((__urcu_cas_init() > 0) \
+ : ((unlikely(__rcu_cas_avail < 0) \
+ ? ((__rcu_cas_init() > 0) \
? (_uatomic_##insn) \
: (compat_uatomic_##insn)) \
: (compat_uatomic_##insn))))
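UATOMIC_COMPAT() is the 32-bit fallback dispatcher: __rcu_cas_avail starts below zero ("not probed yet"), the first caller runs __rcu_cas_init() to find out whether the processor implements cmpxchg (missing on i386-class CPUs), and from then on every operation resolves to either the native _uatomic_* or the emulated compat_uatomic_* version. Conceptually the probe behaves like the sketch below; the real one lives in the library's compat_arch_x86.c and inspects the running CPU, and cpu_has_cmpxchg() here is a hypothetical stand-in:

	static int cpu_has_cmpxchg(void)	/* hypothetical probe */
	{
		return 1;	/* pretend we detected an i486 or newer */
	}

	int __rcu_cas_avail = -1;	/* < 0: not probed yet */

	int __rcu_cas_init(void)
	{
		__rcu_cas_avail = cpu_has_cmpxchg() ? 1 : 0;
		return __rcu_cas_avail;
	}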
(unsigned long)(_new), \
sizeof(*(addr))))
extern unsigned long _compat_uatomic_xchg(void *addr,
					unsigned long _new, int len);
+extern void _compat_uatomic_and(void *addr,
+				unsigned long _new, int len);
+#define compat_uatomic_and(addr, v) \
+	(_compat_uatomic_and((addr), \
+			(unsigned long)(v), \
+			sizeof(*(addr))))
+
+extern void _compat_uatomic_or(void *addr,
+			       unsigned long _new, int len);
+#define compat_uatomic_or(addr, v) \
+	(_compat_uatomic_or((addr), \
+			(unsigned long)(v), \
+			sizeof(*(addr))))
+
extern unsigned long _compat_uatomic_add_return(void *addr,
					unsigned long _new, int len);
#define compat_uatomic_add_return(addr, v) \
((__typeof__(*(addr))) _compat_uatomic_add_return((addr), \
(unsigned long)(v), \
sizeof(*(addr))))
-#define compat_uatomic_sub_return(addr, v) \
- compat_uatomic_add_return((addr), -(v))
#define compat_uatomic_add(addr, v) \
((void)compat_uatomic_add_return((addr), (v)))
-#define compat_uatomic_sub(addr, v) \
- ((void)compat_uatomic_sub_return((addr), (v)))
#define compat_uatomic_inc(addr) \
(compat_uatomic_add((addr), 1))
#define compat_uatomic_dec(addr) \
- (compat_uatomic_sub((addr), 1))
+ (compat_uatomic_add((addr), -1))
#else
#define UATOMIC_COMPAT(insn) (_uatomic_##insn)
#endif
-/* Read is atomic even in compat mode */
-#define uatomic_read(addr) _uatomic_read(addr)
-
+/*
+ * Read is atomic even in compat mode: the generic uatomic_read()
+ * from <urcu/uatomic_generic.h> is used unchanged.
+ */
#define uatomic_set(addr, v) \
UATOMIC_COMPAT(set(addr, v))
+
#define uatomic_cmpxchg(addr, old, _new) \
UATOMIC_COMPAT(cmpxchg(addr, old, _new))
#define uatomic_xchg(addr, v) \
UATOMIC_COMPAT(xchg(addr, v))
+#define uatomic_and(addr, v) \
+ UATOMIC_COMPAT(and(addr, v))
+#define uatomic_or(addr, v) \
+ UATOMIC_COMPAT(or(addr, v))
#define uatomic_add_return(addr, v) \
UATOMIC_COMPAT(add_return(addr, v))
-#define uatomic_sub_return(addr, v) \
- UATOMIC_COMPAT(sub_return(addr, v))
+
#define uatomic_add(addr, v) UATOMIC_COMPAT(add(addr, v))
-#define uatomic_sub(addr, v) UATOMIC_COMPAT(sub(addr, v))
#define uatomic_inc(addr) UATOMIC_COMPAT(inc(addr))
#define uatomic_dec(addr) UATOMIC_COMPAT(dec(addr))
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/uatomic_generic.h>
+
#endif /* _URCU_ARCH_UATOMIC_X86_H */
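After this change the public x86 entry points are uatomic_read/uatomic_set, uatomic_cmpxchg, uatomic_xchg, uatomic_add_return, uatomic_add, uatomic_inc, uatomic_dec, plus the new uatomic_and/uatomic_or; the dropped sub variants are expressed as adds of a negated value. A short, self-contained usage sketch of the resulting API (assuming the arch-dispatch header, urcu/uatomic_arch.h in releases of this vintage):

	#include <urcu/uatomic_arch.h>

	static unsigned long counter;

	static void example(void)
	{
		unsigned long old;

		uatomic_set(&counter, 0);
		uatomic_inc(&counter);		/* counter == 1 */
		uatomic_add(&counter, 9);	/* counter == 10 */
		old = uatomic_cmpxchg(&counter, 10, 42);
		if (old == 10) {
			/* we won the race; counter is now 42 */
		}
	}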