X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu%2Fuatomic_arch_x86.h;h=aed513b67b4146f8f50bf52f133de0079611d97c;hp=39ec407e58c0612458ffc82209e20fc711dec0fd;hb=6cf3827cf5809fbcee555fb08286ad756be42dad;hpb=618c4ae180e1d9406bcad808cd0cb88addc992a1

diff --git a/urcu/uatomic_arch_x86.h b/urcu/uatomic_arch_x86.h
index 39ec407..aed513b 100644
--- a/urcu/uatomic_arch_x86.h
+++ b/urcu/uatomic_arch_x86.h
@@ -21,18 +21,14 @@
  */
 
 #include <urcu/compiler.h>
+#include <urcu/system.h>
 
-#ifndef __SIZEOF_LONG__
-#if defined(__x86_64__) || defined(__amd64__)
-#define __SIZEOF_LONG__ 8
-#else
-#define __SIZEOF_LONG__ 4
-#endif
-#endif
+#define UATOMIC_HAS_ATOMIC_BYTE
+#define UATOMIC_HAS_ATOMIC_SHORT
 
-#ifndef BITS_PER_LONG
-#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
-#endif
+#ifdef __cplusplus
+extern "C" {
+#endif
 
 /*
  * Derived from AO_compare_and_swap() and AO_test_and_set_full().
@@ -43,17 +39,12 @@ struct __uatomic_dummy {
 };
 #define __hp(x)	((struct __uatomic_dummy *)(x))
 
-#define uatomic_set(addr, v)				\
-do {							\
-	ACCESS_ONCE(*(addr)) = (v);			\
-} while (0)
-
-#define uatomic_read(addr)	ACCESS_ONCE(*(addr))
+#define _uatomic_set(addr, v)	CMM_STORE_SHARED(*(addr), (v))
 
 /* cmpxchg */
 
 static inline __attribute__((always_inline))
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
+unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
 			      unsigned long _new, int len)
 {
 	switch (len) {
@@ -90,7 +81,7 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
 			: "memory");
 		return result;
 	}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
 	case 8:
 	{
 		unsigned long result = old;
@@ -110,15 +101,15 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
 	return 0;
 }
 
-#define uatomic_cmpxchg(addr, old, _new)				\
-	((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
-						(unsigned long)(_new),	\
+#define _uatomic_cmpxchg(addr, old, _new)				\
+	((__typeof__(*(addr))) __uatomic_cmpxchg((addr), (unsigned long)(old),\
+						(unsigned long)(_new),	\
 						sizeof(*(addr))))
 
 /* xchg */
 
 static inline __attribute__((always_inline))
-unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
+unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
 {
 	/* Note: the "xchg" instruction does not need a "lock" prefix. */
 	switch (len) {
@@ -152,7 +143,7 @@ unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
 			: "memory");
 		return result;
 	}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
 	case 8:
 	{
 		unsigned long result;
@@ -171,14 +162,14 @@ unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
 	return 0;
 }
 
-#define uatomic_xchg(addr, v)						\
-	((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
+#define _uatomic_xchg(addr, v)						\
+	((__typeof__(*(addr))) __uatomic_exchange((addr), (unsigned long)(v), \
 						sizeof(*(addr))))
 
-/* uatomic_add_return, uatomic_sub_return */
+/* uatomic_add_return */
 
 static inline __attribute__((always_inline))
-unsigned long _uatomic_add_return(void *addr, unsigned long val,
+unsigned long __uatomic_add_return(void *addr, unsigned long val,
 				 int len)
 {
 	switch (len) {
@@ -215,7 +206,7 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val,
 			: "memory");
 		return result + (unsigned int)val;
 	}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
 	case 8:
 	{
 		unsigned long result = val;
@@ -235,17 +226,15 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val,
 	return 0;
 }
 
-#define uatomic_add_return(addr, v)					\
-	((__typeof__(*(addr))) _uatomic_add_return((addr),		\
+#define _uatomic_add_return(addr, v)					\
+	((__typeof__(*(addr))) __uatomic_add_return((addr),		\
 						  (unsigned long)(v),	\
 						  sizeof(*(addr))))
 
-#define uatomic_sub_return(addr, v)	uatomic_add_return((addr), -(v))
-
-/* uatomic_add, uatomic_sub */
+/* uatomic_add */
 
 static inline __attribute__((always_inline))
-void _uatomic_add(void *addr, unsigned long val, int len)
+void __uatomic_add(void *addr, unsigned long val, int len)
 {
 	switch (len) {
 	case 1:
@@ -275,7 +264,7 @@ void _uatomic_add(void *addr, unsigned long val, int len)
 			: "memory");
 		return;
 	}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
 	case 8:
 	{
 		__asm__ __volatile__(
@@ -293,16 +282,14 @@ void _uatomic_add(void *addr, unsigned long val, int len)
 	return;
 }
 
-#define uatomic_add(addr, v)						\
-	(_uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
-
-#define uatomic_sub(addr, v)	uatomic_add((addr), -(v))
+#define _uatomic_add(addr, v)						\
+	(__uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
 
 
 /* uatomic_inc */
 
 static inline __attribute__((always_inline))
-void _uatomic_inc(void *addr, int len)
+void __uatomic_inc(void *addr, int len)
 {
 	switch (len) {
 	case 1:
@@ -332,7 +319,7 @@ void _uatomic_inc(void *addr, int len)
 			: "memory");
 		return;
 	}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
 	case 8:
 	{
 		__asm__ __volatile__(
@@ -350,12 +337,12 @@ void _uatomic_inc(void *addr, int len)
 	return;
 }
 
-#define uatomic_inc(addr)	(_uatomic_inc((addr), sizeof(*(addr))))
+#define _uatomic_inc(addr)	(__uatomic_inc((addr), sizeof(*(addr))))
 
 /* uatomic_dec */
 
 static inline __attribute__((always_inline))
-void _uatomic_dec(void *addr, int len)
+void __uatomic_dec(void *addr, int len)
 {
 	switch (len) {
 	case 1:
@@ -385,7 +372,7 @@ void _uatomic_dec(void *addr, int len)
 			: "memory");
 		return;
 	}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
 	case 8:
 	{
 		__asm__ __volatile__(
@@ -403,28 +390,81 @@ void _uatomic_dec(void *addr, int len)
 	return;
 }
 
-#define uatomic_dec(addr)	(_uatomic_dec((addr), sizeof(*(addr))))
+#define _uatomic_dec(addr)	(__uatomic_dec((addr), sizeof(*(addr))))
+
+#if ((CAA_BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
+extern int __rcu_cas_avail;
+extern int __rcu_cas_init(void);
+
+#define UATOMIC_COMPAT(insn)						\
+	((likely(__rcu_cas_avail > 0))					\
+	? (_uatomic_##insn)						\
+	: ((unlikely(__rcu_cas_avail < 0)				\
+		? ((__rcu_cas_init() > 0)				\
+			? (_uatomic_##insn)				\
+			: (compat_uatomic_##insn))			\
+		: (compat_uatomic_##insn))))
+
+extern unsigned long _compat_uatomic_set(void *addr,
+					 unsigned long _new, int len);
+#define compat_uatomic_set(addr, _new)					\
+	((__typeof__(*(addr))) _compat_uatomic_set((addr),		\
+						(unsigned long)(_new),	\
+						sizeof(*(addr))))
 
-#if (BITS_PER_LONG == 64)
-#define URCU_CAS_AVAIL()	1
-#define compat_uatomic_cmpxchg(ptr, old, _new)	uatomic_cmpxchg(ptr, old, _new)
-#else
-extern int __urcu_cas_avail;
-extern int __urcu_cas_init(void);
-#define URCU_CAS_AVAIL()						\
-		((likely(__urcu_cas_avail > 0)) ?			\
-			(1) :						\
-			((unlikely(__urcu_cas_avail < 0) ?		\
-				(__urcu_cas_init()) :			\
-				(0))))
+
+extern unsigned long _compat_uatomic_xchg(void *addr,
+					  unsigned long _new, int len);
+#define compat_uatomic_xchg(addr, _new)					\
+	((__typeof__(*(addr))) _compat_uatomic_xchg((addr),		\
+						(unsigned long)(_new),	\
+						sizeof(*(addr))))
 
 extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
-				unsigned long _new, int len);
+					     unsigned long _new, int len);
+#define compat_uatomic_cmpxchg(addr, old, _new)				\
+	((__typeof__(*(addr))) _compat_uatomic_cmpxchg((addr),		\
+						(unsigned long)(old),	\
+						(unsigned long)(_new),	\
+						sizeof(*(addr))))
 
-#define compat_uatomic_cmpxchg(addr, old, _new)				\
-	((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
-						(unsigned long)(_new),	\
+extern unsigned long _compat_uatomic_xchg(void *addr,
+					  unsigned long _new, int len);
+#define compat_uatomic_add_return(addr, v)				\
+	((__typeof__(*(addr))) _compat_uatomic_add_return((addr),	\
+						(unsigned long)(v),	\
 						sizeof(*(addr))))
+
+#define compat_uatomic_add(addr, v)					\
+		((void)compat_uatomic_add_return((addr), (v)))
+#define compat_uatomic_inc(addr)					\
+		(compat_uatomic_add((addr), 1))
+#define compat_uatomic_dec(addr)					\
+		(compat_uatomic_add((addr), -1))
+
+#else
+#define UATOMIC_COMPAT(insn)	(_uatomic_##insn)
 #endif
 
+/* Read is atomic even in compat mode */
+#define uatomic_set(addr, v)			\
+		UATOMIC_COMPAT(set(addr, v))
+
+#define uatomic_cmpxchg(addr, old, _new)	\
+		UATOMIC_COMPAT(cmpxchg(addr, old, _new))
+#define uatomic_xchg(addr, v)			\
+		UATOMIC_COMPAT(xchg(addr, v))
+#define uatomic_add_return(addr, v)		\
+		UATOMIC_COMPAT(add_return(addr, v))
+
+#define uatomic_add(addr, v)		UATOMIC_COMPAT(add(addr, v))
+#define uatomic_inc(addr)		UATOMIC_COMPAT(inc(addr))
+#define uatomic_dec(addr)		UATOMIC_COMPAT(dec(addr))
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/uatomic_generic.h>
+
 #endif /* _URCU_ARCH_UATOMIC_X86_H */
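
Usage sketch (not part of the patch): after this change, callers go through the public uatomic_* macros defined at the end of the file, which dispatch via UATOMIC_COMPAT() either to the native inline-assembly implementations or, on 32-bit builds with CONFIG_RCU_COMPAT_ARCH, to the compat_uatomic_* fallbacks. The snippet below is only illustrative; the direct #include of urcu/uatomic_arch_x86.h is an assumption made for the example (applications normally include the arch-selecting uatomic wrapper header), and header names differ across liburcu versions.

#include <stdio.h>
#include <urcu/uatomic_arch_x86.h>	/* assumed direct include, for illustration only */

static unsigned long counter;

int main(void)
{
	unsigned long prev;

	uatomic_set(&counter, 5UL);	/* atomic store (CMM_STORE_SHARED or compat path) */
	uatomic_inc(&counter);		/* counter == 6 */
	uatomic_add(&counter, 4UL);	/* counter == 10 */
	prev = uatomic_add_return(&counter, 1UL);	/* returns the new value, 11 */

	/*
	 * Compare-and-swap: stores 42 only if the current value is still 11,
	 * and returns whatever value was there before the operation.
	 */
	prev = uatomic_cmpxchg(&counter, 11UL, 42UL);
	printf("previous value: %lu, counter is now: %lu\n", prev, counter);
	return 0;
}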