diff --git a/urcu/uatomic_arch_x86.h b/urcu/uatomic_arch_x86.h
index b575698..f57d46d 100644
--- a/urcu/uatomic_arch_x86.h
+++ b/urcu/uatomic_arch_x86.h
@@ -23,22 +23,13 @@
 #include <urcu/compiler.h>
 #include <urcu/system.h>
 
+#define UATOMIC_HAS_ATOMIC_BYTE
+#define UATOMIC_HAS_ATOMIC_SHORT
+
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-#ifndef __SIZEOF_LONG__
-#if defined(__x86_64__) || defined(__amd64__)
-#define __SIZEOF_LONG__ 8
-#else
-#define __SIZEOF_LONG__ 4
-#endif
-#endif
-
-#ifndef BITS_PER_LONG
-#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
-#endif
-
 /*
  * Derived from AO_compare_and_swap() and AO_test_and_set_full().
  */
@@ -48,8 +39,7 @@ struct __uatomic_dummy {
 };
 #define __hp(x)	((struct __uatomic_dummy *)(x))
 
-#define _uatomic_set(addr, v)	STORE_SHARED(*(addr), (v))
-#define _uatomic_read(addr)	LOAD_SHARED(*(addr))
+#define _uatomic_set(addr, v)	CMM_STORE_SHARED(*(addr), (v))
 
 /* cmpxchg */
 
@@ -91,7 +81,7 @@ unsigned long __uatomic_cmpxchg(void *addr, unsigned long old,
 			: "memory");
 		return result;
 	}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
 	case 8:
 	{
 		unsigned long result = old;
@@ -153,7 +143,7 @@ unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
 			: "memory");
 		return result;
 	}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
 	case 8:
 	{
 		unsigned long result;
@@ -176,7 +166,7 @@ unsigned long __uatomic_exchange(void *addr, unsigned long val, int len)
 	((__typeof__(*(addr))) __uatomic_exchange((addr), (unsigned long)(v), \
 						sizeof(*(addr))))
 
-/* uatomic_add_return, uatomic_sub_return */
+/* uatomic_add_return */
 
 static inline __attribute__((always_inline))
 unsigned long __uatomic_add_return(void *addr, unsigned long val,
@@ -216,7 +206,7 @@ unsigned long __uatomic_add_return(void *addr, unsigned long val,
 			: "memory");
 		return result + (unsigned int)val;
 	}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
 	case 8:
 	{
 		unsigned long result = val;
@@ -241,9 +231,61 @@ unsigned long __uatomic_add_return(void *addr, unsigned long val,
 						  (unsigned long)(v),	      \
 						  sizeof(*(addr))))
 
-#define _uatomic_sub_return(addr, v) _uatomic_add_return((addr), -(v))
+/* uatomic_or */
+
+static inline __attribute__((always_inline))
+void __uatomic_or(void *addr, unsigned long val, int len)
+{
+	switch (len) {
+	case 1:
+	{
+		__asm__ __volatile__(
+		"lock; orb %1, %0"
+			: "=m"(*__hp(addr))
+			: "iq" ((unsigned char)val)
+			: "memory");
+		return;
+	}
+	case 2:
+	{
+		__asm__ __volatile__(
+		"lock; orw %1, %0"
+			: "=m"(*__hp(addr))
+			: "ir" ((unsigned short)val)
+			: "memory");
+		return;
+	}
+	case 4:
+	{
+		__asm__ __volatile__(
+		"lock; orl %1, %0"
+			: "=m"(*__hp(addr))
+			: "ir" ((unsigned int)val)
+			: "memory");
+		return;
+	}
+#if (CAA_BITS_PER_LONG == 64)
+	case 8:
+	{
+		__asm__ __volatile__(
+		"lock; orq %1, %0"
+			: "=m"(*__hp(addr))
+			: "er" ((unsigned long)val)
+			: "memory");
+		return;
+	}
+#endif
+	}
+	/* generate an illegal instruction. Cannot catch this with linker tricks
+	 * when optimizations are disabled. */
+	__asm__ __volatile__("ud2");
+	return;
+}
+
+#define _uatomic_or(addr, v)						      \
+	(__uatomic_or((addr), (unsigned long)(v), sizeof(*(addr))))
 
-/* uatomic_add, uatomic_sub */
+/* uatomic_add */
 
 static inline __attribute__((always_inline))
 void __uatomic_add(void *addr, unsigned long val, int len)
@@ -276,7 +318,7 @@ void __uatomic_add(void *addr, unsigned long val, int len)
 			: "memory");
 		return;
 	}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
 	case 8:
 	{
 		__asm__ __volatile__(
@@ -297,8 +339,6 @@ void __uatomic_add(void *addr, unsigned long val, int len)
 #define _uatomic_add(addr, v)						      \
 	(__uatomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
 
-#define _uatomic_sub(addr, v) _uatomic_add((addr), -(v))
-
 /* uatomic_inc */
 
 
@@ -333,7 +373,7 @@ void __uatomic_inc(void *addr, int len)
 			: "memory");
 		return;
 	}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
 	case 8:
 	{
 		__asm__ __volatile__(
@@ -386,7 +426,7 @@ void __uatomic_dec(void *addr, int len)
 			: "memory");
 		return;
 	}
-#if (BITS_PER_LONG == 64)
+#if (CAA_BITS_PER_LONG == 64)
 	case 8:
 	{
 		__asm__ __volatile__(
@@ -406,15 +446,15 @@ void __uatomic_dec(void *addr, int len)
 
 #define _uatomic_dec(addr)	(__uatomic_dec((addr), sizeof(*(addr))))
 
-#if ((BITS_PER_LONG != 64) && defined(CONFIG_URCU_COMPAT_ARCH))
-extern int __urcu_cas_avail;
-extern int __urcu_cas_init(void);
+#if ((CAA_BITS_PER_LONG != 64) && defined(CONFIG_RCU_COMPAT_ARCH))
+extern int __rcu_cas_avail;
+extern int __rcu_cas_init(void);
 
 #define UATOMIC_COMPAT(insn)						      \
-	((likely(__urcu_cas_avail > 0))					      \
+	((likely(__rcu_cas_avail > 0))					      \
 		? (_uatomic_##insn)					      \
-		: ((unlikely(__urcu_cas_avail < 0)			      \
-			? ((__urcu_cas_init() > 0)			      \
+		: ((unlikely(__rcu_cas_avail < 0)			      \
+			? ((__rcu_cas_init() > 0)			      \
 				? (_uatomic_##insn)			      \
 				: (compat_uatomic_##insn))		      \
 			: (compat_uatomic_##insn))))
@@ -442,43 +482,45 @@ extern unsigned long _compat_uatomic_cmpxchg(void *addr, unsigned long old,
 					       (unsigned long)(_new),	      \
 					       sizeof(*(addr))))
 
-extern unsigned long _compat_uatomic_xchg(void *addr,
-					  unsigned long _new, int len);
+extern unsigned long _compat_uatomic_or(void *addr,
+					unsigned long _new, int len);
+#define compat_uatomic_or(addr, v)					      \
+	((__typeof__(*(addr))) _compat_uatomic_or((addr),		      \
+						  (unsigned long)(v),	      \
+						  sizeof(*(addr))))
+
+extern unsigned long _compat_uatomic_add_return(void *addr,
+						unsigned long _new, int len);
 #define compat_uatomic_add_return(addr, v)				      \
 	((__typeof__(*(addr))) _compat_uatomic_add_return((addr),	      \
 						(unsigned long)(v),	      \
 						sizeof(*(addr))))
 
-#define compat_uatomic_sub_return(addr, v)				      \
-	compat_uatomic_add_return((addr), -(v))
 #define compat_uatomic_add(addr, v)					      \
 	((void)compat_uatomic_add_return((addr), (v)))
-#define compat_uatomic_sub(addr, v)					      \
-	((void)compat_uatomic_sub_return((addr), (v)))
 #define compat_uatomic_inc(addr)					      \
 	(compat_uatomic_add((addr), 1))
 #define compat_uatomic_dec(addr)					      \
-	(compat_uatomic_sub((addr), 1))
+	(compat_uatomic_add((addr), -1))
 
 #else
 #define UATOMIC_COMPAT(insn) (_uatomic_##insn)
 #endif
 
 /* Read is atomic even in compat mode */
-#define uatomic_read(addr)	_uatomic_read(addr)
-
 #define uatomic_set(addr, v)			\
 	UATOMIC_COMPAT(set(addr, v))
+
 #define uatomic_cmpxchg(addr, old, _new)	\
 	UATOMIC_COMPAT(cmpxchg(addr, old, _new))
 #define uatomic_xchg(addr, v)			\
 	UATOMIC_COMPAT(xchg(addr, v))
+#define uatomic_or(addr, v)			\
+	UATOMIC_COMPAT(or(addr, v))
 #define uatomic_add_return(addr, v)		\
 	UATOMIC_COMPAT(add_return(addr, v))
-#define uatomic_sub_return(addr, v)		\
-	UATOMIC_COMPAT(sub_return(addr, v))
+
 #define uatomic_add(addr, v)		UATOMIC_COMPAT(add(addr, v))
-#define uatomic_sub(addr, v)		UATOMIC_COMPAT(sub(addr, v))
 #define uatomic_inc(addr)		UATOMIC_COMPAT(inc(addr))
 #define uatomic_dec(addr)		UATOMIC_COMPAT(dec(addr))
 
@@ -486,4 +528,6 @@ extern unsigned long _compat_uatomic_xchg(void *addr,
 }
 #endif
 
+#include <urcu/uatomic_generic.h>
+
 #endif /* _URCU_ARCH_UATOMIC_X86_H */
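
For context, a minimal usage sketch of the uatomic API as it stands after this
change. The example is hypothetical and not part of the patch: it assumes
liburcu's public uatomic header (urcu/uatomic.h in current trees,
urcu/uatomic_arch.h in releases contemporary with this diff). Note that
uatomic_sub()/uatomic_sub_return() are gone from this header; adding a negated
value is the drop-in replacement, and uatomic_read() is now supplied by the
generic header included at the bottom of the file rather than defined here.

#include <stdio.h>
#include <urcu/uatomic.h>

static unsigned long counter;
static unsigned long flags;

int main(void)
{
	unsigned long newval, oldval;

	uatomic_set(&counter, 10UL);

	/* Atomic add returning the updated value (lock; xadd). */
	newval = uatomic_add_return(&counter, 5UL);	/* counter == 15 */

	/* Replacement for the removed uatomic_sub(): add a negated value. */
	uatomic_add(&counter, -3L);			/* counter == 12 */

	/* New in this patch: atomic bitwise OR (lock; or[bwlq]). */
	uatomic_or(&flags, 0x4UL);

	/* Compare-and-swap: returns the value observed before the swap. */
	oldval = uatomic_cmpxchg(&counter, 12UL, 100UL);

	printf("add_return=%lu cmpxchg_old=%lu counter=%lu flags=%#lx\n",
	       newval, oldval, uatomic_read(&counter), flags);
	return 0;
}

On the operand constraints in the new __uatomic_or(): the byte case uses "iq"
because on i386 only %eax-%edx have byte-addressable subregisters, and the
64-bit case uses "er" because 64-bit instructions accept only sign-extended
32-bit immediates.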