From 0fad128bbcd6f99f2a4ebbc976a76ef1e3155d1c Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers
Date: Tue, 22 Sep 2009 18:44:26 -0400
Subject: [PATCH] update x86 and ppc atomic ops

Signed-off-by: Mathieu Desnoyers
---
 arch_atomic_x86.h   | 82 ++++++++++++++++++++++++++++++++++++++++++++-
 tests/test_atomic.c | 21 +++++++++---
 2 files changed, 97 insertions(+), 6 deletions(-)

diff --git a/arch_atomic_x86.h b/arch_atomic_x86.h
index 6b33d45..879ce70 100644
--- a/arch_atomic_x86.h
+++ b/arch_atomic_x86.h
@@ -20,6 +20,8 @@
  * Boehm-Demers-Weiser conservative garbage collector.
  */
 
+#include <compiler.h>
+
 #ifndef BITS_PER_LONG
 #define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
 #endif
@@ -35,16 +37,24 @@ struct __atomic_dummy {
 };
 #define __hp(x)	((struct __atomic_dummy *)(x))
 
+#define atomic_set(addr, v)				\
+do {							\
+	ACCESS_ONCE(*(addr)) = (v);			\
+} while (0)
+
+#define atomic_read(addr)	ACCESS_ONCE(*(addr))
+
 /* cmpxchg */
 
 static inline __attribute__((always_inline))
 unsigned long _atomic_cmpxchg(volatile void *addr, unsigned long old,
-			      unsigned long _new, int len)
+			       unsigned long _new, int len)
 {
 	switch (len) {
 	case 1:
 	{
 		unsigned char result = old;
+
 		__asm__ __volatile__(
 		"lock; cmpxchgb %2, %1"
 		: "+a"(result), "+m"(*__hp(addr))
@@ -55,6 +65,7 @@ unsigned long _atomic_cmpxchg(volatile void *addr, unsigned long old,
 	case 2:
 	{
 		unsigned short result = old;
+
 		__asm__ __volatile__(
 		"lock; cmpxchgw %2, %1"
 		: "+a"(result), "+m"(*__hp(addr))
@@ -65,6 +76,7 @@ unsigned long _atomic_cmpxchg(volatile void *addr, unsigned long old,
 	case 4:
 	{
 		unsigned int result = old;
+
 		__asm__ __volatile__(
 		"lock; cmpxchgl %2, %1"
 		: "+a"(result), "+m"(*__hp(addr))
@@ -76,6 +88,7 @@ unsigned long _atomic_cmpxchg(volatile void *addr, unsigned long old,
 	case 8:
 	{
 		unsigned long result = old;
+
 		__asm__ __volatile__(
 		"lock; cmpxchgq %2, %1"
 		: "+a"(result), "+m"(*__hp(addr))
@@ -156,6 +169,73 @@ unsigned long _atomic_exchange(volatile void *addr, unsigned long val, int len)
 	((__typeof__(*(addr))) _atomic_exchange((addr), (unsigned long)(v), \
 						sizeof(*(addr))))
 
+/* atomic_add_return, atomic_sub_return */
+
+static inline __attribute__((always_inline))
+unsigned long _atomic_add_return(volatile void *addr, unsigned long val,
+				 int len)
+{
+	switch (len) {
+	case 1:
+	{
+		unsigned char result = val;
+
+		__asm__ __volatile__(
+		"lock; xaddb %1, %0"
+		: "+m"(*__hp(addr)), "+q" (result)
+		:
+		: "memory");
+		return result + (unsigned char)val;
+	}
+	case 2:
+	{
+		unsigned short result = val;
+
+		__asm__ __volatile__(
+		"lock; xaddw %1, %0"
+		: "+m"(*__hp(addr)), "+r" (result)
+		:
+		: "memory");
+		return result + (unsigned short)val;
+	}
+	case 4:
+	{
+		unsigned int result = val;
+
+		__asm__ __volatile__(
+		"lock; xaddl %1, %0"
+		: "+m"(*__hp(addr)), "+r" (result)
+		:
+		: "memory");
+		return result + (unsigned int)val;
+	}
+#if (BITS_PER_LONG == 64)
+	case 8:
+	{
+		unsigned long result = val;
+
+		__asm__ __volatile__(
+		"lock; xaddq %1, %0"
+		: "+m"(*__hp(addr)), "+r" (result)
+		:
+		: "memory");
+		return result + (unsigned long)val;
+	}
+#endif
+	}
+	/* generate an illegal instruction. Cannot catch this with linker tricks
+	 * when optimizations are disabled. */
+	__asm__ __volatile__("ud2");
+	return 0;
+}
+
+#define atomic_add_return(addr, v)					\
+	((__typeof__(*(addr))) _atomic_add_return((addr),		\
+						  (unsigned long)(v),	\
+						  sizeof(*(addr))))
+
+#define atomic_sub_return(addr, v)	atomic_add_return((addr), -(v))
+
 /* atomic_add, atomic_sub */
 
 static inline __attribute__((always_inline))
diff --git a/tests/test_atomic.c b/tests/test_atomic.c
index 3d2b64e..102012f 100644
--- a/tests/test_atomic.c
+++ b/tests/test_atomic.c
@@ -16,18 +16,29 @@ do {						\
 	__typeof__(*(ptr)) v;				\
 							\
 	atomic_add(ptr, 10);				\
-	assert(*(ptr) == 10);				\
+	assert(atomic_read(ptr) == 10);			\
 	atomic_add(ptr, -11UL);				\
-	assert(*(ptr) == (__typeof__(*(ptr)))-1UL);	\
+	assert(atomic_read(ptr) == (__typeof__(*(ptr)))-1UL); \
 	v = cmpxchg(ptr, -1UL, 22);			\
-	assert(*(ptr) == 22);				\
+	assert(atomic_read(ptr) == 22);			\
 	assert(v == (__typeof__(*(ptr)))-1UL);		\
 	v = cmpxchg(ptr, 33, 44);			\
-	assert(*(ptr) == 22);				\
+	assert(atomic_read(ptr) == 22);			\
 	assert(v == 22);				\
 	v = xchg(ptr, 55);				\
-	assert(*(ptr) == 55);				\
+	assert(atomic_read(ptr) == 55);			\
 	assert(v == 22);				\
+	atomic_set(ptr, 22);				\
+	atomic_inc(ptr);				\
+	assert(atomic_read(ptr) == 23);			\
+	atomic_dec(ptr);				\
+	assert(atomic_read(ptr) == 22);			\
+	v = atomic_add_return(ptr, 100);		\
+	assert(v == 122);				\
+	assert(atomic_read(ptr) == 122);		\
+	v = atomic_sub_return(ptr, 1);			\
+	assert(v == 121);				\
+	assert(atomic_read(ptr) == 121);		\
 } while (0)
 
 int main(int argc, char **argv)
-- 
2.34.1
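
A note for readers without the patched tree handy: x86 "lock; xadd" leaves the
value the memory operand held *before* the addition in the source register,
which is why _atomic_add_return() returns result + val to produce the post-add
value. The sketch below demonstrates the same return-the-new-value semantics
using the portable GCC __atomic builtins instead of the patch's inline
assembly; my_atomic_add_return() is a hypothetical stand-in for illustration,
not part of the patch.

	#include <assert.h>

	static long my_atomic_add_return(long *addr, long val)
	{
		/* __atomic_fetch_add() returns the old value, like xadd,
		 * so add val once more to obtain the new value. */
		return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST) + val;
	}

	int main(void)
	{
		long x = 22;

		assert(my_atomic_add_return(&x, 100) == 122); /* new value */
		assert(x == 122);
		assert(my_atomic_add_return(&x, -1) == 121);  /* sub_return */
		return 0;
	}

This mirrors the atomic_add_return()/atomic_sub_return() checks the patch adds
to tests/test_atomic.c.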
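Similarly, the new atomic_set()/atomic_read() macros are thin wrappers around
ACCESS_ONCE(), whose definition is not part of this diff. The volatile-cast
idiom shown below is the conventional definition of that era and is reproduced
here as an assumption for illustration: the point is that these macros force
exactly one store or load, without a bus-locked instruction.

	#include <assert.h>

	/* Conventional definition; an assumption, not shown in this patch. */
	#define ACCESS_ONCE(x)	(*(volatile __typeof__(x) *)&(x))

	#define atomic_set(addr, v)		\
	do {					\
		ACCESS_ONCE(*(addr)) = (v);	\
	} while (0)

	#define atomic_read(addr)	ACCESS_ONCE(*(addr))

	int main(void)
	{
		long x;

		atomic_set(&x, 42);		/* single volatile store */
		assert(atomic_read(&x) == 42);	/* single volatile load */
		return 0;
	}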