X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=arch_atomic_ppc.h;h=54c5f5caae0fa05a074a4ad6c33f172bd6eb027b;hp=13d56b4a3a16b2d6e1293e75aede6918c9bcd422;hb=ba59a0c7b244a0939a2298fc76a9002436ef9674;hpb=0114ba7f23f86623c237baeb28ec8e4b39b9bb84

diff --git a/arch_atomic_ppc.h b/arch_atomic_ppc.h
index 13d56b4..54c5f5c 100644
--- a/arch_atomic_ppc.h
+++ b/arch_atomic_ppc.h
@@ -20,14 +20,31 @@
  * Boehm-Demers-Weiser conservative garbage collector.
  */
 
+#include <compiler.h>	/* for ACCESS_ONCE() */
+
+#ifndef __SIZEOF_LONG__
+#ifdef __powerpc64__
+#define __SIZEOF_LONG__	8
+#else
+#define __SIZEOF_LONG__	4
+#endif
+#endif
+
 #ifndef BITS_PER_LONG
 #define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
 #endif
 
-#define ILLEGAL_INSTR	.long	0xd00d00
+#define ILLEGAL_INSTR	".long 0xd00d00"
 
 #ifndef _INCLUDE_API_H
 
+#define atomic_set(addr, v)				\
+do {							\
+	ACCESS_ONCE(*(addr)) = (v);			\
+} while (0)
+
+#define atomic_read(addr)	ACCESS_ONCE(*(addr))
+
 /*
  * Using an isync as second barrier for exchange to provide acquire semantic.
  * According to atomic_ops/sysdeps/gcc/powerpc.h, the documentation is "fairly
@@ -35,54 +52,160 @@
  * Derived from AO_compare_and_swap(), but removed the comparison.
  */
 
-static __attribute__((always_inline))
-unsigned int atomic_exchange_32(volatile unsigned int *addr, unsigned int val)
+/* xchg */
+
+static inline __attribute__((always_inline))
+unsigned long _atomic_exchange(void *addr, unsigned long val, int len)
 {
-	unsigned int result;
-
-	__asm__ __volatile__(
-		"lwsync\n"
-	"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
-		"stwcx. %2,0,%1\n"	/* else store conditional */
-		"bne- 1b\n"		/* retry if lost reservation */
-		"isync\n"
-			: "=&r"(result),
-			: "r"(addr), "r"(val)
-			: "memory", "cc");
-
-	return result;
+	switch (len) {
+	case 4:
+	{
+		unsigned int result;
+
+		__asm__ __volatile__(
+			"lwsync\n"
+		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
+			"stwcx. %2,0,%1\n"	/* store conditional */
+			"bne- 1b\n"		/* retry if lost reservation */
+			"isync\n"
+				: "=&r"(result)
+				: "r"(addr), "r"(val)
+				: "memory", "cc");
+
+		return result;
+	}
+#if (BITS_PER_LONG == 64)
+	case 8:
+	{
+		unsigned long result;
+
+		__asm__ __volatile__(
+			"lwsync\n"
+		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
+			"stdcx. %2,0,%1\n"	/* store conditional */
+			"bne- 1b\n"		/* retry if lost reservation */
+			"isync\n"
+				: "=&r"(result)
+				: "r"(addr), "r"(val)
+				: "memory", "cc");
+
+		return result;
+	}
+#endif
+	}
+	/* generate an illegal instruction. Cannot catch this with linker tricks
+	 * when optimizations are disabled. */
+	__asm__ __volatile__(ILLEGAL_INSTR);
+	return 0;
 }
 
-#if (BITS_PER_LONG == 64)
+#define xchg(addr, v)						\
+	((__typeof__(*(addr))) _atomic_exchange((addr), (unsigned long)(v), \
+						sizeof(*(addr))))
 
-static __attribute__((always_inline))
-unsigned long atomic_exchange_64(volatile unsigned long *addr,
-				 unsigned long val)
+/* cmpxchg */
+
+static inline __attribute__((always_inline))
+unsigned long _atomic_cmpxchg(void *addr, unsigned long old,
+			      unsigned long _new, int len)
 {
-	unsigned long result;
-
-	__asm__ __volatile__(
-		"lwsync\n"
-	"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
-		"stdcx. %2,0,%1\n"	/* else store conditional */
-		"bne- 1b\n"		/* retry if lost reservation */
-		"isync\n"
-			: "=&r"(result),
-			: "r"(addr), "r"(val)
-			: "memory", "cc");
-
-	return result;
+	switch (len) {
+	case 4:
+	{
+		unsigned int old_val;
+
+		__asm__ __volatile__(
+			"lwsync\n"
+		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
+			"cmpw %0,%3\n"		/* if load is not equal to */
+			"bne 2f\n"		/* old, fail */
+			"stwcx. %2,0,%1\n"	/* else store conditional */
+			"bne- 1b\n"		/* retry if lost reservation */
+			"isync\n"
+		"2:\n"
+				: "=&r"(old_val)
+				: "r"(addr), "r"((unsigned int)_new),
+				  "r"((unsigned int)old)
+				: "memory", "cc");
+
+		return old_val;
+	}
+#if (BITS_PER_LONG == 64)
+	case 8:
+	{
+		unsigned long old_val;
+
+		__asm__ __volatile__(
+			"lwsync\n"
+		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
+			"cmpd %0,%3\n"		/* if load is not equal to */
+			"bne 2f\n"		/* old, fail */
+			"stdcx. %2,0,%1\n"	/* else store conditional */
+			"bne- 1b\n"		/* retry if lost reservation */
+			"isync\n"
+		"2:\n"
+				: "=&r"(old_val)
+				: "r"(addr), "r"((unsigned long)_new),
+				  "r"((unsigned long)old)
+				: "memory", "cc");
+
+		return old_val;
+	}
+#endif
+	}
+	/* generate an illegal instruction. Cannot catch this with linker tricks
+	 * when optimizations are disabled. */
+	__asm__ __volatile__(ILLEGAL_INSTR);
+	return 0;
 }
-#endif
 
-static __attribute__((always_inline))
-unsigned long _atomic_exchange(volatile void *addr, unsigned long val, int len)
+#define cmpxchg(addr, old, _new)					\
+	((__typeof__(*(addr))) _atomic_cmpxchg((addr), (unsigned long)(old),\
+					       (unsigned long)(_new),	\
+					       sizeof(*(addr))))
+
+/* atomic_add_return */
+
+static inline __attribute__((always_inline))
+unsigned long _atomic_add_return(void *addr, unsigned long val,
+				 int len)
 {
 	switch (len) {
-	case 4: return atomic_exchange_32(addr, val);
+	case 4:
+	{
+		unsigned int result;
+
+		__asm__ __volatile__(
+			"lwsync\n"
+		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
+			"add %0,%2,%0\n"	/* add val to value loaded */
+			"stwcx. %0,0,%1\n"	/* store conditional */
+			"bne- 1b\n"		/* retry if lost reservation */
+			"isync\n"
+				: "=&r"(result)
+				: "r"(addr), "r"(val)
+				: "memory", "cc");
+
+		return result;
+	}
 #if (BITS_PER_LONG == 64)
-	case 8: return atomic_exchange_64(addr, val);
+	case 8:
+	{
+		unsigned long result;
+
+		__asm__ __volatile__(
+			"lwsync\n"
+		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
+			"add %0,%2,%0\n"	/* add val to value loaded */
+			"stdcx. %0,0,%1\n"	/* store conditional */
+			"bne- 1b\n"		/* retry if lost reservation */
+			"isync\n"
+				: "=&r"(result)
+				: "r"(addr), "r"(val)
+				: "memory", "cc");
+
+		return result;
+	}
 #endif
 	}
 	/* generate an illegal instruction. Cannot catch this with linker tricks
@@ -91,8 +214,21 @@ unsigned long _atomic_exchange(volatile void *addr, unsigned long val, int len)
 	return 0;
 }
 
-#define xchg(addr, v)	(__typeof__(*(addr)) _atomic_exchange((addr), (v), \
-						sizeof(*(addr))))
+
+#define atomic_add_return(addr, v)				\
+	((__typeof__(*(addr))) _atomic_add_return((addr),	\
+						  (unsigned long)(v),	\
+						  sizeof(*(addr))))
+
+/* atomic_sub_return, atomic_add, atomic_sub, atomic_inc, atomic_dec */
+
+#define atomic_sub_return(addr, v)	atomic_add_return((addr), -(v))
+
+#define atomic_add(addr, v)		(void)atomic_add_return((addr), (v))
+#define atomic_sub(addr, v)		(void)atomic_sub_return((addr), (v))
+
+#define atomic_inc(addr)		atomic_add((addr), 1)
+#define atomic_dec(addr)		atomic_add((addr), -1)
 
 #endif /* #ifndef _INCLUDE_API_H */
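
A minimal usage sketch, not part of the patch: it assumes the patched header is reachable as "arch_atomic_ppc.h", that compiler.h supplies ACCESS_ONCE(), and that it runs on PowerPC. The event-counter variable and the record_event()/drain_events() helpers are hypothetical names chosen for illustration.

	#include <assert.h>
	#include "arch_atomic_ppc.h"

	static unsigned long events;		/* shared event counter */

	static void record_event(void)
	{
		atomic_inc(&events);		/* LL/SC retry loop with lwsync/isync fences */
	}

	static unsigned long drain_events(void)
	{
		/* atomically fetch the current count and reset it to zero */
		return xchg(&events, 0UL);
	}

	int main(void)
	{
		atomic_set(&events, 5UL);
		atomic_add(&events, 3);
		assert(atomic_read(&events) == 8);

		/* cmpxchg returns the value it loaded; the store happened
		 * iff that value equals the expected old value. */
		assert(cmpxchg(&events, 8UL, 1UL) == 8);
		assert(drain_events() == 1);
		assert(atomic_read(&events) == 0);
		return 0;
	}

Each primitive brackets its load-reserve/store-conditional loop with lwsync before and isync after, matching the acquire-semantic rationale quoted in the header comment.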