X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu%2Fuatomic_arch_ppc.h;h=39c4c2400113555519d8b3ce37d6db5cae710e4a;hp=08a622dfe6f93cd547b0684ebfd398bf9284fe92;hb=8760d94e0ef6d52260765a9246aaac073613055e;hpb=b46b23cb9949c585ddf8ccb691458c8c7c60c7ad

diff --git a/urcu/uatomic_arch_ppc.h b/urcu/uatomic_arch_ppc.h
index 08a622d..39c4c24 100644
--- a/urcu/uatomic_arch_ppc.h
+++ b/urcu/uatomic_arch_ppc.h
@@ -23,6 +23,10 @@
 #include <urcu/compiler.h>
 #include <urcu/system.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 #ifndef __SIZEOF_LONG__
 #ifdef __powerpc64__
 #define __SIZEOF_LONG__ 8
@@ -31,15 +35,18 @@
 #endif
 #endif
 
+#ifdef __NO_LWSYNC__
+#define LWSYNC_OPCODE	"sync\n"
+#else
+#define LWSYNC_OPCODE	"lwsync\n"
+#endif
+
 #ifndef BITS_PER_LONG
 #define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
 #endif
 
 #define ILLEGAL_INSTR	".long	0xd00d00"
 
-#define uatomic_set(addr, v)	STORE_SHARED(*(addr), (v))
-#define uatomic_read(addr)	LOAD_SHARED(*(addr))
-
 /*
  * Using a isync as second barrier for exchange to provide acquire semantic.
  * According to uatomic_ops/sysdeps/gcc/powerpc.h, the documentation is "fairly
@@ -58,7 +65,7 @@ unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
 		unsigned int result;
 
 		__asm__ __volatile__(
-			"lwsync\n"
+			LWSYNC_OPCODE
 		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
 			"stwcx. %2,0,%1\n"	/* else store conditional */
 			"bne- 1b\n"	 	/* retry if lost reservation */
@@ -75,7 +82,7 @@ unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
 		unsigned long result;
 
 		__asm__ __volatile__(
-			"lwsync\n"
+			LWSYNC_OPCODE
 		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
 			"stdcx. %2,0,%1\n"	/* else store conditional */
 			"bne- 1b\n"	 	/* retry if lost reservation */
@@ -109,7 +116,7 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
 		unsigned int old_val;
 
 		__asm__ __volatile__(
-			"lwsync\n"
+			LWSYNC_OPCODE
 		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
 			"cmpd %0,%3\n"		/* if load is not equal to */
 			"bne 2f\n"		/* old, fail */
@@ -130,7 +137,7 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
 		unsigned long old_val;
 
 		__asm__ __volatile__(
-			"lwsync\n"
+			LWSYNC_OPCODE
 		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
 			"cmpd %0,%3\n"		/* if load is not equal to */
 			"bne 2f\n"		/* old, fail */
@@ -171,7 +178,7 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val,
 		unsigned int result;
 
 		__asm__ __volatile__(
-			"lwsync\n"
+			LWSYNC_OPCODE
 		"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
 			"add %0,%2,%0\n"	/* add val to value loaded */
 			"stwcx. %0,0,%1\n"	/* store conditional */
@@ -189,7 +196,7 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val,
 		unsigned long result;
 
 		__asm__ __volatile__(
-			"lwsync\n"
+			LWSYNC_OPCODE
 		"1:\t"	"ldarx %0,0,%1\n"	/* load and reserve */
 			"add %0,%2,%0\n"	/* add val to value loaded */
 			"stdcx. %0,0,%1\n"	/* store conditional */
@@ -215,17 +222,10 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val,
 						  (unsigned long)(v),	\
 						  sizeof(*(addr))))
 
-/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */
-
-#define uatomic_sub_return(addr, v)	uatomic_add_return((addr), -(v))
-
-#define uatomic_add(addr, v)		(void)uatomic_add_return((addr), (v))
-#define uatomic_sub(addr, v)		(void)uatomic_sub_return((addr), (v))
-
-#define uatomic_inc(addr)		uatomic_add((addr), 1)
-#define uatomic_dec(addr)		uatomic_add((addr), -1)
+#ifdef __cplusplus
+}
+#endif
 
-#define URCU_CAS_AVAIL()	1
-#define compat_uatomic_cmpxchg(ptr, old, _new) uatomic_cmpxchg(ptr, old, _new)
+#include <urcu/uatomic_generic.h>
 
 #endif /* _URCU_ARCH_UATOMIC_PPC_H */
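
For reference, a minimal standalone sketch of the 32-bit exchange path that the hunks above switch from a hard-coded "lwsync" to the LWSYNC_OPCODE macro: a lwsync (or a full sync when the compiler defines __NO_LWSYNC__) before the lwarx/stwcx. reservation loop, and an isync afterwards for acquire semantics. This is not liburcu code; the names MY_LWSYNC_OPCODE and my_exchange_u32 are illustrative only, and the snippet builds only with GCC-style inline asm on a PowerPC target.

/*
 * Hypothetical sketch, not part of liburcu: mirrors the 32-bit
 * _uatomic_exchange() path shown in the diff above.
 */
#ifdef __NO_LWSYNC__
#define MY_LWSYNC_OPCODE	"sync\n"	/* processors without lwsync */
#else
#define MY_LWSYNC_OPCODE	"lwsync\n"	/* lighter-weight barrier */
#endif

static inline unsigned int my_exchange_u32(unsigned int *addr, unsigned int val)
{
	unsigned int result;

	__asm__ __volatile__(
		MY_LWSYNC_OPCODE	/* order prior accesses before the update */
	"1:\t"	"lwarx %0,0,%1\n"	/* load and reserve */
		"stwcx. %2,0,%1\n"	/* store conditional */
		"bne- 1b\n"		/* retry if the reservation was lost */
		"isync\n"		/* acquire barrier after the update */
			: "=&r"(result)
			: "r"(addr), "r"(val)
			: "memory", "cc");

	return result;
}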
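
The removed uatomic_add(), uatomic_sub(), uatomic_inc(), uatomic_dec(), uatomic_sub_return() and compat_uatomic_cmpxchg() fallbacks are now supplied by the shared generic uatomic header rather than being repeated in each per-architecture file, so callers keep using the same macros. A hedged usage sketch follows; the <urcu/uatomic.h> include path is an assumption, since the public wrapper header name varies across liburcu versions.

#include <stdio.h>
#include <urcu/uatomic.h>	/* assumed public include; exact name varies by liburcu version */

static unsigned long counter;

int main(void)
{
	unsigned long newval, old;

	uatomic_set(&counter, 5);			/* atomic store */
	uatomic_inc(&counter);				/* counter == 6 */
	newval = uatomic_add_return(&counter, 4);	/* returns the new value, 10 */
	old = uatomic_cmpxchg(&counter, 10, 42);	/* returns the old value, 10; counter == 42 */

	printf("%lu %lu %lu\n", newval, old, uatomic_read(&counter));
	return 0;
}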