X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu%2Fuatomic_arch_s390.h;h=2378ebd202ba256310f5edfcffcd7d278f5fef7f;hp=6b4e17eb78ad7cda400a0cae63e24f863502ea89;hb=8760d94e0ef6d52260765a9246aaac073613055e;hpb=9c697e4db52c0af85da8a44f26a2c5e04ad27b1b

diff --git a/urcu/uatomic_arch_s390.h b/urcu/uatomic_arch_s390.h
index 6b4e17e..2378ebd 100644
--- a/urcu/uatomic_arch_s390.h
+++ b/urcu/uatomic_arch_s390.h
@@ -32,6 +32,10 @@
 #include <urcu/compiler.h>
 #include <urcu/system.h>
 
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 #ifndef __SIZEOF_LONG__
 #ifdef __s390x__
 #define __SIZEOF_LONG__ 8
@@ -44,32 +48,67 @@
 #define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
 #endif
 
-#define uatomic_set(addr, v)	STORE_SHARED(*(addr), (v))
-#define uatomic_read(addr)	LOAD_SHARED(*(addr))
+#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
+#define COMPILER_HAVE_SHORT_MEM_OPERAND
+#endif
+
+/*
+ * MEMOP assembler operand rules:
+ * - op refers to the MEMOP_IN operand
+ * - MEMOP_IN can expand to more than a single operand. Use it at the end of
+ *   the operand list only.
+ */
+
+#ifdef COMPILER_HAVE_SHORT_MEM_OPERAND
+
+#define MEMOP_OUT(addr)	"=Q" (*(addr))
+#define MEMOP_IN(addr)	"Q" (*(addr))
+#define MEMOP_REF(op)	#op		/* op refers to the MEMOP_IN operand */
+
+#else /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
+
+#define MEMOP_OUT(addr)	"=m" (*(addr))
+#define MEMOP_IN(addr)	"a" (addr), "m" (*(addr))
+#define MEMOP_REF(op)	"0(" #op ")"	/* op refers to the MEMOP_IN operand */
+
+#endif /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
+
+struct __uatomic_dummy {
+	unsigned long v[10];
+};
+#define __hp(x)	((struct __uatomic_dummy *)(x))
 
 /* xchg */
 
 static inline __attribute__((always_inline))
 unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
 {
 	switch (len) {
 	case 4:
+	{
 		unsigned int old_val;
 
 		__asm__ __volatile__(
-			"0:	cs %0,%2,%1\n"
+			"0:	cs %0,%2," MEMOP_REF(%3) "\n"
 			"	brc 4,0b\n"
-			: "=&r"(old_val), "=m" (*addr)
-			: "r"(val), "m" (*addr)
+			: "=&r" (old_val), MEMOP_OUT (__hp(addr))
+			: "r" (val), MEMOP_IN (__hp(addr))
 			: "memory", "cc");
+		return old_val;
+	}
 #if (BITS_PER_LONG == 64)
 	case 8:
+	{
 		unsigned long old_val;
 
 		__asm__ __volatile__(
-			"0:	csg %0,%2,%1\n"
+			"0:	csg %0,%2," MEMOP_REF(%3) "\n"
 			"	brc 4,0b\n"
-			: "=&r"(old_val), "=m" (*addr)
-			: "r"(val), "m" (*addr)
+			: "=&r" (old_val), MEMOP_OUT (__hp(addr))
+			: "r" (val), MEMOP_IN (__hp(addr))
 			: "memory", "cc");
+		return old_val;
+	}
 #endif
 	default:
 		__asm__ __volatile__(".long	0xd00d00");
@@ -86,26 +125,30 @@ unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
 
 static inline __attribute__((always_inline))
 unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
-			       unsigned long new, int len)
+			       unsigned long _new, int len)
 {
 	switch (len) {
 	case 4:
+	{
 		unsigned int old_val = (unsigned int)old;
 
 		__asm__ __volatile__(
-			"	cs %0,%2,%1\n"
-			: "+r"(old_val), "+m"(*addr)
-			: "r"(new)
+			"	cs %0,%2," MEMOP_REF(%3) "\n"
+			: "+r" (old_val), MEMOP_OUT (__hp(addr))
+			: "r" (_new), MEMOP_IN (__hp(addr))
 			: "memory", "cc");
 		return old_val;
+	}
 #if (BITS_PER_LONG == 64)
 	case 8:
+	{
 		__asm__ __volatile__(
-			"	csg %0,%2,%1\n"
-			: "+r"(old), "+m"(*addr)
-			: "r"(new)
+			"	csg %0,%2," MEMOP_REF(%3) "\n"
+			: "+r" (old), MEMOP_OUT (__hp(addr))
+			: "r" (_new), MEMOP_IN (__hp(addr))
 			: "memory", "cc");
 		return old;
+	}
 #endif
 	default:
 		__asm__ __volatile__(".long	0xd00d00");
@@ -114,10 +157,10 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
 	return 0;
 }
 
-#define uatomic_cmpxchg(addr, old, new)				\
+#define uatomic_cmpxchg(addr, old, _new)			\
 	(__typeof__(*(addr))) _uatomic_cmpxchg((addr),		\
 					       (unsigned long)(old),\
-					       (unsigned long)(new),\
+					       (unsigned long)(_new),\
 					       sizeof(*(addr)))
 
 /* uatomic_add_return */
@@ -162,16 +205,10 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
 					  (unsigned long)(v),		\
 					  sizeof(*(addr))))
 
-/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */
-
-#define uatomic_sub_return(addr, v)	uatomic_add_return((addr), -(v))
-
-#define uatomic_add(addr, v)		(void)uatomic_add_return((addr), (v))
-#define uatomic_sub(addr, v)		(void)uatomic_sub_return((addr), (v))
-
-#define uatomic_inc(addr)		uatomic_add((addr), 1)
-#define uatomic_dec(addr)		uatomic_add((addr), -1)
+#ifdef __cplusplus
+}
+#endif
 
-#define compat_uatomic_cmpxchg(ptr, old, _new) uatomic_cmpxchg(ptr, old, _new)
+#include <urcu/uatomic_generic.h>
 
 #endif /* _URCU_UATOMIC_ARCH_S390_H */
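
For context, here is a minimal caller-side sketch (not part of the patch) showing how the uatomic_cmpxchg() macro touched above is typically used. It assumes the arch-selecting wrapper header urcu/uatomic_arch.h is the intended include in this era of the tree, that uatomic_read() is still available (presumably via the generic header now included at the bottom of the file), and that the function name bounded_inc() is purely illustrative.

#include <urcu/uatomic_arch.h>	/* resolves to uatomic_arch_s390.h on s390/s390x */

static unsigned long counter;

/*
 * Atomically increment 'counter', but never past 'limit'.
 * uatomic_cmpxchg() returns the value found at the address: the update
 * took effect only if that value equals the 'old' value we passed in;
 * otherwise another thread raced us and we retry with the value we saw.
 * On s390 this expands to the cs/csg sequences shown in the patch above.
 */
static int bounded_inc(unsigned long limit)
{
	unsigned long old, seen;

	old = uatomic_read(&counter);
	for (;;) {
		if (old >= limit)
			return 0;	/* already at the limit, no update */
		seen = uatomic_cmpxchg(&counter, old, old + 1);
		if (seen == old)
			return 1;	/* our compare-and-swap won */
		old = seen;		/* lost the race, retry from the observed value */
	}
}

Whether the cs/csg instructions end up with the short "Q" memory operand or the explicit base-register form is decided entirely by the COMPILER_HAVE_SHORT_MEM_OPERAND test added in the patch; the caller-visible API is unchanged.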