X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu%2Fuatomic_arch_s390.h;h=813d9708a8efff783938a24c3e55987c64042436;hp=b37e5f0fbf4a771aec326e04e3c6abc468f4a4ff;hb=36bc70a84250927ba68d5096a0a9740aec157f9b;hpb=7d413817f09b2d17b1a79ea012590609ffab5eb6

diff --git a/urcu/uatomic_arch_s390.h b/urcu/uatomic_arch_s390.h
index b37e5f0..813d970 100644
--- a/urcu/uatomic_arch_s390.h
+++ b/urcu/uatomic_arch_s390.h
@@ -1,5 +1,5 @@
-#ifndef _URCU_ARCH_ATOMIC_S390_H
-#define _URCU_ARCH_ATOMIC_S390_H
+#ifndef _URCU_UATOMIC_ARCH_S390_H
+#define _URCU_UATOMIC_ARCH_S390_H
 
 /*
  * Atomic exchange operations for the S390 architecture. Based on information
@@ -8,6 +8,7 @@
  *
  * Copyright (c) 2009 Novell, Inc.
  * Author: Jan Blunck
+ * Copyright (c) 2009 Mathieu Desnoyers
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to
@@ -28,6 +29,13 @@
  * IN THE SOFTWARE.
  */
 
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
 #ifndef __SIZEOF_LONG__
 #ifdef __s390x__
 #define __SIZEOF_LONG__ 8
@@ -40,57 +48,70 @@
 #define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
 #endif
 
-#define uatomic_set(addr, v)				\
-do {							\
-	ACCESS_ONCE(*(addr)) = (v);			\
-} while (0)
+#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
+#define COMPILER_HAVE_SHORT_MEM_OPERAND
+#endif
 
-#define uatomic_read(addr)	ACCESS_ONCE(*(addr))
+/*
+ * MEMOP assembler operand rules:
+ * - op refers to a MEMOP_IN operand
+ * - MEMOP_IN can expand to more than a single operand. Use it at the end of
+ *   the operand list only.
+ */
 
-static inline __attribute__((always_inline))
-unsigned int uatomic_exchange_32(volatile unsigned int *addr, unsigned int val)
-{
-	unsigned int result;
+#ifdef COMPILER_HAVE_SHORT_MEM_OPERAND
 
-	__asm__ __volatile__(
-		"0:	cs %0,%2,%1\n"
-		"	brc 4,0b\n"
-		: "=&r"(result), "=m" (*addr)
-		: "r"(val), "m" (*addr)
-		: "memory", "cc");
+#define MEMOP_OUT(addr)	"=Q" (*(addr))
+#define MEMOP_IN(addr)	"Q" (*(addr))
+#define MEMOP_REF(op)	#op		/* op refers to a MEMOP_IN operand */
 
-	return result;
-}
+#else /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
 
-#if (BITS_PER_LONG == 64)
+#define MEMOP_OUT(addr)	"=m" (*(addr))
+#define MEMOP_IN(addr)	"a" (addr), "m" (*(addr))
+#define MEMOP_REF(op)	"0(" #op ")"	/* op refers to a MEMOP_IN operand */
 
-static inline __attribute__((always_inline))
-unsigned long uatomic_exchange_64(volatile unsigned long *addr,
-				  unsigned long val)
-{
-	unsigned long result;
+#endif /* !COMPILER_HAVE_SHORT_MEM_OPERAND */
 
-	__asm__ __volatile__(
-		"0:	csg %0,%2,%1\n"
-		"	brc 4,0b\n"
-		: "=&r"(result), "=m" (*addr)
-		: "r"(val), "m" (*addr)
-		: "memory", "cc");
+struct __uatomic_dummy {
+	unsigned long v[10];
+};
+#define __hp(x)	((struct __uatomic_dummy *)(x))
 
-	return result;
-}
+#define uatomic_set(addr, v)	STORE_SHARED(*(addr), (v))
+#define uatomic_read(addr)	LOAD_SHARED(*(addr))
 
-#endif
+/* xchg */
 
 static inline __attribute__((always_inline))
 unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
 {
 	switch (len) {
 	case 4:
-		return uatomic_exchange_32(addr, val);
+	{
+		unsigned int old_val;
+
+		__asm__ __volatile__(
+			"0:	cs %0,%2," MEMOP_REF(%3) "\n"
+			"	brc 4,0b\n"
+			: "=&r" (old_val), MEMOP_OUT (__hp(addr))
+			: "r" (val), MEMOP_IN (__hp(addr))
+			: "memory", "cc");
+		return old_val;
+	}
 #if (BITS_PER_LONG == 64)
 	case 8:
-		return uatomic_exchange_64(addr, val);
+	{
+		unsigned long old_val;
+
+		__asm__ __volatile__(
+			"0:	csg %0,%2," MEMOP_REF(%3) "\n"
+			"	brc 4,0b\n"
+			: "=&r" (old_val), MEMOP_OUT (__hp(addr))
+			: "r" (val), MEMOP_IN (__hp(addr))
+			: "memory", "cc");
+		return old_val;
+	}
 #endif
 	default:
 		__asm__ __volatile__(".long	0xd00d00");
@@ -99,124 +120,108 @@ unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
 	return 0;
 }
 
-#define uatomic_xchg(addr, v)						    \
+#define uatomic_xchg(addr, v)						\
 	(__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
 					       sizeof(*(addr)))
 
+/* cmpxchg */
+
 static inline __attribute__((always_inline))
-void uatomic_add_32(volatile unsigned int *addr, unsigned int val)
-{
-	unsigned int result, old;
-
-	__asm__ __volatile__(
-		"	l %0, %1\n"
-		"0:	lr %2, %0\n"
-		"	ar %2, %3\n"
-		"	cs %0,%2,%1\n"
-		"	brc 4,0b\n"
-		: "=&r"(old), "+m" (*addr),
-		  "=&r"(result)
-		: "r"(val)
-		: "memory", "cc");
-}
-
-#if (BITS_PER_LONG == 64)
-
-static inline __attribute__((always_inline))
-void uatomic_add_64(volatile unsigned long *addr, unsigned long val)
-{
-	unsigned long result, old;
-
-	__asm__ __volatile__(
-		"	lg %0, %1\n"
-		"0:	lgr %2, %0\n"
-		"	agr %2, %3\n"
-		"	csg %0,%2,%1\n"
-		"	brc 4,0b\n"
-		: "=&r"(old), "+m" (*addr),
-		  "=&r"(result)
-		: "r"(val)
-		: "memory", "cc");
-}
-
-#endif
-
-static inline __attribute__((always_inline))
-void _uatomic_add(void *addr, unsigned long val, int len)
+unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
+			       unsigned long _new, int len)
 {
 	switch (len) {
 	case 4:
-		uatomic_add_32(addr, val);
-		return;
+	{
+		unsigned int old_val = (unsigned int)old;
+
+		__asm__ __volatile__(
+			"	cs %0,%2," MEMOP_REF(%3) "\n"
+			: "+r" (old_val), MEMOP_OUT (__hp(addr))
+			: "r" (_new), MEMOP_IN (__hp(addr))
+			: "memory", "cc");
+		return old_val;
+	}
 #if (BITS_PER_LONG == 64)
 	case 8:
-		uatomic_add_64(addr, val);
-		return;
+	{
+		__asm__ __volatile__(
+			"	csg %0,%2," MEMOP_REF(%3) "\n"
+			: "+r" (old), MEMOP_OUT (__hp(addr))
+			: "r" (_new), MEMOP_IN (__hp(addr))
+			: "memory", "cc");
+		return old;
+	}
 #endif
 	default:
 		__asm__ __volatile__(".long	0xd00d00");
 	}
-	return;
+	return 0;
 }
 
-#define uatomic_add(addr, val)						\
-	_uatomic_add((addr), (unsigned long)(val), sizeof(*(addr)))
-
-static inline __attribute__((always_inline))
-unsigned int uatomic_cmpxchg_32(volatile unsigned int *addr, unsigned int old,
-				unsigned int new)
-{
-	__asm__ __volatile__(
-		"	cs %0,%2,%1\n"
-		: "+r"(old), "+m"(*addr)
-		: "r"(new)
-		: "memory", "cc");
-
-	return old;
-}
+#define uatomic_cmpxchg(addr, old, _new)				\
+	(__typeof__(*(addr))) _uatomic_cmpxchg((addr),			\
+					       (unsigned long)(old),	\
+					       (unsigned long)(_new),	\
+					       sizeof(*(addr)))
 
-#if (BITS_PER_LONG == 64)
+/* uatomic_add_return */
 
 static inline __attribute__((always_inline))
-unsigned long uatomic_cmpxchg_64(volatile unsigned long *addr,
-				 unsigned long old, unsigned long new)
-{
-	__asm__ __volatile__(
-		"	csg %0,%2,%1\n"
-		: "+r"(old), "+m"(*addr)
-		: "r"(new)
-		: "memory", "cc");
-
-	return old;
-}
-
-#endif
-
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
-			       unsigned long new, int len)
+unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
 {
 	switch (len) {
 	case 4:
-		return uatomic_cmpxchg_32(addr, old, new);
+	{
+		unsigned int old, oldt;
+
+		oldt = uatomic_read((unsigned int *)addr);
+		do {
+			old = oldt;
+			oldt = _uatomic_cmpxchg(addr, old, old + val, 4);
+		} while (oldt != old);
+
+		return old + val;
+	}
 #if (BITS_PER_LONG == 64)
 	case 8:
-		return uatomic_cmpxchg_64(addr, old, new);
+	{
+		unsigned long old, oldt;
+
+		oldt = uatomic_read((unsigned long *)addr);
+		do {
+			old = oldt;
+			oldt = _uatomic_cmpxchg(addr, old, old + val, 8);
+		} while (oldt != old);
+
+		return old + val;
+	}
 #endif
-	default:
-		__asm__ __volatile__(".long	0xd00d00");
 	}
-
+	__builtin_trap();
 	return 0;
 }
 
-#define uatomic_cmpxchg(addr, old, new)					    \
-	(__typeof__(*(addr))) _uatomic_cmpxchg((addr),			    \
-					       (unsigned long)(old),	    \
-					       (unsigned long)(new),	    \
-					       sizeof(*(addr)))
+#define uatomic_add_return(addr, v)					\
+	((__typeof__(*(addr))) _uatomic_add_return((addr),		\
+						  (unsigned long)(v),	\
+						  sizeof(*(addr))))
+
+/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */
+
+#define uatomic_sub_return(addr, v)	uatomic_add_return((addr), -(v))
+
+#define uatomic_add(addr, v)		(void)uatomic_add_return((addr), (v))
+#define uatomic_sub(addr, v)		(void)uatomic_sub_return((addr), (v))
 
-#define URCU_CAS_AVAIL()	1
+#define uatomic_inc(addr)		uatomic_add((addr), 1)
+#define uatomic_dec(addr)		uatomic_add((addr), -1)
+
+#define compat_uatomic_cmpxchg(ptr, old, _new)	uatomic_cmpxchg(ptr, old, _new)
+
+#ifdef __cplusplus
+}
+#endif
 
-#endif /* _URCU_ARCH_ATOMIC_S390_H */
+#endif /* _URCU_UATOMIC_ARCH_S390_H */
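
A note on the MEMOP macros: with gcc 3.3 or newer (COMPILER_HAVE_SHORT_MEM_OPERAND), the "Q" constraint hands the assembler a base-plus-short-displacement memory operand directly, so MEMOP_REF(%3) stringifies to a bare "%3". On older compilers, the address travels in an address register via the "a" constraint, a companion "m" operand still tells the compiler that the memory is accessed, and MEMOP_REF builds the 0(register) reference by hand. The __hp() cast exists because addr arrives as a void *, which cannot be dereferenced to form a memory operand; the oversized dummy struct also keeps the operand wide enough for either access size. As a sketch (not part of the patch), the 4-byte exchange expands roughly as follows in the two cases:

	/* gcc > 3.2: one "Q" memory operand; MEMOP_REF(%3) is just "%3". */
	__asm__ __volatile__(
		"0:	cs %0,%2,%3\n"
		"	brc 4,0b\n"
		: "=&r" (old_val), "=Q" (*__hp(addr))
		: "r" (val), "Q" (*__hp(addr))
		: "memory", "cc");

	/*
	 * Older gcc: %3 is the address in an "a" register, %4 a dummy
	 * "m" operand, and MEMOP_REF(%3) becomes "0(%3)".
	 */
	__asm__ __volatile__(
		"0:	cs %0,%2,0(%3)\n"
		"	brc 4,0b\n"
		: "=&r" (old_val), "=m" (*__hp(addr))
		: "r" (val), "a" (__hp(addr)), "m" (*__hp(addr))
		: "memory", "cc");

This is also why MEMOP_IN must come last in the operand list: in the fallback case it contributes two operands, which would otherwise shift the numbering of anything placed after it.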
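
On the add_return implementation: s390 provides compare-and-swap (cs/csg) but no fetch-and-add, so _uatomic_add_return is a cmpxchg retry loop: read the current value, try to install old + val, and retry if another CPU changed the word in the meantime. The initial uatomic_read needs no extra ordering because the compare-and-swap validates it. A minimal portable sketch of the same pattern, written with the GCC __sync builtin (gcc 4.1+) purely for illustration — the patch itself goes through _uatomic_cmpxchg:

	static inline unsigned long
	add_return_sketch(unsigned long *addr, unsigned long val)
	{
		unsigned long old, oldt = *addr;	/* plain read; the CAS validates it */

		do {
			old = oldt;
			/* Returns the prior value of *addr; equality means we won. */
			oldt = __sync_val_compare_and_swap(addr, old, old + val);
		} while (oldt != old);

		return old + val;
	}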
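
Caller-side view of the API this header ends up providing. The direct #include below is hypothetical — in-tree users normally reach this file through liburcu's architecture-selection wrapper rather than including it directly:

	#include <urcu/uatomic_arch_s390.h>	/* hypothetical direct include */

	static unsigned long counter;

	void counter_demo(void)
	{
		unsigned long snap, prev;

		uatomic_set(&counter, 0UL);
		uatomic_inc(&counter);				/* counter == 1 */
		snap = uatomic_add_return(&counter, 9UL);	/* snap == 10 */

		/* Install 42 only if no other thread moved the counter since. */
		prev = uatomic_cmpxchg(&counter, snap, 42UL);
		if (prev != snap) {
			/* Lost the race; prev holds the value we saw instead. */
		}
	}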