From 8af575091284bb94eb10f0534ef2cdd6cc21df2c Mon Sep 17 00:00:00 2001
From: Jan Blunck
Date: Tue, 6 Oct 2009 12:31:41 +0200
Subject: [PATCH] s390: Add uatomic_set(), uatomic_read(), uatomic_add(),
 uatomic_cmpxchg()

Mathieu needs atomic cmpxchg and add for all architectures.

Signed-off-by: Jan Blunck
---
 urcu/arch_uatomic_s390.h | 122 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 122 insertions(+)

diff --git a/urcu/arch_uatomic_s390.h b/urcu/arch_uatomic_s390.h
index c289c74..917dbf2 100644
--- a/urcu/arch_uatomic_s390.h
+++ b/urcu/arch_uatomic_s390.h
@@ -40,6 +40,13 @@
 #define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
 #endif
 
+#define uatomic_set(addr, v)				\
+do {							\
+	ACCESS_ONCE(*(addr)) = (v);			\
+} while (0)
+
+#define uatomic_read(addr)	ACCESS_ONCE(*(addr))
+
 static inline __attribute__((always_inline))
 unsigned int uatomic_exchange_32(volatile unsigned int *addr, unsigned int val)
 {
@@ -96,4 +103,119 @@ unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
 	(__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
 					       sizeof(*(addr)))
 
+
+static inline __attribute__((always_inline))
+void uatomic_add_32(volatile unsigned int *addr, unsigned int val)
+{
+	unsigned int result, old;
+
+	__asm__ __volatile__(
+		"	l %0, %1\n"
+		"0:	lr %2, %0\n"
+		"	ar %2, %3\n"
+		"	cs %0,%2,%1\n"
+		"	brc 4,0b\n"
+		: "=&r"(old), "+m" (*addr),
+		  "=&r"(result)
+		: "r"(val)
+		: "memory", "cc");
+}
+
+#if (BITS_PER_LONG == 64)
+
+static inline __attribute__((always_inline))
+void uatomic_add_64(volatile unsigned long *addr, unsigned long val)
+{
+	unsigned long result, old;
+
+	__asm__ __volatile__(
+		"	lg %0, %1\n"
+		"0:	lgr %2, %0\n"
+		"	agr %2, %3\n"
+		"	csg %0,%2,%1\n"
+		"	brc 4,0b\n"
+		: "=&r"(old), "+m" (*addr),
+		  "=&r"(result)
+		: "r"(val)
+		: "memory", "cc");
+}
+
+#endif
+
+static inline __attribute__((always_inline))
+void _uatomic_add(void *addr, unsigned long val, int len)
+{
+	switch (len) {
+	case 4:
+		uatomic_add_32(addr, val);
+		return;
+#if (BITS_PER_LONG == 64)
+	case 8:
+		uatomic_add_64(addr, val);
+		return;
+#endif
+	default:
+		__asm__ __volatile__(".long	0xd00d00");
+	}
+
+	return;
+}
+
+#define uatomic_add(addr, val)					\
+	_uatomic_add((addr), (unsigned long)(val), sizeof(*(addr)))
+
+static inline __attribute__((always_inline))
+unsigned int uatomic_cmpxchg_32(volatile unsigned int *addr, unsigned int old,
+				unsigned int new)
+{
+	__asm__ __volatile__(
+		"	cs %0,%2,%1\n"
+		: "+r"(old), "+m"(*addr)
+		: "r"(new)
+		: "memory", "cc");
+
+	return old;
+}
+
+#if (BITS_PER_LONG == 64)
+
+static inline __attribute__((always_inline))
+unsigned long uatomic_cmpxchg_64(volatile unsigned long *addr,
+				 unsigned long old, unsigned long new)
+{
+	__asm__ __volatile__(
+		"	csg %0,%2,%1\n"
+		: "+r"(old), "+m"(*addr)
+		: "r"(new)
+		: "memory", "cc");
+
+	return old;
+}
+
+#endif
+
+static inline __attribute__((always_inline))
+unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
+			       unsigned long new, int len)
+{
+	switch (len) {
+	case 4:
+		return uatomic_cmpxchg_32(addr, old, new);
+#if (BITS_PER_LONG == 64)
+	case 8:
+		return uatomic_cmpxchg_64(addr, old, new);
+#endif
+	default:
+		__asm__ __volatile__(".long	0xd00d00");
+	}
+
+	return 0;
+}
+
+#define uatomic_cmpxchg(addr, old, new)					\
+	(__typeof__(*(addr))) _uatomic_cmpxchg((addr),			\
+					       (unsigned long)(old),	\
+					       (unsigned long)(new),	\
+					       sizeof(*(addr)))
+
 #endif /* _URCU_ARCH_ATOMIC_S390_H */
-- 
2.34.1
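
Usage note (illustrative, not part of the patch): a minimal sketch of how
the new primitives compose, assuming an s390 build where the header and
its ACCESS_ONCE() dependency are includable on their own; the counter
variable and the demo main() below are made up for the example.

#include <stdio.h>
#include <urcu/arch_uatomic_s390.h>

static unsigned long counter;

int main(void)
{
	unsigned long old;

	uatomic_set(&counter, 5);	/* plain store through ACCESS_ONCE() */
	uatomic_add(&counter, 3);	/* compare-and-swap retry loop (CS/CSG) */

	/*
	 * uatomic_cmpxchg() returns the previous value and stores the
	 * new value only if the old value still matches.
	 */
	old = uatomic_cmpxchg(&counter, 8, 42);
	if (old == 8)
		printf("counter = %lu\n", uatomic_read(&counter)); /* 42 */

	return 0;
}

The sizeof(*(addr)) dispatch in _uatomic_add()/_uatomic_cmpxchg() is what
lets the same uatomic_add()/uatomic_cmpxchg() macros operate on both
4-byte and 8-byte objects; unsupported sizes trap at runtime on the
illegal .long 0xd00d00 opcode.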