From 2c5e5fb3c4f567af1816aaada4cfbc69a0c1a882 Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers
Date: Tue, 22 Sep 2009 17:23:08 -0400
Subject: [PATCH] Update atomic x86_64 cmpxchg

Signed-off-by: Mathieu Desnoyers
---
 arch_atomic_x86.h | 113 +++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 111 insertions(+), 2 deletions(-)

diff --git a/arch_atomic_x86.h b/arch_atomic_x86.h
index fdd3d6b..97a7cdd 100644
--- a/arch_atomic_x86.h
+++ b/arch_atomic_x86.h
@@ -77,7 +77,7 @@ unsigned long _atomic_cmpxchg(volatile void *addr, unsigned long old,
 	{
 		unsigned int result = old;
 		__asm__ __volatile__(
-			"lock; cmpxchgl %2, %1"
+			"lock; cmpxchgq %2, %1"
 			: "+a"(result), "+m"(*__hp(addr))
 			: "r"((unsigned long)_new)
 			: "memory");
@@ -156,7 +156,7 @@ unsigned long _atomic_exchange(volatile void *addr, unsigned long val, int len)
 	((__typeof__(*(addr))) _atomic_exchange((addr), (unsigned long)(v), \
 						sizeof(*(addr))))
 
-/* atomic_add */
+/* atomic_add, atomic_sub */
 
 static inline __attribute__((always_inline))
 void _atomic_add(volatile void *addr, unsigned long val, int len)
@@ -210,6 +210,115 @@ void _atomic_add(volatile void *addr, unsigned long val, int len)
 #define atomic_add(addr, v) \
 	(_atomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
 
+#define atomic_sub(addr, v)	atomic_add((addr), -(v))
+
+
+/* atomic_inc */
+
+static inline __attribute__((always_inline))
+void _atomic_inc(volatile void *addr, int len)
+{
+	switch (len) {
+	case 1:
+	{
+		__asm__ __volatile__(
+			"lock; incb %0"
+			: "=m"(*__hp(addr))
+			:
+			: "memory");
+		return;
+	}
+	case 2:
+	{
+		__asm__ __volatile__(
+			"lock; incw %0"
+			: "=m"(*__hp(addr))
+			:
+			: "memory");
+		return;
+	}
+	case 4:
+	{
+		__asm__ __volatile__(
+			"lock; incl %0"
+			: "=m"(*__hp(addr))
+			:
+			: "memory");
+		return;
+	}
+#if (BITS_PER_LONG == 64)
+	case 8:
+	{
+		__asm__ __volatile__(
+			"lock; incq %0"
+			: "=m"(*__hp(addr))
+			:
+			: "memory");
+		return;
+	}
+#endif
+	}
+	/* generate an illegal instruction. Cannot catch this with linker tricks
+	 * when optimizations are disabled. */
+	__asm__ __volatile__("ud2");
+	return;
+}
+
+#define atomic_inc(addr)	(_atomic_inc((addr), sizeof(*(addr))))
+
+/* atomic_dec */
+
+static inline __attribute__((always_inline))
+void _atomic_dec(volatile void *addr, int len)
+{
+	switch (len) {
+	case 1:
+	{
+		__asm__ __volatile__(
+			"lock; decb %0"
+			: "=m"(*__hp(addr))
+			:
+			: "memory");
+		return;
+	}
+	case 2:
+	{
+		__asm__ __volatile__(
+			"lock; decw %0"
+			: "=m"(*__hp(addr))
+			:
+			: "memory");
+		return;
+	}
+	case 4:
+	{
+		__asm__ __volatile__(
+			"lock; decl %0"
+			: "=m"(*__hp(addr))
+			:
+			: "memory");
+		return;
+	}
+#if (BITS_PER_LONG == 64)
+	case 8:
+	{
+		__asm__ __volatile__(
+			"lock; decq %0"
+			: "=m"(*__hp(addr))
+			:
+			: "memory");
+		return;
+	}
+#endif
+	}
+	/* generate an illegal instruction. Cannot catch this with linker tricks
+	 * when optimizations are disabled. */
+	__asm__ __volatile__("ud2");
+	return;
+}
+
+#define atomic_dec(addr)	(_atomic_dec((addr), sizeof(*(addr))))
+
 #endif /* #ifndef _INCLUDE_API_H */
 #endif /* ARCH_ATOMIC_X86_H */
-- 
2.34.1
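
For reference, below is a minimal usage sketch (not part of the patch) exercising the
atomic_inc, atomic_dec and atomic_sub macros the patch adds, next to the existing
atomic_add. The include path and the BITS_PER_LONG definition are assumptions about the
surrounding build; in the real tree BITS_PER_LONG would normally come from the library's
own arch/compiler headers.

	/* Usage sketch only -- not part of the patch. */
	#include <stdio.h>

	#define BITS_PER_LONG 64	/* assumed; normally provided by arch headers */
	#include "arch_atomic_x86.h"

	int main(void)
	{
		unsigned long counter = 0;

		atomic_inc(&counter);		/* 8-byte operand -> lock; incq : counter == 1 */
		atomic_add(&counter, 5);	/* lock; addq : counter == 6 */
		atomic_sub(&counter, 2);	/* expands to atomic_add(&counter, -(2)) : counter == 4 */
		atomic_dec(&counter);		/* lock; decq : counter == 3 */

		printf("counter = %lu\n", counter);
		return 0;
	}

Because the macros dispatch on sizeof(*(addr)), the same calls on a 4-, 2- or 1-byte
counter would pick the l/w/b instruction variants instead.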