Add inc/dec x86 atomics
[urcu.git] / arch_atomic_x86.h
index 8423ae3f9e0e71bb5d3039f9eeabab082db4f96f..fdd3d6b58ecd28eaf620e10001920eae0193a073 100644 (file)
  * Derived from AO_compare_and_swap() and AO_test_and_set_full().
  */
 
-static __attribute__((always_inline))
-unsigned int atomic_exchange_32(volatile unsigned int *addr, unsigned int val)
-{
-       unsigned int result;
-
-       __asm__ __volatile__(
-               /* Note: the "xchg" instruction does not need a "lock" prefix */
-               "xchgl %0, %1"
-                       : "=&r"(result), "=m"(*addr)
-                       : "0" (val), "m"(*addr)
-                       : "memory");
+/* Dummy wide type: makes the "m"/"+m" asm operands cover the full object
+ * accessed through a void * address, whatever the operand size. */
+struct __atomic_dummy {
+       unsigned long v[10];
+};
+#define __hp(x)        ((struct __atomic_dummy *)(x))
 
-       return result;
-}
-
-#if (BITS_PER_LONG == 64)
+/* cmpxchg */
 
-static __attribute__((always_inline))
-unsigned long atomic_exchange_64(volatile unsigned long *addr,
-                                unsigned long val)
+static inline __attribute__((always_inline))
+unsigned long _atomic_cmpxchg(volatile void *addr, unsigned long old,
+               unsigned long _new, int len)
 {
-       unsigned long result;
-
-       __asm__ __volatile__(
-               /* Note: the "xchg" instruction does not need a "lock" prefix */
-               "xchgq %0, %1"
-                       : "=&r"(result), "=m"(*addr)
-                       : "0" (val), "m"(*addr)
+       switch (len) {
+       case 1:
+       {
+               unsigned char result = old;
+               __asm__ __volatile__(
+               "lock; cmpxchgb %2, %1"
+                       : "+a"(result), "+m"(*__hp(addr))
+                       : "q"((unsigned char)_new)
                        : "memory");
-
-       return result;
+               return result;
+       }
+       case 2:
+       {
+               unsigned short result = old;
+               __asm__ __volatile__(
+               "lock; cmpxchgw %2, %1"
+                       : "+a"(result), "+m"(*__hp(addr))
+                       : "r"((unsigned short)_new)
+                       : "memory");
+               return result;
+       }
+       case 4:
+       {
+               unsigned int result = old;
+               __asm__ __volatile__(
+               "lock; cmpxchgl %2, %1"
+                       : "+a"(result), "+m"(*__hp(addr))
+                       : "r"((unsigned int)_new)
+                       : "memory");
+               return result;
+       }
+#if (BITS_PER_LONG == 64)
+       case 8:
+       {
+               unsigned long result = old;
+               __asm__ __volatile__(
+               "lock; cmpxchgq %2, %1"
+                       : "+a"(result), "+m"(*__hp(addr))
+                       : "r"((unsigned long)_new)
+                       : "memory");
+               return result;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__("ud2");
+       return 0;
 }
 
-#endif
+#define cmpxchg(addr, old, _new)                                           \
+       ((__typeof__(*(addr))) _atomic_cmpxchg((addr), (unsigned long)(old),\
+                                               (unsigned long)(_new),      \
+                                               sizeof(*(addr))))
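+
+/*
+ * cmpxchg() returns the value that was in *addr before the operation; the
+ * swap took place iff that value equals "old". Example (illustrative only,
+ * "count" is a hypothetical unsigned long counter):
+ *
+ *     unsigned long old;
+ *     do {
+ *             old = count;
+ *     } while (cmpxchg(&count, old, old + 1) != old);
+ */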
+
+/* xchg */
 
-static __attribute__((always_inline))
+static inline __attribute__((always_inline))
 unsigned long _atomic_exchange(volatile void *addr, unsigned long val, int len)
 {
+       /* Note: the "xchg" instruction does not need a "lock" prefix. */
        switch (len) {
-       case 4: return atomic_exchange_32(addr, val);
+       case 1:
+       {
+               unsigned char result;
+               __asm__ __volatile__(
+               "xchgb %0, %1"
+                       : "=q"(result), "+m"(*__hp(addr))
+                       : "0" ((unsigned char)val)
+                       : "memory");
+               return result;
+       }
+       case 2:
+       {
+               unsigned short result;
+               __asm__ __volatile__(
+               "xchgw %0, %1"
+                       : "=r"(result), "+m"(*__hp(addr))
+                       : "0" ((unsigned short)val)
+                       : "memory");
+               return result;
+       }
+       case 4:
+       {
+               unsigned int result;
+               __asm__ __volatile__(
+               "xchgl %0, %1"
+                       : "=r"(result), "+m"(*__hp(addr))
+                       : "0" ((unsigned int)val)
+                       : "memory");
+               return result;
+       }
 #if (BITS_PER_LONG == 64)
-       case 8: return atomic_exchange_64(addr, val);
+       case 8:
+       {
+               unsigned long result;
+               __asm__ __volatile__(
+               "xchgq %0, %1"
+                       : "=r"(result), "+m"(*__hp(addr))
+                       : "0" ((unsigned long)val)
+                       : "memory");
+               return result;
+       }
 #endif
        }
        /* generate an illegal instruction. Cannot catch this with linker tricks
@@ -84,6 +156,60 @@ unsigned long _atomic_exchange(volatile void *addr, unsigned long val, int len)
        ((__typeof__(*(addr))) _atomic_exchange((addr), (unsigned long)(v), \
                                                sizeof(*(addr))))
 
+/* atomic_add */
+
+static inline __attribute__((always_inline))
+void _atomic_add(volatile void *addr, unsigned long val, int len)
+{
+       switch (len) {
+       case 1:
+       {
+               __asm__ __volatile__(
+               "lock; addb %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "iq" ((unsigned char)val)
+                       : "memory");
+               return;
+       }
+       case 2:
+       {
+               __asm__ __volatile__(
+               "lock; addw %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "ir" ((unsigned short)val)
+                       : "memory");
+               return;
+       }
+       case 4:
+       {
+               __asm__ __volatile__(
+               "lock; addl %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "ir" ((unsigned int)val)
+                       : "memory");
+               return;
+       }
+#if (BITS_PER_LONG == 64)
+       case 8:
+       {
+               __asm__ __volatile__(
+               "lock; addq %1, %0"
+                       : "=m"(*__hp(addr))
+                       : "er" ((unsigned long)val)
+                       : "memory");
+               return;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__("ud2");
+       return;
+}
+
+#define atomic_add(addr, v)                                               \
+       (_atomic_add((addr), (unsigned long)(v), sizeof(*(addr))))
+
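+/* atomic_inc */
+
+/*
+ * A minimal sketch of the inc/dec helpers named in the commit subject,
+ * assuming they mirror the _atomic_add pattern above with "lock; inc".
+ * The definitions actually committed may differ; atomic_dec would follow
+ * the same shape with "lock; decb/decw/decl/decq".
+ */
+static inline __attribute__((always_inline))
+void _atomic_inc(volatile void *addr, int len)
+{
+       switch (len) {
+       case 1:
+       {
+               __asm__ __volatile__(
+               "lock; incb %0"
+                       : "=m"(*__hp(addr))
+                       :
+                       : "memory");
+               return;
+       }
+       case 2:
+       {
+               __asm__ __volatile__(
+               "lock; incw %0"
+                       : "=m"(*__hp(addr))
+                       :
+                       : "memory");
+               return;
+       }
+       case 4:
+       {
+               __asm__ __volatile__(
+               "lock; incl %0"
+                       : "=m"(*__hp(addr))
+                       :
+                       : "memory");
+               return;
+       }
+#if (BITS_PER_LONG == 64)
+       case 8:
+       {
+               __asm__ __volatile__(
+               "lock; incq %0"
+                       : "=m"(*__hp(addr))
+                       :
+                       : "memory");
+               return;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__("ud2");
+       return;
+}
+
+#define atomic_inc(addr)       (_atomic_inc((addr), sizeof(*(addr))))
+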
 #endif /* #ifndef _INCLUDE_API_H */
 
 #endif /* ARCH_ATOMIC_X86_H */