update x86 and ppc atomic ops
author Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Tue, 22 Sep 2009 22:44:26 +0000 (18:44 -0400)
committer Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Tue, 22 Sep 2009 22:44:26 +0000 (18:44 -0400)
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
arch_atomic_x86.h
tests/test_atomic.c

index 6b33d453285f2e11535f30a9f9f7621b31a5c184..879ce7012183d84713f00ab46f3eda90cb90f545 100644 (file)
@@ -20,6 +20,8 @@
  * Boehm-Demers-Weiser conservative garbage collector.
  */
 
+#include <compiler.h>
+
 #ifndef BITS_PER_LONG
 #define BITS_PER_LONG  (__SIZEOF_LONG__ * 8)
 #endif
@@ -35,16 +37,24 @@ struct __atomic_dummy {
 };
 #define __hp(x)        ((struct __atomic_dummy *)(x))
 
+#define atomic_set(addr, v)                            \
+do {                                                   \
+       ACCESS_ONCE(*(addr)) = (v);                     \
+} while (0)
+
+#define atomic_read(addr)      ACCESS_ONCE(*(addr))
+
 /* cmpxchg */
 
 static inline __attribute__((always_inline))
 unsigned long _atomic_cmpxchg(volatile void *addr, unsigned long old,
-               unsigned long _new, int len)
+                             unsigned long _new, int len)
 {
        switch (len) {
        case 1:
        {
                unsigned char result = old;
+
                __asm__ __volatile__(
                "lock; cmpxchgb %2, %1"
                        : "+a"(result), "+m"(*__hp(addr))
@@ -55,6 +65,7 @@ unsigned long _atomic_cmpxchg(volatile void *addr, unsigned long old,
        case 2:
        {
                unsigned short result = old;
+
                __asm__ __volatile__(
                "lock; cmpxchgw %2, %1"
                        : "+a"(result), "+m"(*__hp(addr))
@@ -65,6 +76,7 @@ unsigned long _atomic_cmpxchg(volatile void *addr, unsigned long old,
        case 4:
        {
                unsigned int result = old;
+
                __asm__ __volatile__(
                "lock; cmpxchgl %2, %1"
                        : "+a"(result), "+m"(*__hp(addr))
@@ -76,6 +88,7 @@ unsigned long _atomic_cmpxchg(volatile void *addr, unsigned long old,
        case 8:
        {
                unsigned long result = old;
+
                __asm__ __volatile__(
                "lock; cmpxchgq %2, %1"
                        : "+a"(result), "+m"(*__hp(addr))
@@ -156,6 +169,73 @@ unsigned long _atomic_exchange(volatile void *addr, unsigned long val, int len)
        ((__typeof__(*(addr))) _atomic_exchange((addr), (unsigned long)(v), \
                                                sizeof(*(addr))))
 
+/* atomic_add_return, atomic_sub_return */
+
+static inline __attribute__((always_inline))
+unsigned long _atomic_add_return(volatile void *addr, unsigned long val,
+                                int len)
+{
+       switch (len) {
+       case 1:
+       {
+               unsigned char result = val;
+
+               __asm__ __volatile__(
+               "lock; xaddb %1, %0"
+                       : "+m"(*__hp(addr)), "+q" (result)
+                       :
+                       : "memory");
+               return result + (unsigned char)val;
+       }
+       case 2:
+       {
+               unsigned short result = val;
+
+               __asm__ __volatile__(
+               "lock; xaddw %1, %0"
+                       : "+m"(*__hp(addr)), "+r" (result)
+                       :
+                       : "memory");
+               return result + (unsigned short)val;
+       }
+       case 4:
+       {
+               unsigned int result = val;
+
+               __asm__ __volatile__(
+               "lock; xaddl %1, %0"
+                       : "+m"(*__hp(addr)), "+r" (result)
+                       :
+                       : "memory");
+               return result + (unsigned int)val;
+       }
+#if (BITS_PER_LONG == 64)
+       case 8:
+       {
+               unsigned long result = val;
+
+               __asm__ __volatile__(
+               "lock; xaddq %1, %0"
+                       : "+m"(*__hp(addr)), "+r" (result)
+                       :
+                       : "memory");
+               return result + (unsigned long)val;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__("ud2");
+       return 0;
+}
+
+#define atomic_add_return(addr, v)                                     \
+       ((__typeof__(*(addr))) _atomic_add_return((addr),               \
+                                                 (unsigned long)(v),   \
+                                                 sizeof(*(addr))))
+
+#define atomic_sub_return(addr, v)     atomic_add_return((addr), -(v))
+
 /* atomic_add, atomic_sub */
 
 static inline __attribute__((always_inline))
index 3d2b64e434d3ef97eb0feec03a2c49968b96bc2e..102012fe2429008cf28f9736ec1b074f7fa00c8f 100644 (file)
@@ -16,18 +16,29 @@ do {                                                \
        __typeof__(*(ptr)) v;                   \
                                                \
        atomic_add(ptr, 10);                    \
-       assert(*(ptr) == 10);                   \
+       assert(atomic_read(ptr) == 10);         \
        atomic_add(ptr, -11UL);                 \
-       assert(*(ptr) == (__typeof__(*(ptr)))-1UL);     \
+       assert(atomic_read(ptr) == (__typeof__(*(ptr)))-1UL);   \
        v = cmpxchg(ptr, -1UL, 22);             \
-       assert(*(ptr) == 22);                   \
+       assert(atomic_read(ptr) == 22);         \
        assert(v == (__typeof__(*(ptr)))-1UL);  \
        v = cmpxchg(ptr, 33, 44);               \
-       assert(*(ptr) == 22);                   \
+       assert(atomic_read(ptr) == 22);         \
        assert(v == 22);                        \
        v = xchg(ptr, 55);                      \
-       assert(*(ptr) == 55);                   \
+       assert(atomic_read(ptr) == 55);         \
        assert(v == 22);                        \
+       atomic_set(ptr, 22);                    \
+       atomic_inc(ptr);                        \
+       assert(atomic_read(ptr) == 23);         \
+       atomic_dec(ptr);                        \
+       assert(atomic_read(ptr) == 22);         \
+       v = atomic_add_return(ptr, 100);        \
+       assert(v == 122);                       \
+       assert(atomic_read(ptr) == 122);        \
+       v = atomic_sub_return(ptr, 1);          \
+       assert(v == 121);                       \
+       assert(atomic_read(ptr) == 121);        \
 } while (0)
 
 int main(int argc, char **argv)
This page took 0.027199 seconds and 4 git commands to generate.