Add missing rcu_cmpxchg_pointer define
diff --git a/arch_atomic_ppc.h b/arch_atomic_ppc.h
index d666230fea7a80b0ee495990ce2e661750ebd406..54c5f5caae0fa05a074a4ad6c33f172bd6eb027b 100644
--- a/arch_atomic_ppc.h
+++ b/arch_atomic_ppc.h
@@ -20,6 +20,8 @@
  * Boehm-Demers-Weiser conservative garbage collector.
  */
 
+#include <compiler.h>
+
 #ifndef __SIZEOF_LONG__
 #ifdef __powerpc64__
 #define __SIZEOF_LONG__ 8
 
 #ifndef _INCLUDE_API_H
 
+#define atomic_set(addr, v)                            \
+do {                                                   \
+       ACCESS_ONCE(*(addr)) = (v);                     \
+} while (0)
+
+#define atomic_read(addr)      ACCESS_ONCE(*(addr))
+
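
A minimal usage sketch for the two helpers above, assuming compiler.h supplies the usual volatile-cast ACCESS_ONCE(); the flag variable and function names are illustrative, not part of the patch. These macros only force a single, untorn load or store and imply no memory barrier:

/* Illustrative usage sketch, not part of the patch. */
static int ready;                       /* illustrative shared flag */

static void set_ready(void)
{
        atomic_set(&ready, 1);          /* single plain store, no fence */
}

static int check_ready(void)
{
        return atomic_read(&ready);     /* re-reads memory on every call */
}
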
 /*
  * Using a isync as second barrier for exchange to provide acquire semantic.
  * According to atomic_ops/sysdeps/gcc/powerpc.h, the documentation is "fairly
  * Derived from AO_compare_and_swap(), but removed the comparison.
  */
 
-static __attribute__((always_inline))
-unsigned int atomic_exchange_32(volatile unsigned int *addr, unsigned int val)
+/* xchg */
+
+static inline __attribute__((always_inline))
+unsigned long _atomic_exchange(void *addr, unsigned long val, int len)
 {
-       unsigned int result;
-
-       __asm__ __volatile__(
-               "lwsync\n"
-       "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
-               "stwcx. %2,0,%1\n"      /* else store conditional */
-               "bne- 1b\n"             /* retry if lost reservation */
-               "isync\n"
-                       : "=&r"(result)
-                       : "r"(addr), "r"(val)
-                       : "memory", "cc");
-
-       return result;
+       switch (len) {
+       case 4:
+       {
+               unsigned int result;
+
+               __asm__ __volatile__(
+                       "lwsync\n"
+               "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
+                       "stwcx. %2,0,%1\n"      /* else store conditional */
+                       "bne- 1b\n"             /* retry if lost reservation */
+                       "isync\n"
+                               : "=&r"(result)
+                               : "r"(addr), "r"(val)
+                               : "memory", "cc");
+
+               return result;
+       }
+#if (BITS_PER_LONG == 64)
+       case 8:
+       {
+               unsigned long result;
+
+               __asm__ __volatile__(
+                       "lwsync\n"
+               "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
+                       "stdcx. %2,0,%1\n"      /* else store conditional */
+                       "bne- 1b\n"             /* retry if lost reservation */
+                       "isync\n"
+                               : "=&r"(result)
+                               : "r"(addr), "r"(val)
+                               : "memory", "cc");
+
+               return result;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__(ILLEGAL_INSTR);
+       return 0;
 }
 
-#if (BITS_PER_LONG == 64)
+#define xchg(addr, v)                                                      \
+       ((__typeof__(*(addr))) _atomic_exchange((addr), (unsigned long)(v), \
+                                               sizeof(*(addr))))
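
As a hedged illustration of the acquire semantics discussed in the comment above, a tiny test-and-set lock can be built on xchg(); the struct and function names are assumptions for the sketch, and the lwsync in the unlock path is an explicit release barrier written out by hand rather than something this file defines:

/* Illustrative usage sketch, not part of the patch. */
struct tas_lock {
        unsigned int locked;            /* 0 = free, 1 = held */
};

static void tas_lock_acquire(struct tas_lock *lock)
{
        /* xchg() returns the previous value; isync provides acquire ordering. */
        while (xchg(&lock->locked, 1))
                ;                       /* spin while the lock was already held */
}

static void tas_lock_release(struct tas_lock *lock)
{
        /* Release: order prior stores before publishing the unlocked state. */
        __asm__ __volatile__("lwsync" : : : "memory");
        atomic_set(&lock->locked, 0);
}
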
+/* cmpxchg */
 
-static __attribute__((always_inline))
-unsigned long atomic_exchange_64(volatile unsigned long *addr,
-                                unsigned long val)
+static inline __attribute__((always_inline))
+unsigned long _atomic_cmpxchg(void *addr, unsigned long old,
+                             unsigned long _new, int len)
 {
-       unsigned long result;
-
-       __asm__ __volatile__(
-               "lwsync\n"
-       "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
-               "stdcx. %2,0,%1\n"      /* else store conditional */
-               "bne- 1b\n"             /* retry if lost reservation */
-               "isync\n"
-                       : "=&r"(result)
-                       : "r"(addr), "r"(val)
-                       : "memory", "cc");
-
-       return result;
+       switch (len) {
+       case 4:
+       {
+               unsigned int old_val;
+
+               __asm__ __volatile__(
+                       "lwsync\n"
+               "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
+                       "cmpw %0,%3\n"          /* if load is not equal to */
+                       "bne 2f\n"              /* old, fail */
+                       "stwcx. %2,0,%1\n"      /* else store conditional */
+                       "bne- 1b\n"             /* retry if lost reservation */
+                       "isync\n"
+               "2:\n"
+                               : "=&r"(old_val)
+                               : "r"(addr), "r"((unsigned int)_new),
+                                 "r"((unsigned int)old)
+                               : "memory", "cc");
+
+               return old_val;
+       }
+#if (BITS_PER_LONG == 64)
+       case 8:
+       {
+               unsigned long old_val;
+
+               __asm__ __volatile__(
+                       "lwsync\n"
+               "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
+                       "cmpd %0,%3\n"          /* if load is not equal to */
+                       "bne 2f\n"              /* old, fail */
+                       "stdcx. %2,0,%1\n"      /* else store conditional */
+                       "bne- 1b\n"             /* retry if lost reservation */
+                       "isync\n"
+               "2:\n"
+                               : "=&r"(old_val)
+                               : "r"(addr), "r"((unsigned long)_new),
+                                 "r"((unsigned long)old)
+                               : "memory", "cc");
+
+               return old_val;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__(ILLEGAL_INSTR);
+       return 0;
 }
 
-#endif
 
-static __attribute__((always_inline))
-unsigned long _atomic_exchange(volatile void *addr, unsigned long val, int len)
+#define cmpxchg(addr, old, _new)                                           \
+       ((__typeof__(*(addr))) _atomic_cmpxchg((addr), (unsigned long)(old),\
+                                               (unsigned long)(_new),      \
+                                               sizeof(*(addr))))
+
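
A minimal sketch of the cmpxchg() calling convention (the counter and helper below are illustrative, not part of the patch): the macro returns the value found at the address, so success is detected by comparing that return value with the expected old value:

/* Illustrative usage sketch, not part of the patch. */
static unsigned long total;             /* illustrative shared counter */

static void add_to_total(unsigned long v)
{
        unsigned long old, seen;

        do {
                old = atomic_read(&total);
                seen = cmpxchg(&total, old, old + v);
        } while (seen != old);          /* lost a race, reload and retry */
}
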
+/* atomic_add_return */
+
+static inline __attribute__((always_inline))
+unsigned long _atomic_add_return(void *addr, unsigned long val,
+                                int len)
 {
        switch (len) {
-       case 4: return atomic_exchange_32(addr, val);
+       case 4:
+       {
+               unsigned int result;
+
+               __asm__ __volatile__(
+                       "lwsync\n"
+               "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
+                       "add %0,%2,%0\n"        /* add val to value loaded */
+                       "stwcx. %0,0,%1\n"      /* store conditional */
+                       "bne- 1b\n"             /* retry if lost reservation */
+                       "isync\n"
+                               : "=&r"(result)
+                               : "r"(addr), "r"(val)
+                               : "memory", "cc");
+
+               return result;
+       }
 #if (BITS_PER_LONG == 64)
-       case 8: return atomic_exchange_64(addr, val);
+       case 8:
+       {
+               unsigned long result;
+
+               __asm__ __volatile__(
+                       "lwsync\n"
+               "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
+                       "add %0,%2,%0\n"        /* add val to value loaded */
+                       "stdcx. %0,0,%1\n"      /* store conditional */
+                       "bne- 1b\n"             /* retry if lost reservation */
+                       "isync\n"
+                               : "=&r"(result)
+                               : "r"(addr), "r"(val)
+                               : "memory", "cc");
+
+               return result;
+       }
 #endif
        }
        /* generate an illegal instruction. Cannot catch this with linker tricks
@@ -99,8 +214,21 @@ unsigned long _atomic_exchange(volatile void *addr, unsigned long val, int len)
        return 0;
 }
 
-#define xchg(addr, v)  (__typeof__(*(addr))) _atomic_exchange((addr), (v), \
-                                                           sizeof(*(addr)))
+
+#define atomic_add_return(addr, v)                                     \
+       ((__typeof__(*(addr))) _atomic_add_return((addr),               \
+                                                 (unsigned long)(v),   \
+                                                 sizeof(*(addr))))
+
+/* atomic_sub_return, atomic_add, atomic_sub, atomic_inc, atomic_dec */
+
+#define atomic_sub_return(addr, v)     atomic_add_return((addr), -(v))
+
+#define atomic_add(addr, v)            (void)atomic_add_return((addr), (v))
+#define atomic_sub(addr, v)            (void)atomic_sub_return((addr), (v))
+
+#define atomic_inc(addr)               atomic_add((addr), 1)
+#define atomic_dec(addr)               atomic_add((addr), -1)
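
A short reference-counting sketch built on the helpers above; the struct and function names are assumptions used only for illustration:

/* Illustrative usage sketch, not part of the patch. */
struct ref {
        int count;
};

static void ref_get(struct ref *r)
{
        atomic_inc(&r->count);
}

static int ref_put(struct ref *r)
{
        /*
         * atomic_add_return() yields the post-decrement value, so exactly
         * one caller observes the transition to zero and may free the object.
         */
        return atomic_add_return(&r->count, -1) == 0;
}
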
 
 #endif /* #ifndef _INCLUDE_API_H */
 