Add powerpc atomic operations
author	Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Tue, 22 Sep 2009 22:11:17 +0000 (18:11 -0400)
committer	Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Tue, 22 Sep 2009 22:11:17 +0000 (18:11 -0400)
cmpxchg
atomic_add_return
atomic_sub_return
atomic_add
atomic_sub
atomic_inc
atomic_dec

(already had xchg)

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
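
As a quick illustration of how the primitives added by this patch are meant to be used, here is a minimal usage sketch (not part of the patch; the refcount/flag variables and function names are made up for the example, and it assumes the including test code already provides BITS_PER_LONG and ILLEGAL_INSTR, as the header appears to expect):

/* Illustration only: a reference count and a claim flag updated with the
 * primitives added by this patch.  Nothing here is part of the patch itself. */
#include "arch_atomic_ppc.h"

static unsigned int refcount = 1;

static void get_ref(void)
{
	atomic_inc(&refcount);			/* atomic refcount++ */
}

static int put_ref(void)
{
	/* atomic_sub_return() yields the value after the decrement,
	 * so zero means the last reference was just dropped. */
	return atomic_sub_return(&refcount, 1) == 0;
}

static int try_claim(volatile unsigned int *flag)
{
	/* cmpxchg() returns the value found at *flag; the claim
	 * succeeded only if that value was still 0. */
	return cmpxchg(flag, 0, 1) == 0;
}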
arch_atomic_ppc.h

index d666230fea7a80b0ee495990ce2e661750ebd406..231a57798598e3da844d6938a95d9502b402da86 100644 (file)
  * Derived from AO_compare_and_swap(), but removed the comparison.
  */
 
+/* xchg */
+
 static __attribute__((always_inline))
-unsigned int atomic_exchange_32(volatile unsigned int *addr, unsigned int val)
+unsigned long _atomic_exchange(volatile void *addr, unsigned long val, int len)
 {
-       unsigned int result;
-
-       __asm__ __volatile__(
-               "lwsync\n"
-       "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
-               "stwcx. %2,0,%1\n"      /* else store conditional */
-               "bne- 1b\n"             /* retry if lost reservation */
-               "isync\n"
-                       : "=&r"(result)
-                       : "r"(addr), "r"(val)
-                       : "memory", "cc");
-
-       return result;
+       switch (len) {
+       case 4:
+       {
+               unsigned int result;
+
+               __asm__ __volatile__(
+                       "lwsync\n"
+               "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
+                       "stwcx. %2,0,%1\n"      /* else store conditional */
+                       "bne- 1b\n"             /* retry if lost reservation */
+                       "isync\n"
+                               : "=&r"(result)
+                               : "r"(addr), "r"(val)
+                               : "memory", "cc");
+
+               return result;
+       }
+#if (BITS_PER_LONG == 64)
+       case 8:
+       {
+               unsigned long result;
+
+               __asm__ __volatile__(
+                       "lwsync\n"
+               "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
+                       "stdcx. %2,0,%1\n"      /* else store conditional */
+                       "bne- 1b\n"             /* retry if lost reservation */
+                       "isync\n"
+                               : "=&r"(result)
+                               : "r"(addr), "r"(val)
+                               : "memory", "cc");
+
+               return result;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__(ILLEGAL_INSTR);
+       return 0;
 }
 
-#if (BITS_PER_LONG == 64)
+#define xchg(addr, v)  (__typeof__(*(addr))) _atomic_exchange((addr), (v), \
+                                                           sizeof(*(addr)))
+
+/* cmpxchg */
 
 static __attribute__((always_inline))
-unsigned long atomic_exchange_64(volatile unsigned long *addr,
-                                unsigned long val)
+unsigned long _atomic_cmpxchg(volatile void *addr, unsigned long old,
+                             unsigned long _new, int len)
 {
-       unsigned long result;
-
-       __asm__ __volatile__(
-               "lwsync\n"
-       "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
-               "stdcx. %2,0,%1\n"      /* else store conditional */
-               "bne- 1b\n"             /* retry if lost reservation */
-               "isync\n"
-                       : "=&r"(result)
-                       : "r"(addr), "r"(val)
-                       : "memory", "cc");
-
-       return result;
+       switch (len) {
+       case 4:
+       {
+               unsigned int old_val;
+
+               __asm__ __volatile__(
+                       "lwsync\n"
+               "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
+                       "cmpw %0,%3\n"          /* if load is not equal to */
+                       "bne 2f\n"              /* old, fail */
+                       "stwcx. %2,0,%1\n"      /* else store conditional */
+                       "bne- 1b\n"             /* retry if lost reservation */
+                       "isync\n"
+               "2:\n"
+                               : "=&r"(old_val)
+                               : "r"(addr), "r"((unsigned int)_new),
+                                 "r"((unsigned int)old)
+                               : "memory", "cc");
+
+               return old_val;
+       }
+#if (BITS_PER_LONG == 64)
+       case 8:
+       {
+               unsigned long old_val;
+
+               __asm__ __volatile__(
+                       "lwsync\n"
+               "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
+                       "cmpd %0,%3\n"          /* if load is not equal to */
+                       "bne 2f\n"              /* old, fail */
+                       "stdcx. %2,0,%1\n"      /* else store conditional */
+                       "bne- 1b\n"             /* retry if lost reservation */
+                       "isync\n"
+               "2:\n"
+                               : "=&r"(old_val)
+                               : "r"(addr), "r"((unsigned long)_new),
+                                 "r"((unsigned long)old)
+                               : "memory", "cc");
+
+               return old_val;
+       }
+#endif
+       }
+       /* generate an illegal instruction. Cannot catch this with linker tricks
+        * when optimizations are disabled. */
+       __asm__ __volatile__(ILLEGAL_INSTR);
+       return 0;
 }
 
-#endif
+#define cmpxchg(addr, old, _new)                                       \
+       (__typeof__(*(addr))) _atomic_cmpxchg((addr), (old), (_new),    \
+                                             sizeof(*(addr)))
+
+/* atomic_add_return */
 
 static __attribute__((always_inline))
-unsigned long _atomic_exchange(volatile void *addr, unsigned long val, int len)
+unsigned long _atomic_add_return(volatile void *addr, unsigned long val,
+                                int len)
 {
        switch (len) {
-       case 4: return atomic_exchange_32(addr, val);
+       case 4:
+       {
+               unsigned int result;
+
+               __asm__ __volatile__(
+                       "lwsync\n"
+               "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
+                       "add %0,%2,%0\n"        /* add val to value loaded */
+                       "stwcx. %0,0,%1\n"      /* store conditional */
+                       "bne- 1b\n"             /* retry if lost reservation */
+                       "isync\n"
+                               : "=&r"(result)
+                               : "r"(addr), "r"(val)
+                               : "memory", "cc");
+
+               return result;
+       }
 #if (BITS_PER_LONG == 64)
-       case 8: return atomic_exchange_64(addr, val);
+       case 8:
+       {
+               unsigned long result;
+
+               __asm__ __volatile__(
+                       "lwsync\n"
+               "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
+                       "add %0,%2,%0\n"        /* add val to value loaded */
+                       "stdcx. %0,0,%1\n"      /* store conditional */
+                       "bne- 1b\n"             /* retry if lost reservation */
+                       "isync\n"
+                               : "=&r"(result)
+                               : "r"(addr), "r"(val)
+                               : "memory", "cc");
+
+               return result;
+       }
 #endif
        }
        /* generate an illegal instruction. Cannot catch this with linker tricks
@@ -99,8 +203,18 @@ unsigned long _atomic_exchange(volatile void *addr, unsigned long val, int len)
        return 0;
 }
 
-#define xchg(addr, v)  (__typeof__(*(addr))) _atomic_exchange((addr), (v), \
-                                                           sizeof(*(addr)))
+#define atomic_add_return(addr, v)     \
+       (__typeof__(*(addr))) _atomic_add_return((addr), (v), sizeof(*(addr)))
+
+/* atomic_sub_return, atomic_add, atomic_sub, atomic_inc, atomic_dec */
+
+#define atomic_sub_return(addr, v)     atomic_add_return((addr), -(v))
+
+#define atomic_add(addr, v)            (void)atomic_add_return((addr), (v))
+#define atomic_sub(addr, v)            (void)atomic_sub_return((addr), (v))
+
+#define atomic_inc(addr)               atomic_add((addr), 1)
+#define atomic_dec(addr)               atomic_add((addr), -1)
 
 #endif /* #ifndef _INCLUDE_API_H */
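
For readers less familiar with powerpc load-reserve/store-conditional sequences, every loop added above follows the same shape: lwarx/ldarx loads the value and takes a reservation, the new value is computed, and stwcx./stdcx. stores it only if the reservation still holds, branching back on failure, with lwsync/isync supplying the ordering. Semantically, the 4-byte add-return case behaves roughly like the following portable sketch written on top of the cmpxchg() macro the patch introduces (an illustration only, assuming the header is included; not a replacement for the inline assembly, which keeps the whole read-modify-write inside a single reservation):

/* Rough C equivalent of the 4-byte _atomic_add_return() loop. */
static unsigned int add_return_sketch(volatile unsigned int *addr,
                                      unsigned int val)
{
	unsigned int old, newval;

	do {
		old = *addr;		/* lwarx: load (and reserve) */
		newval = old + val;	/* add val to the loaded value */
	} while (cmpxchg(addr, old, newval) != old);	/* stwcx. failed: retry */

	return newval;			/* value after the addition */
}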
 
 