sparc,ppc,s390: uatomic ops update
author		Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
		Tue, 1 Dec 2009 16:19:07 +0000 (11:19 -0500)
committer	Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
		Tue, 1 Dec 2009 16:19:07 +0000 (11:19 -0500)
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
urcu/uatomic_arch_ppc.h
urcu/uatomic_arch_s390.h
urcu/uatomic_arch_sparc64.h
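All three headers touched below implement the same caller-facing uatomic API (uatomic_set/uatomic_read, uatomic_xchg, uatomic_cmpxchg, uatomic_add_return and friends). As orientation before the diffs, here is a minimal usage sketch in C; the umbrella include urcu/uatomic_arch.h and the program itself are illustrative assumptions, not part of this commit.

	/* Illustrative only -- not part of this commit. */
	#include <stdio.h>
	#include <urcu/uatomic_arch.h>	/* assumed umbrella header selecting the arch file */

	static unsigned long counter;

	int main(void)
	{
		unsigned long old;

		uatomic_set(&counter, 40);
		uatomic_add(&counter, 1);	/* counter == 41 */
		uatomic_inc(&counter);		/* counter == 42 */

		/* Returns the previous value; installs 100 only if counter == 42. */
		old = uatomic_cmpxchg(&counter, 42, 100);
		printf("cmpxchg saw %lu, counter is now %lu\n",
		       old, (unsigned long)uatomic_read(&counter));

		/* Atomic add that also returns the new value. */
		printf("add_return -> %lu\n",
		       (unsigned long)uatomic_add_return(&counter, 5));
		return 0;
	}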

diff --git a/urcu/uatomic_arch_ppc.h b/urcu/uatomic_arch_ppc.h
index 08a622dfe6f93cd547b0684ebfd398bf9284fe92..a32c789b94ea885619c9af4e18a13741809217d6 100644
@@ -225,7 +225,6 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val,
 #define uatomic_inc(addr)              uatomic_add((addr), 1)
 #define uatomic_dec(addr)              uatomic_add((addr), -1)
 
-#define URCU_CAS_AVAIL()       1
 #define compat_uatomic_cmpxchg(ptr, old, _new) uatomic_cmpxchg(ptr, old, _new)
 
 #endif /* _URCU_ARCH_UATOMIC_PPC_H */
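The ppc change above only drops the URCU_CAS_AVAIL() marker; compat_uatomic_cmpxchg stays a direct alias of uatomic_cmpxchg, since compare-and-swap is natively available on this architecture. For reference, a typical compare-and-swap retry loop a caller might build on uatomic_cmpxchg()/uatomic_read() is sketched below; update_max and shared_max are illustrative names, not from the patch, and the urcu/uatomic_arch.h include is assumed as above.

	/* Illustrative only -- not from the patch. */
	#include <urcu/uatomic_arch.h>

	static unsigned long shared_max;

	/* Raise shared_max to val unless another thread already stored something bigger. */
	static void update_max(unsigned long val)
	{
		unsigned long old, seen;

		seen = uatomic_read(&shared_max);
		do {
			old = seen;
			if (val <= old)
				return;	/* current maximum already >= val */
			/* Install val only if shared_max still equals old. */
			seen = uatomic_cmpxchg(&shared_max, old, val);
		} while (seen != old);	/* lost a race: retry against the new value */
	}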
diff --git a/urcu/uatomic_arch_s390.h b/urcu/uatomic_arch_s390.h
index db05b15b996a36f61a0a9ed3fff96fddcbb977c4..6b4e17eb78ad7cda400a0cae63e24f863502ea89 100644
@@ -1,5 +1,5 @@
-#ifndef _URCU_ARCH_ATOMIC_S390_H
-#define _URCU_ARCH_ATOMIC_S390_H
+#ifndef _URCU_UATOMIC_ARCH_S390_H
+#define _URCU_UATOMIC_ARCH_S390_H
 
 /*
  * Atomic exchange operations for the S390 architecture. Based on information
@@ -8,6 +8,7 @@
  *
  * Copyright (c) 2009 Novell, Inc.
  * Author: Jan Blunck <jblunck@suse.de>
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
  *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to
 #define uatomic_set(addr, v)   STORE_SHARED(*(addr), (v))
 #define uatomic_read(addr)     LOAD_SHARED(*(addr))
 
-static inline __attribute__((always_inline))
-unsigned int uatomic_exchange_32(volatile unsigned int *addr, unsigned int val)
-{
-       unsigned int result;
-
-       __asm__ __volatile__(
-               "0:     cs %0,%2,%1\n"
-               "       brc 4,0b\n"
-               : "=&r"(result), "=m" (*addr)
-               : "r"(val), "m" (*addr)
-               : "memory", "cc");
-
-       return result;
-}
-
-#if (BITS_PER_LONG == 64)
-
-static inline __attribute__((always_inline))
-unsigned long uatomic_exchange_64(volatile unsigned long *addr,
-                                unsigned long val)
-{
-       unsigned long result;
-
-       __asm__ __volatile__(
-               "0:     csg %0,%2,%1\n"
-               "       brc 4,0b\n"
-               : "=&r"(result), "=m" (*addr)
-               : "r"(val), "m" (*addr)
-               : "memory", "cc");
-
-       return result;
-}
-
-#endif
-
-static inline __attribute__((always_inline))
+/* xchg */
+
+static inline __attribute__((always_inline))
 unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
 {
        switch (len) {
        case 4:
-               return uatomic_exchange_32(addr, val);
+       {
+               unsigned int old_val;
+
+               __asm__ __volatile__(
+                       "0:     cs %0,%2,%1\n"
+                       "       brc 4,0b\n"
+                       : "=&r"(old_val), "=m" (*addr)
+                       : "r"(val), "m" (*addr)
+                       : "memory", "cc");
+               return old_val;
+       }
 #if (BITS_PER_LONG == 64)
        case 8:
-               return uatomic_exchange_64(addr, val);
+       {
+               unsigned long old_val;
+
+               __asm__ __volatile__(
+                       "0:     csg %0,%2,%1\n"
+                       "       brc 4,0b\n"
+                       : "=&r"(old_val), "=m" (*addr)
+                       : "r"(val), "m" (*addr)
+                       : "memory", "cc");
+               return old_val;
+       }
 #endif
        default:
                __asm__ __volatile__(".long     0xd00d00");
@@ -98,124 +78,100 @@ unsigned long _uatomic_exchange(volatile void *addr, unsigned long val, int len)
        return 0;
 }
 
-#define uatomic_xchg(addr, v)                                          \
+#define uatomic_xchg(addr, v)                                              \
        (__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
                                               sizeof(*(addr)))
 
+/* cmpxchg */
 
 static inline __attribute__((always_inline))
-void uatomic_add_32(volatile unsigned int *addr, unsigned int val)
-{
-       unsigned int result, old;
-
-       __asm__ __volatile__(
-               "       l %0, %1\n"
-               "0:     lr %2, %0\n"
-               "       ar %2, %3\n"
-               "       cs %0,%2,%1\n"
-               "       brc 4,0b\n"
-               : "=&r"(old), "+m" (*addr),
-                 "=&r"(result)
-               : "r"(val)
-               : "memory", "cc");
-}
-
-#if (BITS_PER_LONG == 64)
-
-static inline __attribute__((always_inline))
-void uatomic_add_64(volatile unsigned long *addr, unsigned long val)
-{
-       unsigned long result, old;
-
-       __asm__ __volatile__(
-               "       lg %0, %1\n"
-               "0:     lgr %2, %0\n"
-               "       agr %2, %3\n"
-               "       csg %0,%2,%1\n"
-               "       brc 4,0b\n"
-               : "=&r"(old), "+m" (*addr),
-                 "=&r"(result)
-               : "r"(val)
-               : "memory", "cc");
-}
-
-#endif
-
-static inline __attribute__((always_inline))
-void _uatomic_add(void *addr, unsigned long val, int len)
+unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
+                              unsigned long new, int len)
 {
        switch (len) {
        case 4:
-               uatomic_add_32(addr, val);
-               return;
+       {
+               unsigned int old_val = (unsigned int)old;
+
+               __asm__ __volatile__(
+                       "       cs %0,%2,%1\n"
+                       : "+r"(old_val), "+m"(*addr)
+                       : "r"(new)
+                       : "memory", "cc");
+               return old_val;
+       }
 #if (BITS_PER_LONG == 64)
        case 8:
-               uatomic_add_64(addr, val);
-               return;
+               __asm__ __volatile__(
+                       "       csg %0,%2,%1\n"
+                       : "+r"(old), "+m"(*addr)
+                       : "r"(new)
+                       : "memory", "cc");
+               return old;
 #endif
        default:
                __asm__ __volatile__(".long     0xd00d00");
        }
 
-       return;
+       return 0;
 }
 
-#define uatomic_add(addr, val)                                         \
-       _uatomic_add((addr), (unsigned long)(val), sizeof(*(addr)))
-
-static inline __attribute__((always_inline))
-unsigned int uatomic_cmpxchg_32(volatile unsigned int *addr, unsigned int old,
-                               unsigned int new)
-{
-       __asm__ __volatile__(
-               "       cs %0,%2,%1\n"
-               : "+r"(old), "+m"(*addr)
-               : "r"(new)
-               : "memory", "cc");
-
-       return old;
-}
+#define uatomic_cmpxchg(addr, old, new)                                        \
+       (__typeof__(*(addr))) _uatomic_cmpxchg((addr),                  \
+                                              (unsigned long)(old),    \
+                                              (unsigned long)(new),    \
+                                              sizeof(*(addr)))
 
-#if (BITS_PER_LONG == 64)
+/* uatomic_add_return */
 
 static inline __attribute__((always_inline))
-unsigned long uatomic_cmpxchg_64(volatile unsigned long *addr,
-                                unsigned long old, unsigned long new)
-{
-       __asm__ __volatile__(
-               "       csg %0,%2,%1\n"
-               : "+r"(old), "+m"(*addr)
-               : "r"(new)
-               : "memory", "cc");
-
-       return old;
-}
-
-#endif
-
-unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
-                              unsigned long new, int len)
+unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
 {
        switch (len) {
        case 4:
-               return uatomic_cmpxchg_32(addr, old, new);
+       {
+               unsigned int old, oldt;
+
+               oldt = uatomic_read((unsigned int *)addr);
+               do {
+                       old = oldt;
+                       oldt = _uatomic_cmpxchg(addr, old, old + val, 4);
+               } while (oldt != old);
+
+               return old + val;
+       }
 #if (BITS_PER_LONG == 64)
        case 8:
-               return uatomic_cmpxchg_64(addr, old, new);
+       {
+               unsigned long old, oldt;
+
+               oldt = uatomic_read((unsigned long *)addr);
+               do {
+                       old = oldt;
+                       oldt = _uatomic_cmpxchg(addr, old, old + val, 8);
+               } while (oldt != old);
+
+               return old + val;
+       }
 #endif
-       default:
-               __asm__ __volatile__(".long     0xd00d00");
        }
-
+       __builtin_trap();
        return 0;
 }
 
-#define uatomic_cmpxchg(addr, old, new)                                        \
-       (__typeof__(*(addr))) _uatomic_cmpxchg((addr),                  \
-                                              (unsigned long)(old),    \
-                                              (unsigned long)(new),    \
-                                              sizeof(*(addr)))
+#define uatomic_add_return(addr, v)                                    \
+       ((__typeof__(*(addr))) _uatomic_add_return((addr),              \
+                                                 (unsigned long)(v),   \
+                                                 sizeof(*(addr))))
+
+/* uatomic_sub_return, uatomic_add, uatomic_sub, uatomic_inc, uatomic_dec */
+
+#define uatomic_sub_return(addr, v)    uatomic_add_return((addr), -(v))
+
+#define uatomic_add(addr, v)           (void)uatomic_add_return((addr), (v))
+#define uatomic_sub(addr, v)           (void)uatomic_sub_return((addr), (v))
+
+#define uatomic_inc(addr)              uatomic_add((addr), 1)
+#define uatomic_dec(addr)              uatomic_add((addr), -1)
 
-#define URCU_CAS_AVAIL()       1
+#define compat_uatomic_cmpxchg(ptr, old, _new) uatomic_cmpxchg(ptr, old, _new)
 
-#endif /* _URCU_ARCH_ATOMIC_S390_H */
+#endif /* _URCU_UATOMIC_ARCH_S390_H */
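The rewritten s390 header derives uatomic_add_return() from the cmpxchg primitive with a retry loop, and expresses add/sub/inc/dec in terms of it. The standalone sketch below mirrors that retry-loop shape using GCC's __sync_val_compare_and_swap() builtin so it compiles without the liburcu headers; demo_add_return() is an illustrative name, not a liburcu symbol.

	/* Illustrative only -- demo_add_return() is not liburcu code. */
	#include <stdio.h>

	static unsigned long demo_add_return(unsigned long *addr, unsigned long val)
	{
		unsigned long old, seen;

		seen = *(volatile unsigned long *)addr;
		do {
			old = seen;
			/* GCC/Clang builtin: returns the value found at *addr. */
			seen = __sync_val_compare_and_swap(addr, old, old + val);
		} while (seen != old);	/* another writer got in first: retry */

		return old + val;
	}

	int main(void)
	{
		unsigned long v = 40;

		printf("%lu\n", demo_add_return(&v, 2));	/* prints 42 */
		return 0;
	}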
diff --git a/urcu/uatomic_arch_sparc64.h b/urcu/uatomic_arch_sparc64.h
index 28829d285811bfabce58ffc00847ff645f70e460..622fe100e8f47627b3232acd67dbe5e27bef2e80 100644
@@ -126,8 +126,7 @@ unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
 /* uatomic_add_return */
 
 static inline __attribute__((always_inline))
-unsigned long _uatomic_add_return(void *addr, unsigned long val,
-                                int len)
+unsigned long _uatomic_add_return(void *addr, unsigned long val, int len)
 {
        switch (len) {
        case 4:
@@ -176,7 +175,6 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val,
 #define uatomic_inc(addr)              uatomic_add((addr), 1)
 #define uatomic_dec(addr)              uatomic_add((addr), -1)
 
-#define URCU_CAS_AVAIL()       1
 #define compat_uatomic_cmpxchg(ptr, old, _new) uatomic_cmpxchg(ptr, old, _new)
 
 #endif /* _URCU_ARCH_UATOMIC_PPC_H */
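On sparc64, as on ppc, the update is limited to dropping URCU_CAS_AVAIL() and unwrapping the _uatomic_add_return() prototype. A common consumer of the add_return/inc/dec macros these headers keep is a reference count, sketched below under the same assumptions as the earlier examples: struct and function names are illustrative, the urcu/uatomic_arch.h umbrella header is assumed, and uatomic_sub_return is taken from the s390 header above and assumed to be provided by the other architectures as well.

	/* Illustrative only -- names are not from this commit. */
	#include <stdlib.h>
	#include <urcu/uatomic_arch.h>

	struct widget {
		unsigned long refcount;	/* manipulated only through uatomic ops */
		/* ... payload ... */
	};

	static void widget_get(struct widget *w)
	{
		uatomic_inc(&w->refcount);
	}

	static void widget_put(struct widget *w)
	{
		/* uatomic_sub_return() yields the post-decrement value atomically. */
		if (uatomic_sub_return(&w->refcount, 1) == 0)
			free(w);
	}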