Cleanup: remove trailing whitespaces at EOL
diff --git a/urcu/uatomic/ppc.h b/urcu/uatomic/ppc.h
index 3eb3d639748301f2b67d8e5ccce73b894a99786a..0e672f57260529679b7c15d31304f57767328bb9 100644
--- a/urcu/uatomic/ppc.h
+++ b/urcu/uatomic/ppc.h
@@ -1,7 +1,7 @@
 #ifndef _URCU_ARCH_UATOMIC_PPC_H
 #define _URCU_ARCH_UATOMIC_PPC_H
 
-/* 
+/*
  * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
  * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
  * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 
 #ifdef __cplusplus
 extern "C" {
-#endif 
-
-#ifdef __NO_LWSYNC__
-#define LWSYNC_OPCODE  "sync\n"
-#else
-#define LWSYNC_OPCODE  "lwsync\n"
 #endif
 
 #define ILLEGAL_INSTR  ".long  0xd00d00"
 
 /*
- * Using an isync as a second barrier for exchange to provide acquire semantics.
- * According to uatomic_ops/sysdeps/gcc/powerpc.h, the documentation is "fairly
- * explicit that this also has acquire semantics."
- * Derived from AO_compare_and_swap(), but removed the comparison.
+ * Providing sequential consistency semantics with respect to other
+ * instructions for the cmpxchg and add_return families of atomic
+ * primitives.
+ *
+ * This is achieved with:
+ *   lwsync (prior stores can be reordered after the following load)
+ *   lwarx
+ *   stwcx.
+ *   test if success (retry)
+ *   sync
+ *
+ * Explanation of the sequential consistency provided by this scheme
+ * from Paul E. McKenney:
+ *
+ * The reason we can get away with the lwsync before is that if a prior
+ * store reorders with the lwarx, then you have to store to the atomic
+ * variable from some other CPU to detect it.
+ *
+ * And if you do that, the lwarx will lose its reservation, so the stwcx
+ * will fail.  The atomic operation will retry, so that the caller won't be
+ * able to see the misordering.
  */
 
 /* xchg */
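
To illustrate the guarantee described in the comment above, here is a minimal caller-side sketch (not part of this patch) of the classic store-buffering pattern. It assumes the public <urcu/uatomic.h> header, which pulls in this file on PowerPC; the flag names and the pthread scaffolding are purely illustrative. With the sync-based sequence, uatomic_xchg() (updated in the hunks below) is intended to act as a full memory barrier, so the outcome where both threads read 0 is forbidden; with the old isync-based sequence (acquire only), that outcome could be observed.

#include <pthread.h>
#include <stdio.h>
#include <urcu/uatomic.h>

static int flag0, flag1;
static int saw0, saw1;

static void *thread0(void *arg)
{
	(void) arg;
	uatomic_xchg(&flag0, 1);	/* full barrier on both sides */
	saw1 = uatomic_read(&flag1);
	return NULL;
}

static void *thread1(void *arg)
{
	(void) arg;
	uatomic_xchg(&flag1, 1);
	saw0 = uatomic_read(&flag0);
	return NULL;
}

int main(void)
{
	pthread_t t0, t1;

	pthread_create(&t0, NULL, thread0, NULL);
	pthread_create(&t1, NULL, thread1, NULL);
	pthread_join(t0, NULL);
	pthread_join(t1, NULL);
	/* "saw0=0 saw1=0" must never be printed with full ordering. */
	printf("saw0=%d saw1=%d\n", saw0, saw1);
	return 0;
}
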
@@ -57,7 +67,7 @@ unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
                "1:\t"  "lwarx %0,0,%1\n"       /* load and reserve */
                        "stwcx. %2,0,%1\n"      /* else store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
-                       "isync\n"
+                       "sync\n"
                                : "=&r"(result)
                                : "r"(addr), "r"(val)
                                : "memory", "cc");
@@ -74,7 +84,7 @@ unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
                "1:\t"  "ldarx %0,0,%1\n"       /* load and reserve */
                        "stdcx. %2,0,%1\n"      /* else store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
-                       "isync\n"
+                       "sync\n"
                                : "=&r"(result)
                                : "r"(addr), "r"(val)
                                : "memory", "cc");
@@ -83,14 +93,17 @@ unsigned long _uatomic_exchange(void *addr, unsigned long val, int len)
        }
 #endif
        }
-       /* generate an illegal instruction. Cannot catch this with linker tricks
-        * when optimizations are disabled. */
+       /*
+        * generate an illegal instruction. Cannot catch this with
+        * linker tricks when optimizations are disabled.
+        */
        __asm__ __volatile__(ILLEGAL_INSTR);
        return 0;
 }
 
 #define uatomic_xchg(addr, v)                                              \
-       ((__typeof__(*(addr))) _uatomic_exchange((addr), (unsigned long)(v), \
+       ((__typeof__(*(addr))) _uatomic_exchange((addr),                    \
+                                               caa_cast_long_keep_sign(v), \
                                                sizeof(*(addr))))
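
As a hedged usage sketch (not part of this patch), the uatomic_xchg() macro above is sufficient to build a test-and-set spinlock. The lock type and function names are hypothetical; caa_cpu_relax(), cmm_smp_mb() and uatomic_set() are assumed to come from <urcu/arch.h> and the generic uatomic API.

#include <urcu/arch.h>		/* caa_cpu_relax(), cmm_smp_mb() */
#include <urcu/uatomic.h>

struct tas_lock {
	int locked;		/* 0 = free, 1 = held */
};

static void tas_lock_acquire(struct tas_lock *l)
{
	/* spin until the exchange returns 0, i.e. the lock was free */
	while (uatomic_xchg(&l->locked, 1) != 0)
		caa_cpu_relax();
}

static void tas_lock_release(struct tas_lock *l)
{
	cmm_smp_mb();	/* order critical-section accesses before the release */
	uatomic_set(&l->locked, 0);
}
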
 /* cmpxchg */
 
@@ -110,7 +123,7 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
                        "bne 2f\n"              /* old, fail */
                        "stwcx. %2,0,%1\n"      /* else store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
-                       "isync\n"
+                       "sync\n"
                "2:\n"
                                : "=&r"(old_val)
                                : "r"(addr), "r"((unsigned int)_new),
@@ -131,7 +144,7 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
                        "bne 2f\n"              /* old, fail */
                        "stdcx. %2,0,%1\n"      /* else store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
-                       "isync\n"
+                       "sync\n"
                "2:\n"
                                : "=&r"(old_val)
                                : "r"(addr), "r"((unsigned long)_new),
@@ -142,16 +155,19 @@ unsigned long _uatomic_cmpxchg(void *addr, unsigned long old,
        }
 #endif
        }
-       /* generate an illegal instruction. Cannot catch this with linker tricks
-        * when optimizations are disabled. */
+       /*
+        * generate an illegal instruction. Cannot catch this with
+        * linker tricks when optimizations are disabled.
+        */
        __asm__ __volatile__(ILLEGAL_INSTR);
        return 0;
 }
 
 
-#define uatomic_cmpxchg(addr, old, _new)                                   \
-       ((__typeof__(*(addr))) _uatomic_cmpxchg((addr), (unsigned long)(old),\
-                                               (unsigned long)(_new),      \
+#define uatomic_cmpxchg(addr, old, _new)                                     \
+       ((__typeof__(*(addr))) _uatomic_cmpxchg((addr),                       \
+                                               caa_cast_long_keep_sign(old), \
+                                               caa_cast_long_keep_sign(_new),\
                                                sizeof(*(addr))))
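
A short usage sketch of the uatomic_cmpxchg() macro above (not part of this patch): a saturating counter increment written as a compare-and-swap retry loop. The counter variable and the limit parameter are illustrative assumptions; uatomic_read() is assumed from the generic uatomic API.

#include <urcu/uatomic.h>

static unsigned long counter;

/* Add 1 to counter unless it already reached max; return the prior value. */
static unsigned long counter_inc_bounded(unsigned long max)
{
	unsigned long old, newval;

	do {
		old = uatomic_read(&counter);
		if (old >= max)
			return old;	/* saturated: nothing to store */
		newval = old + 1;
		/* on failure another CPU updated counter; reload and retry */
	} while (uatomic_cmpxchg(&counter, old, newval) != old);

	return old;
}
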
 
 /* uatomic_add_return */
@@ -171,7 +187,7 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val,
                        "add %0,%2,%0\n"        /* add val to value loaded */
                        "stwcx. %0,0,%1\n"      /* store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
-                       "isync\n"
+                       "sync\n"
                                : "=&r"(result)
                                : "r"(addr), "r"(val)
                                : "memory", "cc");
@@ -189,7 +205,7 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val,
                        "add %0,%2,%0\n"        /* add val to value loaded */
                        "stdcx. %0,0,%1\n"      /* store conditional */
                        "bne- 1b\n"             /* retry if lost reservation */
-                       "isync\n"
+                       "sync\n"
                                : "=&r"(result)
                                : "r"(addr), "r"(val)
                                : "memory", "cc");
@@ -198,19 +214,21 @@ unsigned long _uatomic_add_return(void *addr, unsigned long val,
        }
 #endif
        }
-       /* generate an illegal instruction. Cannot catch this with linker tricks
-        * when optimizations are disabled. */
+       /*
+        * generate an illegal instruction. Cannot catch this with
+        * linker tricks when optimizations are disabled.
+        */
        __asm__ __volatile__(ILLEGAL_INSTR);
        return 0;
 }
 
 
-#define uatomic_add_return(addr, v)                                    \
-       ((__typeof__(*(addr))) _uatomic_add_return((addr),              \
-                                                 (unsigned long)(v),   \
-                                                 sizeof(*(addr))))
+#define uatomic_add_return(addr, v)                                        \
+       ((__typeof__(*(addr))) _uatomic_add_return((addr),                  \
+                                               caa_cast_long_keep_sign(v), \
+                                               sizeof(*(addr))))
 
-#ifdef __cplusplus 
+#ifdef __cplusplus
 }
 #endif
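
Finally, a hedged sketch (not part of this patch) of uatomic_add_return() used for reference counting. The structure and function names are hypothetical; the full-barrier property relied upon in the comment is the one this change establishes with the trailing sync.

#include <stdlib.h>
#include <urcu/uatomic.h>

struct resource {
	long refcount;		/* number of outstanding references */
	/* ... payload ... */
};

static void resource_get(struct resource *r)
{
	uatomic_add_return(&r->refcount, 1);
}

static void resource_put(struct resource *r)
{
	/*
	 * add_return is a full barrier, so the thread dropping the last
	 * reference sees all prior writes to the object before freeing it.
	 */
	if (uatomic_add_return(&r->refcount, -1) == 0)
		free(r);
}
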
 