x86: allocate membarrier system call number
diff --git a/urcu/arch/ppc.h b/urcu/arch/ppc.h
index a03d688837344f0bb2d438c7161d43dcb208a5ef..8a96dd9deb50626efe6a9a8b75e6cbab7c0dc628 100644
--- a/urcu/arch/ppc.h
+++ b/urcu/arch/ppc.h
@@ -24,6 +24,7 @@
 
 #include <urcu/compiler.h>
 #include <urcu/config.h>
+#include <urcu/syscall-compat.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -32,26 +33,52 @@ extern "C" {
 /* Include size of POWER5+ L3 cache lines: 256 bytes */
 #define CAA_CACHE_LINE_SIZE    256
 
-#define cmm_mb()    asm volatile("sync":::"memory")
+#ifdef __NO_LWSYNC__
+#define LWSYNC_OPCODE  "sync\n"
+#else
+#define LWSYNC_OPCODE  "lwsync\n"
+#endif
+
+/*
+ * Use sync for all cmm_mb/rmb/wmb barriers because lwsync does not
+ * preserve ordering of cacheable vs. non-cacheable accesses, so it
+ * should not be used to order with respect to MMIO operations.  An
+ * eieio+lwsync pair is also not enough for cmm_rmb, because it will
+ * order cacheable and non-cacheable memory operations separately---i.e.
+ * not the latter against the former.
+ */
+#define cmm_mb()         __asm__ __volatile__ ("sync":::"memory")
+
+/*
+ * lwsync orders loads in cacheable memory with respect to other loads,
+ * and stores in cacheable memory with respect to other stores.
+ * Therefore, use it for barriers ordering accesses to cacheable memory
+ * only.
+ */
+#define cmm_smp_rmb()    __asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
+#define cmm_smp_wmb()    __asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
 
 #define mftbl()                                                \
+       __extension__                                   \
        ({                                              \
                unsigned long rval;                     \
-               asm volatile("mftbl %0" : "=r" (rval)); \
+               __asm__ __volatile__ ("mftbl %0" : "=r" (rval));        \
                rval;                                   \
        })
 
 #define mftbu()                                                \
+       __extension__                                   \
        ({                                              \
                unsigned long rval;                     \
-               asm volatile("mftbu %0" : "=r" (rval)); \
+               __asm__ __volatile__ ("mftbu %0" : "=r" (rval));        \
                rval;                                   \
        })
 
 #define mftb()                                         \
+       __extension__                                   \
        ({                                              \
                unsigned long long rval;                \
-               asm volatile("mftb %0" : "=r" (rval));  \
+               __asm__ __volatile__ ("mftb %0" : "=r" (rval));         \
                rval;                                   \
        })
 
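The comment block in the hunk above explains when lwsync suffices: it orders cacheable loads against other loads and cacheable stores against other stores, which is exactly the message-passing pattern sketched below. This is a minimal illustration, not part of the patch; the publish()/consume() functions and the data/ready variables are hypothetical, and only the cmm_smp_wmb()/cmm_smp_rmb() macros come from <urcu/arch.h>, which pulls in this ppc.h on PowerPC builds.

#include <urcu/arch.h>          /* cmm_smp_wmb(), cmm_smp_rmb() */

static int data;
static volatile int ready;

static void publish(int value)
{
        data = value;           /* cacheable store */
        cmm_smp_wmb();          /* lwsync: make data visible before ready */
        ready = 1;
}

static int consume(void)
{
        while (!ready)
                ;               /* spin until the producer sets the flag */
        cmm_smp_rmb();          /* lwsync: order the ready load before the data load */
        return data;
}

Note that ordering against MMIO would still require cmm_mb() (sync), as the patch comment states; the lwsync-based barriers only cover cacheable memory.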
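The mftbu()/mftbl()/mftb() macros read the PowerPC time base. On 32-bit PowerPC the 64-bit time base cannot be read in one instruction, so the two halves are typically combined with a retry loop that catches a carry between the reads. The sketch below assumes that 32-bit case; read_timebase64() is an illustrative name, not an API provided by this header.

/* Combine mftbu()/mftbl() into a consistent 64-bit time base value. */
static inline unsigned long long read_timebase64(void)
{
        unsigned long hi, lo;

        do {
                hi = mftbu();   /* upper 32 bits of the time base */
                lo = mftbl();   /* lower 32 bits */
        } while (hi != mftbu()); /* retry if the upper half changed under us */

        return ((unsigned long long) hi << 32) | lo;
}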