caa_get_cycles: caa_ prefix for type, use CLOCK_MONOTONIC
diff --git a/urcu/arch/ppc.h b/urcu/arch/ppc.h
index 048b217392cc7cd471647625ca1dd128e6627e00..7e2b6218f82ddc4808a775010c8bfd0c6e25ea5c 100644
--- a/urcu/arch/ppc.h
+++ b/urcu/arch/ppc.h
 
 #include <urcu/compiler.h>
 #include <urcu/config.h>
+#include <urcu/syscall-compat.h>
+#include <stdint.h>
 
 #ifdef __cplusplus
 extern "C" {
-#endif 
+#endif
 
 /* Include size of POWER5+ L3 cache lines: 256 bytes */
 #define CAA_CACHE_LINE_SIZE    256
 
+#ifdef __NO_LWSYNC__
+#define LWSYNC_OPCODE  "sync\n"
+#else
+#define LWSYNC_OPCODE  "lwsync\n"
+#endif
+
 /*
  * Use sync for all cmm_mb/rmb/wmb barriers because lwsync does not
  * preserve ordering of cacheable vs. non-cacheable accesses, so it
@@ -40,7 +48,7 @@ extern "C" {
  * order cacheable and non-cacheable memory operations separately---i.e.
  * not the latter against the former.
  */
-#define cmm_mb()         asm volatile("sync":::"memory")
+#define cmm_mb()         __asm__ __volatile__ ("sync":::"memory")
 
 /*
  * lwsync orders loads in cacheable memory with respect to other loads,
@@ -48,39 +56,44 @@ extern "C" {
  * Therefore, use it for barriers ordering accesses to cacheable memory
  * only.
  */
-#define cmm_smp_rmb()    asm volatile("lwsync":::"memory")
-#define cmm_smp_wmb()    asm volatile("lwsync":::"memory")
+#define cmm_smp_rmb()    __asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
+#define cmm_smp_wmb()    __asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
 
 #define mftbl()                                                \
+       __extension__                                   \
        ({                                              \
                unsigned long rval;                     \
-               asm volatile("mftbl %0" : "=r" (rval)); \
+               __asm__ __volatile__ ("mftbl %0" : "=r" (rval));        \
                rval;                                   \
        })
 
 #define mftbu()                                                \
+       __extension__                                   \
        ({                                              \
                unsigned long rval;                     \
-               asm volatile("mftbu %0" : "=r" (rval)); \
+               __asm__ __volatile__ ("mftbu %0" : "=r" (rval));        \
                rval;                                   \
        })
 
 #define mftb()                                         \
+       __extension__                                   \
        ({                                              \
                unsigned long long rval;                \
-               asm volatile("mftb %0" : "=r" (rval));  \
+               __asm__ __volatile__ ("mftb %0" : "=r" (rval));         \
                rval;                                   \
        })
 
-typedef unsigned long long cycles_t;
+#define HAS_CAA_GET_CYCLES
+
+typedef uint64_t caa_cycles_t;
 
 #ifdef __powerpc64__
-static inline cycles_t caa_get_cycles(void)
+static inline caa_cycles_t caa_get_cycles(void)
 {
-       return (cycles_t) mftb();
+       return (caa_cycles_t) mftb();
 }
 #else
-static inline cycles_t caa_get_cycles(void)
+static inline caa_cycles_t caa_get_cycles(void)
 {
        unsigned long h, l;
 
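
A minimal sketch of how the lwsync-based SMP barriers above are meant to pair on cacheable memory; producer(), consumer(), data and ready are illustration-only names, not part of liburcu. Ordering these accesses against non-cacheable memory (e.g. MMIO) would still require the full cmm_mb() "sync" barrier described earlier.

    #include <urcu/arch.h>      /* pulls in this ppc.h on POWER */
    #include <urcu/compiler.h>  /* cmm_barrier() */

    static int data, ready;

    static void producer(void)
    {
            data = 42;
            cmm_smp_wmb();          /* lwsync: data store visible before flag store */
            ready = 1;
    }

    static void consumer(void)
    {
            while (!ready)
                    cmm_barrier();  /* compiler barrier: force re-reading ready */
            cmm_smp_rmb();          /* lwsync: flag load ordered before data load */
            /* data is now guaranteed to read as 42 */
    }
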
@@ -90,12 +103,20 @@ static inline cycles_t caa_get_cycles(void)
                l = mftbl();
                cmm_barrier();
                if (mftbu() == h)
-                       return (((cycles_t) h) << 32) + l;
+                       return (((caa_cycles_t) h) << 32) + l;
        }
 }
 #endif
 
-#ifdef __cplusplus 
+/*
+ * Define the membarrier system call number if not yet available in the
+ * system headers.
+ */
+#ifndef __NR_membarrier
+#define __NR_membarrier                365
+#endif
+
+#ifdef __cplusplus
 }
 #endif
 
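The 32-bit caa_get_cycles() above re-reads mftbu() because the 64-bit timebase must be read as two halves on 32-bit POWER: if the upper word changed between the reads, the pair is inconsistent and the loop retries.

The __NR_membarrier fallback lets liburcu issue the membarrier system call (365 on powerpc) even when building against kernel headers that predate it. A hypothetical probe, assuming a Linux target; MEMBARRIER_CMD_QUERY (0 in <linux/membarrier.h>) is redefined locally only to keep the sketch self-contained, and the caa_get_cycles() calls just illustrate the renamed caa_cycles_t type:

    #include <urcu/arch.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <stdio.h>

    #ifndef MEMBARRIER_CMD_QUERY
    #define MEMBARRIER_CMD_QUERY  0   /* from <linux/membarrier.h> */
    #endif

    int main(void)
    {
            long cmds = syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0);

            if (cmds < 0)
                    printf("sys_membarrier not available on this kernel\n");
            else
                    printf("supported membarrier commands: %#lx\n", cmds);

            caa_cycles_t start = caa_get_cycles();
            /* ... workload being timed ... */
            caa_cycles_t end = caa_get_cycles();

            printf("elapsed timebase ticks: %llu\n",
                    (unsigned long long) (end - start));
            return 0;
    }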