#include <urcu/compiler.h>
#include <urcu/config.h>
#include <urcu/syscall-compat.h>

#ifdef __cplusplus
extern "C" {
#endif

/* Include size of POWER5+ L3 cache lines: 256 bytes */
#define CAA_CACHE_LINE_SIZE 256

/*
 * Some PowerPC cores lack the lwsync instruction; compilers targeting
 * them define __NO_LWSYNC__ (e.g. with -mno-lwsync). Fall back to the
 * heavier-weight sync opcode in that case.
 */
#ifdef __NO_LWSYNC__
#define LWSYNC_OPCODE "sync\n"
#else
#define LWSYNC_OPCODE "lwsync\n"
#endif

/*
 * Use sync for all cmm_mb/rmb/wmb barriers because lwsync does not
 * preserve ordering of cacheable vs. non-cacheable accesses: it orders
 * cacheable and non-cacheable memory operations separately, i.e. it
 * does not order the latter against the former.
 */
#define cmm_mb() __asm__ __volatile__ ("sync":::"memory")
/*
 * lwsync orders loads in cacheable memory with respect to other loads,
 * and stores in cacheable memory with respect to other stores.
 * Therefore, use it for barriers ordering accesses to cacheable memory
 * only.
 */
#define cmm_smp_rmb() __asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
#define cmm_smp_wmb() __asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
/*
 * Read the lower half of the PowerPC time base register (mftbl).
 * __extension__ silences -pedantic diagnostics about the GCC statement
 * expression used to return a value from the macro.
 */
#define mftbl()						\
	__extension__					\
	({						\
		unsigned long rval;			\
		__asm__ __volatile__ ("mftbl %0" : "=r" (rval));	\
		rval;					\
	})
/*
 * Read the upper half of the PowerPC time base register (mftbu).
 * __extension__ silences -pedantic diagnostics about the GCC statement
 * expression used to return a value from the macro.
 */
#define mftbu()						\
	__extension__					\
	({						\
		unsigned long rval;			\
		__asm__ __volatile__ ("mftbu %0" : "=r" (rval));	\
		rval;					\
	})
/*
 * Read the full PowerPC time base register (mftb) into a 64-bit value.
 * __extension__ silences -pedantic diagnostics about the GCC statement
 * expression used to return a value from the macro.
 */
#define mftb()						\
	__extension__					\
	({						\
		unsigned long long rval;		\
		__asm__ __volatile__ ("mftb %0" : "=r" (rval));		\
		rval;					\
	})
/*
 * Advertise that this architecture header supplies a cycle counter
 * (backed by the time base registers read via mftb/mftbl/mftbu above).
 */
#define HAS_CAA_GET_CYCLES

typedef unsigned long long cycles_t;
/*
 * NOTE(review): the closing brace below has no matching opening brace in
 * this excerpt. It most likely closes a function definition (such as a
 * caa_get_cycles() implementation for the 64-bit case) whose preceding
 * lines were lost when this chunk was extracted — confirm against the
 * complete upstream urcu/arch/ppc.h before building.
 */
#ifdef __powerpc64__
}
#endif
/*
 * Define the membarrier system call number if not yet available in the
 * system headers. 365 is the PowerPC-specific syscall number for
 * membarrier(2).
 */
#ifndef __NR_membarrier
#define __NR_membarrier 365
#endif

#ifdef __cplusplus
}
#endif