#include <urcu/compiler.h>
#include <urcu/config.h>
+#include <urcu/syscall-compat.h>
#ifdef __cplusplus
extern "C" {
* order cacheable and non-cacheable memory operations separately---i.e.
* not the latter against the former.
*/
/*
 * NOTE(review): hunk swaps `asm volatile` for the GCC-documented
 * alternate keywords `__asm__ __volatile__` — presumably so the
 * barrier still compiles under strict ISO modes (-std=c99/-std=c++NN)
 * where plain `asm` is not a keyword; confirm against the commit
 * message. The "memory" clobber makes this a compiler barrier as well.
 */
-#define cmm_mb() asm volatile("sync":::"memory")
+#define cmm_mb() __asm__ __volatile__ ("sync":::"memory")
/*
 * lwsync orders loads in cacheable memory with respect to other loads,
 * and stores in cacheable memory with respect to other stores.
 * Therefore, use it for barriers ordering accesses to cacheable memory
* only.
*/
/*
 * NOTE(review): same keyword change as cmm_mb above, applied to the
 * SMP read/write barriers. Both expand to LWSYNC_OPCODE (defined
 * elsewhere in this header — not visible in this hunk), which per the
 * surrounding comment only orders accesses to cacheable memory.
 * No change to the emitted instruction or the "memory" clobber.
 */
-#define cmm_smp_rmb() asm volatile(LWSYNC_OPCODE:::"memory")
-#define cmm_smp_wmb() asm volatile(LWSYNC_OPCODE:::"memory")
+#define cmm_smp_rmb() __asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
+#define cmm_smp_wmb() __asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
/*
 * Read the time-base lower register via the `mftbl` instruction and
 * yield it as the value of the statement expression.
 * NOTE(review): the hunk prepends `__extension__`, which suppresses
 * -pedantic diagnostics for the GNU statement-expression ({ ... })
 * construct, and switches to the __asm__/__volatile__ spellings as in
 * the barrier macros above.
 */
#define mftbl() \
+ __extension__ \
({ \
unsigned long rval; \
- asm volatile("mftbl %0" : "=r" (rval)); \
+ __asm__ __volatile__ ("mftbl %0" : "=r" (rval)); \
rval; \
})
/*
 * Read the time-base upper register via the `mftbu` instruction;
 * companion to mftbl() above for composing a full 64-bit time-base
 * read on 32-bit targets (presumably — the caller is outside this
 * hunk; verify). Same __extension__ / __asm__ __volatile__ conversion
 * as the other macros in this patch.
 */
#define mftbu() \
+ __extension__ \
({ \
unsigned long rval; \
- asm volatile("mftbu %0" : "=r" (rval)); \
+ __asm__ __volatile__ ("mftbu %0" : "=r" (rval)); \
rval; \
})
/*
 * Read the full time base via the `mftb` instruction into an
 * unsigned long long; presumably used where a single-instruction
 * 64-bit read is available — TODO confirm the guarding #ifdef, which
 * is outside this hunk. Same __extension__ / __asm__ __volatile__
 * conversion as the macros above; no change to the instruction.
 */
#define mftb() \
+ __extension__ \
({ \
unsigned long long rval; \
- asm volatile("mftb %0" : "=r" (rval)); \
+ __asm__ __volatile__ ("mftb %0" : "=r" (rval)); \
rval; \
})