projects
/
urcu.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
wfcqueue: add C++ compatibility API
[urcu.git]
/
urcu
/
arch
/
ppc.h
diff --git
a/urcu/arch/ppc.h
b/urcu/arch/ppc.h
index 2fcbf5660958a12d2ad6dd957838ed73da4f2bec..791529ebb1a08ff3db124103474522b77bf0ad15 100644
(file)
--- a/
urcu/arch/ppc.h
+++ b/
urcu/arch/ppc.h
@@
-24,10
+24,12
@@
#include <urcu/compiler.h>
#include <urcu/config.h>
#include <urcu/compiler.h>
#include <urcu/config.h>
+#include <urcu/syscall-compat.h>
+#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#ifdef __cplusplus
extern "C" {
-#endif
+#endif
/* Include size of POWER5+ L3 cache lines: 256 bytes */
#define CAA_CACHE_LINE_SIZE 256
/* Include size of POWER5+ L3 cache lines: 256 bytes */
#define CAA_CACHE_LINE_SIZE 256
@@
-46,7
+48,7
@@
extern "C" {
* order cacheable and non-cacheable memory operations separately---i.e.
* not the latter against the former.
*/
* order cacheable and non-cacheable memory operations separately---i.e.
* not the latter against the former.
*/
-#define cmm_mb()		asm volatile("sync":::"memory")
+#define cmm_mb()		__asm__ __volatile__ ("sync":::"memory")
/*
* lwsync orders loads in cacheable memory with respect to other loads,
/*
* lwsync orders loads in cacheable memory with respect to other loads,
@@
-54,39
+56,44
@@
extern "C" {
* Therefore, use it for barriers ordering accesses to cacheable memory
* only.
*/
* Therefore, use it for barriers ordering accesses to cacheable memory
* only.
*/
-#define cmm_smp_rmb()		asm volatile(LWSYNC_OPCODE:::"memory")
-#define cmm_smp_wmb()		asm volatile(LWSYNC_OPCODE:::"memory")
+#define cmm_smp_rmb()		__asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
+#define cmm_smp_wmb()		__asm__ __volatile__ (LWSYNC_OPCODE:::"memory")
#define mftbl() \
#define mftbl() \
+ __extension__ \
({ \
unsigned long rval; \
({ \
unsigned long rval; \
-		asm volatile("mftbl %0" : "=r" (rval));		\
+		__asm__ __volatile__ ("mftbl %0" : "=r" (rval));	\
rval; \
})
#define mftbu() \
rval; \
})
#define mftbu() \
+ __extension__ \
({ \
unsigned long rval; \
({ \
unsigned long rval; \
-		asm volatile("mftbu %0" : "=r" (rval));		\
+		__asm__ __volatile__ ("mftbu %0" : "=r" (rval));	\
rval; \
})
#define mftb() \
rval; \
})
#define mftb() \
+ __extension__ \
({ \
unsigned long long rval; \
({ \
unsigned long long rval; \
-		asm volatile("mftb %0" : "=r" (rval));		\
+		__asm__ __volatile__ ("mftb %0" : "=r" (rval));	\
rval; \
})
rval; \
})
-typedef unsigned long long cycles_t;
+#define HAS_CAA_GET_CYCLES
+
+typedef uint64_t caa_cycles_t;
#ifdef __powerpc64__
#ifdef __powerpc64__
-static inline cycles_t caa_get_cycles(void)
+static inline caa_cycles_t caa_get_cycles(void)
{
{
- return (cycles_t) mftb();
+	return (caa_cycles_t) mftb();
}
#else
}
#else
-static inline cycles_t caa_get_cycles(void)
+static inline caa_cycles_t caa_get_cycles(void)
{
unsigned long h, l;
{
unsigned long h, l;
@@
-96,12
+103,20
@@
static inline cycles_t caa_get_cycles(void)
l = mftbl();
cmm_barrier();
if (mftbu() == h)
l = mftbl();
cmm_barrier();
if (mftbu() == h)
- return (((cycles_t) h) << 32) + l;
+			return (((caa_cycles_t) h) << 32) + l;
}
}
#endif
}
}
#endif
-#ifdef __cplusplus
+/*
+ * On Linux, define the membarrier system call number if not yet available in
+ * the system headers.
+ */
+#if (defined(__linux__) && !defined(__NR_membarrier))
+#define __NR_membarrier 365
+#endif
+
+#ifdef __cplusplus
}
#endif
}
#endif
This page took
0.025975 seconds
and
4
git commands to generate.