* arch_x86.h: trivial definitions for the x86 architecture.
*
* Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
-*
+ *
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*/
#include <urcu/compiler.h>
-#include <urcu/arch_uatomic.h>
-
-/* Assume P4 or newer */
-#define CONFIG_HAVE_FENCE 1
-#define CONFIG_HAVE_MEM_COHERENCY
+#include <urcu/config.h>
-#define CACHE_LINE_SIZE 128
+#ifdef __cplusplus
+extern "C" {
+#endif
-#ifndef BITS_PER_LONG
-#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
-#endif
+#define CAA_CACHE_LINE_SIZE 128
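
/*
 * CAA_CACHE_LINE_SIZE is typically used to align or pad data structures so
 * that variables updated by different threads do not share a cache line
 * (false sharing). A minimal sketch, assuming a hypothetical per-thread
 * counter that is not part of this header:
 *
 *   struct thread_count {
 *           unsigned long count;
 *   } __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
 */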
-#ifdef CONFIG_HAVE_FENCE
-#define mb() asm volatile("mfence":::"memory")
-#define rmb() asm volatile("lfence":::"memory")
-#define wmb() asm volatile("sfence"::: "memory")
+#ifdef CONFIG_RCU_HAVE_FENCE
+#define cmm_mb() asm volatile("mfence":::"memory")
+#define cmm_rmb() asm volatile("lfence":::"memory")
+#define cmm_wmb() asm volatile("sfence"::: "memory")
#else
/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
+ * Some non-Intel clones support out-of-order stores. cmm_wmb() ceases to be a
* nop for these.
*/
-#define mb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
-#define rmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
-#define wmb() asm volatile("lock; addl $0,0(%%esp)"::: "memory")
+#define cmm_mb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
+#define cmm_rmb() asm volatile("lock; addl $0,0(%%esp)":::"memory")
+#define cmm_wmb() asm volatile("lock; addl $0,0(%%esp)"::: "memory")
#endif
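
/*
 * These barriers order memory accesses between threads, e.g. to publish an
 * initialized value. A minimal sketch, assuming hypothetical shared
 * variables "data" and "ready" (CMM_STORE_SHARED()/CMM_LOAD_SHARED() are
 * provided by urcu/system.h):
 *
 *   writer:  data = 42;
 *            cmm_wmb();                    makes the store to data visible
 *            CMM_STORE_SHARED(ready, 1);   before the store to ready
 *
 *   reader:  while (!CMM_LOAD_SHARED(ready))
 *                    ;
 *            cmm_rmb();                    pairs with the writer's cmm_wmb()
 *            assert(data == 42);
 */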
-/*
- * Architectures without cache coherency need something like the following:
- *
- * #define mb() mc()
- * #define rmb() rmc()
- * #define wmb() wmc()
- * #define mc() arch_cache_flush()
- * #define rmc() arch_cache_flush_read()
- * #define wmc() arch_cache_flush_write()
- */
-
-#define mc() barrier()
-#define rmc() barrier()
-#define wmc() barrier()
-
-/* Assume SMP machine, given we don't have this information */
-#define CONFIG_SMP 1
-
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#define smp_mc() mc()
-#define smp_rmc() rmc()
-#define smp_wmc() wmc()
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#define smp_mc() barrier()
-#define smp_rmc() barrier()
-#define smp_wmc() barrier()
-#endif
-
-/* Nop everywhere except on alpha. */
-#define smp_read_barrier_depends()
-
-static inline void rep_nop(void)
-{
- asm volatile("rep; nop" : : : "memory");
-}
-
-static inline void cpu_relax(void)
-{
- rep_nop();
-}
-
-/*
- * Serialize core instruction execution. Also acts as a compiler barrier.
- */
-#ifdef __PIC__
-/*
- * Cannot use cpuid because it clobbers the ebx register and clashes
- * with -fPIC :
- * error: PIC register 'ebx' clobbered in 'asm'
- */
-static inline void sync_core(void)
-{
- mb();
-}
-#else
-static inline void sync_core(void)
-{
- asm volatile("cpuid" : : : "memory", "eax", "ebx", "ecx", "edx");
-}
-#endif
+#define caa_cpu_relax() asm volatile("rep; nop" : : : "memory")
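
/*
 * caa_cpu_relax() emits the x86 "pause" hint (rep; nop), telling the CPU
 * that the caller is in a busy-wait loop. A minimal sketch, assuming a
 * hypothetical "flag" variable set by another thread:
 *
 *   while (!CMM_LOAD_SHARED(flag))
 *           caa_cpu_relax();
 */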
#define rdtscll(val)							\
	do {								\
		unsigned int __a, __d;					\
		asm volatile("rdtsc" : "=a" (__a), "=d" (__d));		\
		(val) = ((unsigned long long) __a)			\
			| (((unsigned long long) __d) << 32);		\
	} while (0)

typedef unsigned long long cycles_t;
-static inline cycles_t get_cycles(void)
+static inline cycles_t caa_get_cycles(void)
{
	cycles_t ret = 0;

	rdtscll(ret);
	return ret;
}
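
/*
 * caa_get_cycles() returns the time-stamp counter read by rdtscll(). A
 * minimal sketch of timing a code section with it (do_work() is a
 * hypothetical function, not part of this header):
 *
 *   cycles_t start, end;
 *
 *   start = caa_get_cycles();
 *   do_work();
 *   end = caa_get_cycles();
 *   printf("%llu cycles\n", (unsigned long long) (end - start));
 */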
+#ifdef __cplusplus
+}
+#endif
+
+#include <urcu/arch_generic.h>
+
#endif /* _URCU_ARCH_X86_H */