#ifndef _URCU_ARCH_X86_H
#define _URCU_ARCH_X86_H

/*
 * urcu/arch/x86.h: trivial definitions for the x86 architecture.
 *
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <urcu/compiler.h>
#include <urcu/config.h>
#include <urcu/syscall-compat.h>

#ifdef __cplusplus
extern "C" {
#endif

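/*
 * Note: x86 cache lines are 64 bytes; 128 bytes is a conservative
 * padding granularity, since the adjacent-cache-line prefetcher on
 * Intel processors can effectively pair two consecutive lines for
 * false-sharing purposes.
 */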
#define CAA_CACHE_LINE_SIZE	128

#ifdef CONFIG_RCU_HAVE_FENCE
#define cmm_mb() __asm__ __volatile__ ("mfence":::"memory")

/*
 * Define cmm_rmb/cmm_wmb to "strict" barriers that may be needed when
 * using SSE or working with I/O areas. cmm_smp_rmb/cmm_smp_wmb are
 * only compiler barriers, which is enough for general use.
 */
#define cmm_rmb() __asm__ __volatile__ ("lfence":::"memory")
#define cmm_wmb() __asm__ __volatile__ ("sfence":::"memory")
#define cmm_smp_rmb() cmm_barrier()
#define cmm_smp_wmb() cmm_barrier()
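
/*
 * The compiler-only smp barriers suffice because x86-TSO never reorders
 * loads with other loads, nor stores with other stores; only the
 * compiler must be kept from reordering. Illustrative sketch, assuming
 * CMM_STORE_SHARED() from urcu/system.h: publishing an initialized
 * node only needs the compiler barrier:
 *
 *	node->data = 42;
 *	cmm_smp_wmb();
 *	CMM_STORE_SHARED(head, node);
 */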
#else
/*
 * We leave smp_rmb/smp_wmb as full barriers for processors that do not have
 * fence instructions.
 *
 * An empty cmm_smp_rmb() may not be enough on old PentiumPro multiprocessor
 * systems, due to an erratum. The Linux kernel says that "Even distro
 * kernels should think twice before enabling this", but for now let's
 * be conservative and leave the full barrier on 32-bit processors. Also,
 * IDT WinChip supports weak store ordering, and the kernel may enable it
 * under our feet; cmm_smp_wmb() ceases to be a nop for these processors.
 */
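/*
 * The lock-prefixed addl of zero to the word at the top of the stack is
 * a no-op store whose LOCK semantics act as a full memory barrier;
 * unlike mfence, it does not require SSE2, so it works on these older
 * processors.
 */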
#if (CAA_BITS_PER_LONG == 32)
#define cmm_mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)":::"memory")
#define cmm_rmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)":::"memory")
#define cmm_wmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)":::"memory")
#else
#define cmm_mb() __asm__ __volatile__ ("lock; addl $0,0(%%rsp)":::"memory")
#define cmm_rmb() __asm__ __volatile__ ("lock; addl $0,0(%%rsp)":::"memory")
#define cmm_wmb() __asm__ __volatile__ ("lock; addl $0,0(%%rsp)":::"memory")
#endif
#endif

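/*
 * "rep; nop" is the encoding of the PAUSE instruction: it hints to the
 * processor that this is a spin-wait loop, which saves power and avoids
 * a memory-order mis-speculation penalty when the loop exits.
 * Illustrative spin-wait, assuming CMM_LOAD_SHARED() from
 * urcu/system.h:
 *
 *	while (!CMM_LOAD_SHARED(flag))
 *		caa_cpu_relax();
 */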
#define caa_cpu_relax() __asm__ __volatile__ ("rep; nop" : : : "memory")

#define HAS_CAA_GET_CYCLES

#define rdtscll(val)							\
	do {								\
		unsigned int __a, __d;					\
		__asm__ __volatile__ ("rdtsc" : "=a" (__a), "=d" (__d)); \
		(val) = ((unsigned long long)__a)			\
			| (((unsigned long long)__d) << 32);		\
	} while (0)

typedef unsigned long long cycles_t;

static inline cycles_t caa_get_cycles(void)
{
	cycles_t ret = 0;

	rdtscll(ret);
	return ret;
}
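
/*
 * Note that rdtsc is not a serializing instruction (it can execute
 * speculatively), and on older SMP systems the TSC is not guaranteed to
 * be synchronized across CPUs. Illustrative sketch for coarse interval
 * measurement on a single CPU, where do_work() is a hypothetical
 * workload:
 *
 *	cycles_t t0, t1;
 *
 *	t0 = caa_get_cycles();
 *	do_work();
 *	t1 = caa_get_cycles();
 *	printf("elapsed: %llu cycles\n", (unsigned long long)(t1 - t0));
 */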

/*
 * Define the membarrier system call number if not yet available in the
 * system headers.
 */
#if (CAA_BITS_PER_LONG == 32)
#ifndef __NR_membarrier
#define __NR_membarrier		375
#endif
#else
#ifndef __NR_membarrier
#define __NR_membarrier		324
#endif
#endif
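
/*
 * These are the x86 syscall numbers for membarrier(2): 375 on i386 and
 * 324 on x86_64. Illustrative invocation, with MEMBARRIER_CMD_SHARED
 * taken from <linux/membarrier.h>:
 *
 *	syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0);
 *
 * This issues a memory barrier on all running threads, returning 0 on
 * success or -1 with errno set (e.g. ENOSYS on kernels without
 * membarrier support).
 */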

#ifdef __cplusplus
}
#endif

#include <urcu/arch/generic.h>

#endif /* _URCU_ARCH_X86_H */