x86: allocate membarrier system call number
[urcu.git] / urcu / arch / x86.h
#ifndef _URCU_ARCH_X86_H
#define _URCU_ARCH_X86_H

/*
 * arch/x86.h: trivial definitions for the x86 architecture.
 *
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <urcu/compiler.h>
#include <urcu/config.h>
#include <urcu/syscall-compat.h>

#ifdef __cplusplus
extern "C" {
#endif

#define CAA_CACHE_LINE_SIZE 128

#ifdef CONFIG_RCU_HAVE_FENCE
#define cmm_mb() __asm__ __volatile__ ("mfence":::"memory")

/*
 * Define cmm_rmb/cmm_wmb as "strict" barriers that may be needed when
 * using SSE or working with I/O areas. cmm_smp_rmb/cmm_smp_wmb are
 * only compiler barriers, which is enough for general use.
 */
#define cmm_rmb() __asm__ __volatile__ ("lfence":::"memory")
#define cmm_wmb() __asm__ __volatile__ ("sfence":::"memory")
#define cmm_smp_rmb() cmm_barrier()
#define cmm_smp_wmb() cmm_barrier()
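
/*
 * Usage sketch (illustrative, not part of this header): cmm_smp_wmb() and
 * cmm_smp_rmb() pair up for an ordinary publish/consume handoff between
 * CPUs, while the fence-based cmm_wmb()/cmm_rmb() would only be needed for
 * non-temporal (SSE) stores or I/O memory. This assumes the
 * CMM_STORE_SHARED/CMM_LOAD_SHARED accessors from urcu/system.h:
 *
 *	producer:
 *		shared_data = compute();
 *		cmm_smp_wmb();			// order data store before flag store
 *		CMM_STORE_SHARED(ready, 1);
 *
 *	consumer:
 *		while (!CMM_LOAD_SHARED(ready))
 *			caa_cpu_relax();
 *		cmm_smp_rmb();			// order flag load before data load
 *		use(shared_data);
 */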
#else
/*
 * We leave cmm_smp_rmb/cmm_smp_wmb as full barriers for processors that do
 * not have fence instructions.
 *
 * An empty cmm_smp_rmb() may not be enough on old PentiumPro multiprocessor
 * systems, due to an erratum. The Linux kernel says that "Even distro
 * kernels should think twice before enabling this", but for now let's
 * be conservative and leave the full barrier on 32-bit processors. Also,
 * IDT WinChip supports weak store ordering, and the kernel may enable it
 * under our feet; cmm_smp_wmb() ceases to be a nop for these processors.
 */
#if (CAA_BITS_PER_LONG == 32)
#define cmm_mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)":::"memory")
#define cmm_rmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)":::"memory")
#define cmm_wmb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)":::"memory")
#else
#define cmm_mb() __asm__ __volatile__ ("lock; addl $0,0(%%rsp)":::"memory")
#define cmm_rmb() __asm__ __volatile__ ("lock; addl $0,0(%%rsp)":::"memory")
#define cmm_wmb() __asm__ __volatile__ ("lock; addl $0,0(%%rsp)":::"memory")
#endif
#endif
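
/*
 * Sketch (an illustration under assumptions, not upstream code): the
 * store->load reordering is the one x86 TSO permits, so a full cmm_mb()
 * is what a Dekker-style mutual-exclusion handshake relies on; neither
 * cmm_smp_rmb() nor cmm_smp_wmb() would suffice here. Again assuming the
 * CMM_STORE_SHARED/CMM_LOAD_SHARED accessors from urcu/system.h:
 *
 *	CMM_STORE_SHARED(want[self], 1);
 *	cmm_mb();		// order the store above before the load below
 *	if (!CMM_LOAD_SHARED(want[other])) {
 *		// critical section
 *	}
 */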

#define caa_cpu_relax() __asm__ __volatile__ ("rep; nop" : : : "memory")
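
/*
 * Illustrative busy-wait loop (hypothetical): "rep; nop" encodes the PAUSE
 * instruction, which reduces power consumption and pipeline pressure while
 * spinning:
 *
 *	while (!CMM_LOAD_SHARED(flag))
 *		caa_cpu_relax();
 */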

/* Read the 64-bit time-stamp counter (TSC) into val. */
#define rdtscll(val)							  \
	do {								  \
		unsigned int __a, __d;					  \
		__asm__ __volatile__ ("rdtsc" : "=a" (__a), "=d" (__d)); \
		(val) = ((unsigned long long)__a)			  \
			| (((unsigned long long)__d) << 32);		  \
	} while (0)

typedef unsigned long long cycles_t;

static inline cycles_t caa_get_cycles(void)
{
	cycles_t ret = 0;

	rdtscll(ret);
	return ret;
}
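
/*
 * Hypothetical timing sketch: rdtsc is not a serializing instruction, so
 * cycle counts taken this way are only approximate around short code
 * sequences (do_work() is a stand-in for the measured workload):
 *
 *	cycles_t start, end;
 *
 *	start = caa_get_cycles();
 *	do_work();
 *	end = caa_get_cycles();
 *	printf("elapsed: %llu cycles\n", end - start);
 */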

/*
 * Define the membarrier system call number if not yet available in the
 * system headers.
 */
#if (CAA_BITS_PER_LONG == 32)
#ifndef __NR_membarrier
#define __NR_membarrier 375
#endif
#else
#ifndef __NR_membarrier
#define __NR_membarrier 324
#endif
#endif
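
/*
 * Hypothetical usage sketch, assuming the MEMBARRIER_CMD_QUERY constant
 * from <linux/membarrier.h> and the syscall() wrapper pulled in through
 * urcu/syscall-compat.h:
 *
 *	// probe for membarrier support before relying on it
 *	if (syscall(__NR_membarrier, MEMBARRIER_CMD_QUERY, 0) < 0) {
 *		// fall back to issuing explicit cmm_smp_mb() barriers
 *	}
 */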

#ifdef __cplusplus
}
#endif

#include <urcu/arch/generic.h>

#endif /* _URCU_ARCH_X86_H */