Public headers: use SPDX identifiers
[urcu.git] / include / urcu / arch / generic.h
CommitLineData
d3d3857f
MJ
1// SPDX-FileCopyrightText: 2010 Paolo Bonzini <pbonzini@redhat.com>
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4
e4d1eb09
PB
5#ifndef _URCU_ARCH_GENERIC_H
6#define _URCU_ARCH_GENERIC_H
7
8/*
9 * arch_generic.h: common definitions for multiple architectures.
e4d1eb09
PB
10 */
11
12#include <urcu/compiler.h>
13#include <urcu/config.h>
999991c6 14#include <urcu/syscall-compat.h>
e4d1eb09
PB
15
16#ifdef __cplusplus
17extern "C" {
18#endif
19
06f22bdb
DG
20#ifndef CAA_CACHE_LINE_SIZE
21#define CAA_CACHE_LINE_SIZE 64
e4d1eb09
PB
22#endif
23
#if !defined(cmm_mc) && !defined(cmm_rmc) && !defined(cmm_wmc)
#define CONFIG_HAVE_MEM_COHERENCY
/*
 * Cache-coherent architectures must _not_ define cmm_mc/cmm_rmc/cmm_wmc.
 *
 * On such architectures cmm_mc/cmm_rmc/cmm_wmc reduce to a plain
 * compiler barrier. We also provide defaults for cmm_mb (using a GCC
 * builtin) and for cmm_rmb/cmm_wmb (which fall back to cmm_mb).
 */

#ifndef cmm_mb
#define cmm_mb()	__sync_synchronize()
#endif

#ifndef cmm_rmb
#define cmm_rmb()	cmm_mb()
#endif

#ifndef cmm_wmb
#define cmm_wmb()	cmm_mb()
#endif

#define cmm_mc()	cmm_barrier()
#define cmm_rmc()	cmm_barrier()
#define cmm_wmc()	cmm_barrier()
#else
/*
 * Architectures without cache coherency need something along the lines
 * of:
 *
 *	#define cmm_mc()	arch_cache_flush()
 *	#define cmm_rmc()	arch_cache_flush_read()
 *	#define cmm_wmc()	arch_cache_flush_write()
 *
 * Only cmm_mc is mandatory: cmm_rmc and cmm_wmc default to cmm_mc, and
 * cmm_mb/cmm_rmb/cmm_wmb default to the cache-flush variants:
 *
 *	#define cmm_mb()	cmm_mc()
 *	#define cmm_rmb()	cmm_rmc()
 *	#define cmm_wmb()	cmm_wmc()
 */

#ifndef cmm_mb
#define cmm_mb()	cmm_mc()
#endif

#ifndef cmm_rmb
#define cmm_rmb()	cmm_rmc()
#endif

#ifndef cmm_wmb
#define cmm_wmb()	cmm_wmc()
#endif

#ifndef cmm_rmc
#define cmm_rmc()	cmm_mc()
#endif

#ifndef cmm_wmc
#define cmm_wmc()	cmm_mc()
#endif
#endif
85
/* Nop everywhere except on alpha. */
#ifndef cmm_read_barrier_depends
#define cmm_read_barrier_depends()
#endif

/*
 * cmm_smp_* barriers: real barriers when built for SMP, mere compiler
 * barriers on uniprocessor builds. Architecture headers may pre-define
 * any of them.
 */
#ifdef CONFIG_RCU_SMP
#ifndef cmm_smp_mb
#define cmm_smp_mb()	cmm_mb()
#endif
#ifndef cmm_smp_rmb
#define cmm_smp_rmb()	cmm_rmb()
#endif
#ifndef cmm_smp_wmb
#define cmm_smp_wmb()	cmm_wmb()
#endif
#ifndef cmm_smp_mc
#define cmm_smp_mc()	cmm_mc()
#endif
#ifndef cmm_smp_rmc
#define cmm_smp_rmc()	cmm_rmc()
#endif
#ifndef cmm_smp_wmc
#define cmm_smp_wmc()	cmm_wmc()
#endif
#ifndef cmm_smp_read_barrier_depends
#define cmm_smp_read_barrier_depends()	cmm_read_barrier_depends()
#endif
#else
#ifndef cmm_smp_mb
#define cmm_smp_mb()	cmm_barrier()
#endif
#ifndef cmm_smp_rmb
#define cmm_smp_rmb()	cmm_barrier()
#endif
#ifndef cmm_smp_wmb
#define cmm_smp_wmb()	cmm_barrier()
#endif
#ifndef cmm_smp_mc
#define cmm_smp_mc()	cmm_barrier()
#endif
#ifndef cmm_smp_rmc
#define cmm_smp_rmc()	cmm_barrier()
#endif
#ifndef cmm_smp_wmc
#define cmm_smp_wmc()	cmm_barrier()
#endif
#ifndef cmm_smp_read_barrier_depends
#define cmm_smp_read_barrier_depends()
#endif
#endif
/*
 * Busy-wait hint; defaults to a compiler barrier unless the
 * architecture header provides a dedicated pause/yield instruction.
 */
#ifndef caa_cpu_relax
#define caa_cpu_relax()	cmm_barrier()
#endif
f8c43f45
MD
141#ifndef HAS_CAA_GET_CYCLES
142#define HAS_CAA_GET_CYCLES
f8c43f45 143
86e8ab17 144#if defined(__APPLE__)
a0307b90 145
86e8ab17
MJ
146#include <mach/mach.h>
147#include <mach/clock.h>
148#include <mach/mach_time.h>
3fa18286
MD
149#include <time.h>
150#include <stdint.h>
151
152typedef uint64_t caa_cycles_t;
153
154static inline caa_cycles_t caa_get_cycles (void)
f8c43f45 155{
86e8ab17
MJ
156 mach_timespec_t ts = { 0, 0 };
157 static clock_serv_t clock_service;
f8c43f45 158
86e8ab17
MJ
159 if (caa_unlikely(!clock_service)) {
160 if (host_get_clock_service(mach_host_self(),
161 SYSTEM_CLOCK, &clock_service))
162 return -1ULL;
163 }
164 if (caa_unlikely(clock_get_time(clock_service, &ts)))
3fa18286
MD
165 return -1ULL;
166 return ((uint64_t) ts.tv_sec * 1000000000ULL) + ts.tv_nsec;
f8c43f45 167}
a0307b90 168
86e8ab17 169#elif defined(CONFIG_RCU_HAVE_CLOCK_GETTIME)
a0307b90 170
a0307b90
MD
171#include <time.h>
172#include <stdint.h>
173
174typedef uint64_t caa_cycles_t;
175
176static inline caa_cycles_t caa_get_cycles (void)
177{
86e8ab17 178 struct timespec ts;
a0307b90 179
86e8ab17 180 if (caa_unlikely(clock_gettime(CLOCK_MONOTONIC, &ts)))
a0307b90
MD
181 return -1ULL;
182 return ((uint64_t) ts.tv_sec * 1000000000ULL) + ts.tv_nsec;
183}
184
185#else
186
187#error caa_get_cycles() not implemented for this platform.
188
189#endif
190
f8c43f45
MD
191#endif /* HAS_CAA_GET_CYCLES */
192
e4d1eb09
PB
193#ifdef __cplusplus
194}
195#endif
196
197#endif /* _URCU_ARCH_GENERIC_H */
This page took 0.0544 seconds and 4 git commands to generate.