// SPDX-FileCopyrightText: 2010 Paolo Bonzini <pbonzini@redhat.com>
//
// SPDX-License-Identifier: LGPL-2.1-or-later

#ifndef _URCU_ARCH_GENERIC_H
#define _URCU_ARCH_GENERIC_H

/*
 * arch_generic.h: common definitions for multiple architectures.
 */

#include <urcu/compiler.h>
#include <urcu/config.h>
#include <urcu/syscall-compat.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef CAA_CACHE_LINE_SIZE
#define CAA_CACHE_LINE_SIZE 64
#endif
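
/*
 * Usage sketch (illustrative, not from this file): CAA_CACHE_LINE_SIZE
 * is typically used to align hot shared data so that independently
 * updated fields do not share a cache line (false sharing):
 *
 *   struct counter {
 *           unsigned long value;
 *   } __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
 */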

#if !defined(cmm_mc) && !defined(cmm_rmc) && !defined(cmm_wmc)
#define CONFIG_HAVE_MEM_COHERENCY
/*
 * Architectures with cache coherency must _not_ define cmm_mc/cmm_rmc/cmm_wmc.
 *
 * For them, cmm_mc/cmm_rmc/cmm_wmc are implemented with a simple
 * compiler barrier; in addition, we provide defaults for cmm_mb (using
 * GCC builtins) as well as cmm_rmb and cmm_wmb (defaulting to cmm_mb).
 */
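
/*
 * Sketch (illustrative, not part of this header): a cache-coherent
 * architecture header would leave cmm_mc/cmm_rmc/cmm_wmc undefined and
 * override only the hardware barriers, e.g. on x86-64:
 *
 *   #define cmm_mb() __asm__ __volatile__ ("mfence" ::: "memory")
 *   #define cmm_rmb() __asm__ __volatile__ ("lfence" ::: "memory")
 *   #define cmm_wmb() __asm__ __volatile__ ("sfence" ::: "memory")
 */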

#ifdef CONFIG_RCU_USE_ATOMIC_BUILTINS

# ifdef CMM_SANITIZE_THREAD
/*
 * Silence TSAN complaints about the thread fence, which it does not
 * support.
 */
static inline void _cmm_thread_fence_wrapper(void)
{
#  if defined(__clang__)
#   pragma clang diagnostic push
#   pragma clang diagnostic ignored "-Wpragmas"
#   pragma clang diagnostic ignored "-Wunknown-warning-option"
#   pragma clang diagnostic ignored "-Wtsan"
#  elif defined(__GNUC__)
#   pragma GCC diagnostic push
#   pragma GCC diagnostic ignored "-Wpragmas"
#   pragma GCC diagnostic ignored "-Wtsan"
#  endif
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
#  if defined(__clang__)
#   pragma clang diagnostic pop
#  elif defined(__GNUC__)
#   pragma GCC diagnostic pop
#  endif
}
# endif /* CMM_SANITIZE_THREAD */
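
/*
 * Note (hedged interpretation): GCC's -Wtsan warns that TSAN does not
 * instrument __atomic_thread_fence(); the pragmas above only silence
 * that diagnostic (and, for Clang, the case where -Wtsan is an unknown
 * warning option), they do not alter the emitted fence.
 */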

# ifndef cmm_smp_mb
#  ifdef CMM_SANITIZE_THREAD
#   define cmm_smp_mb() _cmm_thread_fence_wrapper()
#  else
#   define cmm_smp_mb() __atomic_thread_fence(__ATOMIC_SEQ_CST)
#  endif /* CMM_SANITIZE_THREAD */
# endif /* !cmm_smp_mb */

#endif /* CONFIG_RCU_USE_ATOMIC_BUILTINS */

/*
 * cmm_mb() expands to __sync_synchronize() instead of __atomic_thread_fence
 * with SEQ_CST because the former "issues a full memory barrier" while the
 * latter "acts as a synchronization fence between threads", which is too weak
 * for what we want, for example with I/O devices.
 *
 * Even though __sync_synchronize() seems to be an alias for a sequentially
 * consistent atomic thread fence on every architecture on GCC and Clang,
 * this assumption might not hold in the future. Therefore, the definitions
 * below are used to ensure correct behavior.
 *
 * The quoted phrases above are from the GCC manual.
 */
#ifndef cmm_mb
#define cmm_mb() __sync_synchronize()
#endif

#ifndef cmm_rmb
#define cmm_rmb() cmm_mb()
#endif

#ifndef cmm_wmb
#define cmm_wmb() cmm_mb()
#endif

#define cmm_mc() cmm_barrier()
#define cmm_rmc() cmm_barrier()
#define cmm_wmc() cmm_barrier()
#else
/*
 * Architectures without cache coherency need something like the following:
 *
 * #define cmm_mc() arch_cache_flush()
 * #define cmm_rmc() arch_cache_flush_read()
 * #define cmm_wmc() arch_cache_flush_write()
 *
 * Of these, only cmm_mc is mandatory. cmm_rmc and cmm_wmc default to
 * cmm_mc. cmm_mb/cmm_rmb/cmm_wmb use these definitions by default:
 *
 * #define cmm_mb() cmm_mc()
 * #define cmm_rmb() cmm_rmc()
 * #define cmm_wmb() cmm_wmc()
 */

#ifndef cmm_mb
#define cmm_mb() cmm_mc()
#endif

#ifndef cmm_rmb
#define cmm_rmb() cmm_rmc()
#endif

#ifndef cmm_wmb
#define cmm_wmb() cmm_wmc()
#endif

#ifndef cmm_rmc
#define cmm_rmc() cmm_mc()
#endif

#ifndef cmm_wmc
#define cmm_wmc() cmm_mc()
#endif
#endif

/* Nop everywhere except on Alpha. */
#ifndef cmm_read_barrier_depends
#define cmm_read_barrier_depends()
#endif
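
/*
 * Sketch (illustrative; `gp' is a hypothetical shared pointer): on
 * Alpha, a data dependency does not order the dependent load, so a
 * reader needs:
 *
 *   p = CMM_LOAD_SHARED(gp);
 *   cmm_smp_read_barrier_depends();
 *   v = p->field;
 */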

#ifdef CONFIG_RCU_SMP
#ifndef cmm_smp_mb
#define cmm_smp_mb() cmm_mb()
#endif
#ifndef cmm_smp_rmb
#define cmm_smp_rmb() cmm_rmb()
#endif
#ifndef cmm_smp_wmb
#define cmm_smp_wmb() cmm_wmb()
#endif
#ifndef cmm_smp_mc
#define cmm_smp_mc() cmm_mc()
#endif
#ifndef cmm_smp_rmc
#define cmm_smp_rmc() cmm_rmc()
#endif
#ifndef cmm_smp_wmc
#define cmm_smp_wmc() cmm_wmc()
#endif
#ifndef cmm_smp_read_barrier_depends
#define cmm_smp_read_barrier_depends() cmm_read_barrier_depends()
#endif
#else
/*
 * Uniprocessor (!CONFIG_RCU_SMP) builds demote the cmm_smp_* barriers
 * to plain compiler barriers: a single CPU always observes its own
 * accesses in program order.
 */
#ifndef cmm_smp_mb
#define cmm_smp_mb() cmm_barrier()
#endif
#ifndef cmm_smp_rmb
#define cmm_smp_rmb() cmm_barrier()
#endif
#ifndef cmm_smp_wmb
#define cmm_smp_wmb() cmm_barrier()
#endif
#ifndef cmm_smp_mc
#define cmm_smp_mc() cmm_barrier()
#endif
#ifndef cmm_smp_rmc
#define cmm_smp_rmc() cmm_barrier()
#endif
#ifndef cmm_smp_wmc
#define cmm_smp_wmc() cmm_barrier()
#endif
#ifndef cmm_smp_read_barrier_depends
#define cmm_smp_read_barrier_depends()
#endif
#endif
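
/*
 * Usage sketch (illustrative; `data' and `ready' are hypothetical
 * shared variables): pair cmm_smp_wmb() in a producer with
 * cmm_smp_rmb() in a consumer so the payload is published before the
 * flag:
 *
 *   producer:
 *     data = 42;
 *     cmm_smp_wmb();
 *     CMM_STORE_SHARED(ready, 1);
 *
 *   consumer:
 *     if (CMM_LOAD_SHARED(ready)) {
 *             cmm_smp_rmb();
 *             assert(data == 42);
 *     }
 */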

#ifndef caa_cpu_relax
#define caa_cpu_relax() cmm_barrier()
#endif
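
/*
 * Sketch (illustrative): busy-wait loops should call caa_cpu_relax()
 * on each iteration; architecture headers may override it with a CPU
 * pause hint (e.g. "rep; nop" on x86), while this generic fallback is
 * only a compiler barrier:
 *
 *   while (!CMM_LOAD_SHARED(stop))
 *           caa_cpu_relax();
 */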

#ifndef HAS_CAA_GET_CYCLES
#define HAS_CAA_GET_CYCLES

#if defined(__APPLE__)

#include <mach/mach.h>
#include <mach/clock.h>
#include <mach/mach_time.h>
#include <time.h>
#include <stdint.h>

typedef uint64_t caa_cycles_t;

static inline caa_cycles_t caa_get_cycles (void)
{
	mach_timespec_t ts = { 0, 0 };
	static clock_serv_t clock_service;

	if (caa_unlikely(!clock_service)) {
		if (host_get_clock_service(mach_host_self(),
				SYSTEM_CLOCK, &clock_service))
			return -1ULL;
	}
	if (caa_unlikely(clock_get_time(clock_service, &ts)))
		return -1ULL;
	return ((uint64_t) ts.tv_sec * 1000000000ULL) + ts.tv_nsec;
}

#elif defined(CONFIG_RCU_HAVE_CLOCK_GETTIME)

#include <time.h>
#include <stdint.h>

typedef uint64_t caa_cycles_t;

static inline caa_cycles_t caa_get_cycles (void)
{
	struct timespec ts;

	if (caa_unlikely(clock_gettime(CLOCK_MONOTONIC, &ts)))
		return -1ULL;
	return ((uint64_t) ts.tv_sec * 1000000000ULL) + ts.tv_nsec;
}

#else

#error caa_get_cycles() not implemented for this platform.

#endif

#endif /* HAS_CAA_GET_CYCLES */
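
/*
 * Usage sketch (illustrative): despite its name, the generic
 * caa_get_cycles() returns monotonic nanoseconds, or -1ULL on error,
 * so an interval can be measured as:
 *
 *   caa_cycles_t t0 = caa_get_cycles();
 *   ... code under measurement ...
 *   caa_cycles_t t1 = caa_get_cycles();
 *   uint64_t delta_ns = t1 - t0;
 */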

#ifdef __cplusplus
}
#endif

#endif /* _URCU_ARCH_GENERIC_H */