[urcu.git] include/urcu/arch/generic.h
// SPDX-FileCopyrightText: 2010 Paolo Bonzini <pbonzini@redhat.com>
//
// SPDX-License-Identifier: LGPL-2.1-or-later

#ifndef _URCU_ARCH_GENERIC_H
#define _URCU_ARCH_GENERIC_H

/*
 * urcu/arch/generic.h: common definitions for multiple architectures.
 */

#include <urcu/compiler.h>
#include <urcu/config.h>
#include <urcu/syscall-compat.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifndef CAA_CACHE_LINE_SIZE
#define CAA_CACHE_LINE_SIZE 64
#endif

#if !defined(cmm_mc) && !defined(cmm_rmc) && !defined(cmm_wmc)
#define CONFIG_HAVE_MEM_COHERENCY
/*
 * Architectures with cache coherency must _not_ define cmm_mc/cmm_rmc/cmm_wmc.
 *
 * For them, cmm_mc/cmm_rmc/cmm_wmc are implemented with a simple
 * compiler barrier; in addition, we provide defaults for cmm_mb (using
 * GCC builtins) as well as cmm_rmb and cmm_wmb (defaulting to cmm_mb).
 */
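
/*
 * Minimal usage sketch (an assumption for illustration, relying on the
 * cmm_smp_* macros defined further below and on the CMM_LOAD_SHARED()
 * and CMM_STORE_SHARED() accessors from <urcu/system.h>): on a
 * cache-coherent architecture, a writer publishes data with a write
 * barrier, paired with a read barrier on the reader side. shared_data,
 * ready, compute() and use() are placeholder names.
 *
 *	// writer
 *	shared_data = compute();
 *	cmm_smp_wmb();
 *	CMM_STORE_SHARED(ready, 1);
 *
 *	// reader
 *	if (CMM_LOAD_SHARED(ready)) {
 *		cmm_smp_rmb();
 *		use(shared_data);
 *	}
 */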

#ifdef CONFIG_RCU_USE_ATOMIC_BUILTINS

# ifdef CMM_SANITIZE_THREAD
/*
 * This makes TSAN quiet about unsupported thread fence.
 */
static inline void _cmm_thread_fence_wrapper(void)
{
# if defined(__clang__)
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wpragmas"
# pragma clang diagnostic ignored "-Wunknown-warning-option"
# pragma clang diagnostic ignored "-Wtsan"
# elif defined(__GNUC__)
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wpragmas"
# pragma GCC diagnostic ignored "-Wtsan"
# endif
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
# if defined(__clang__)
# pragma clang diagnostic pop
# elif defined(__GNUC__)
# pragma GCC diagnostic pop
# endif
}
# endif /* CMM_SANITIZE_THREAD */

# ifndef cmm_smp_mb
# ifdef CMM_SANITIZE_THREAD
# define cmm_smp_mb() _cmm_thread_fence_wrapper()
# else
# define cmm_smp_mb() __atomic_thread_fence(__ATOMIC_SEQ_CST)
# endif /* CMM_SANITIZE_THREAD */
# endif /* !cmm_smp_mb */

#endif /* CONFIG_RCU_USE_ATOMIC_BUILTINS */
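
/*
 * Sketch of how the #ifndef defaults in this file are meant to be
 * consumed (an assumed, simplified example rather than a real
 * architecture header): an arch-specific header defines its own
 * barriers first, then includes this file, so that only the missing
 * macros fall back to the generic definitions. The mfence form shown
 * is x86-style and purely illustrative.
 *
 *	#define cmm_mb()	__asm__ __volatile__ ("mfence":::"memory")
 *	#include <urcu/arch/generic.h>
 */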


/*
 * cmm_mb() expands to __sync_synchronize() instead of __atomic_thread_fence()
 * with SEQ_CST because the former "issues a full memory barrier" while the
 * latter "acts as a synchronization fence between threads", which is too weak
 * for what we want, for example with I/O devices.
 *
 * Even though __sync_synchronize() seems to be an alias for a sequentially
 * consistent atomic thread fence on every architecture on GCC and Clang, this
 * assumption might not hold in the future. The distinct definitions are
 * therefore kept to ensure correct behavior should the two ever diverge.
 *
 * The quoted descriptions above are from the GCC manual.
 */
#ifndef cmm_mb
#define cmm_mb() __sync_synchronize()
#endif
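
/*
 * Illustrative sketch of the distinction discussed above (assumed
 * device-style code, not part of liburcu; desc, len and doorbell are
 * placeholders): the full barrier is expected to also order the
 * following MMIO store, which a plain thread fence is not documented
 * to guarantee.
 *
 *	desc->len = len;			// fill a DMA descriptor
 *	cmm_mb();				// order against the device too
 *	*(volatile uint32_t *) doorbell = 1;	// ring the doorbell
 */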

#ifndef cmm_rmb
#define cmm_rmb() cmm_mb()
#endif

#ifndef cmm_wmb
#define cmm_wmb() cmm_mb()
#endif

#define cmm_mc() cmm_barrier()
#define cmm_rmc() cmm_barrier()
#define cmm_wmc() cmm_barrier()
#else
/*
 * Architectures without cache coherency need something like the following:
 *
 * #define cmm_mc() arch_cache_flush()
 * #define cmm_rmc() arch_cache_flush_read()
 * #define cmm_wmc() arch_cache_flush_write()
 *
 * Of these, only cmm_mc is mandatory. cmm_rmc and cmm_wmc default to
 * cmm_mc. cmm_mb/cmm_rmb/cmm_wmb use these definitions by default:
 *
 * #define cmm_mb() cmm_mc()
 * #define cmm_rmb() cmm_rmc()
 * #define cmm_wmb() cmm_wmc()
 */

#ifndef cmm_mb
#define cmm_mb() cmm_mc()
#endif

#ifndef cmm_rmb
#define cmm_rmb() cmm_rmc()
#endif

#ifndef cmm_wmb
#define cmm_wmb() cmm_wmc()
#endif

#ifndef cmm_rmc
#define cmm_rmc() cmm_mc()
#endif

#ifndef cmm_wmc
#define cmm_wmc() cmm_mc()
#endif
#endif

/* Nop everywhere except on alpha. */
#ifndef cmm_read_barrier_depends
#define cmm_read_barrier_depends()
#endif
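
/*
 * Sketch of the dependent-load ordering this barrier covers (an
 * assumed example; gp, p and ->value are placeholders, and
 * CMM_LOAD_SHARED() comes from <urcu/system.h>): after loading a
 * pointer published by another thread, Alpha needs the barrier before
 * dereferencing it. rcu_dereference() is built around this kind of
 * pattern, so users normally never invoke the barrier directly.
 *
 *	p = CMM_LOAD_SHARED(gp);
 *	cmm_smp_read_barrier_depends();
 *	v = p->value;
 */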

#ifdef CONFIG_RCU_SMP
#ifndef cmm_smp_mb
#define cmm_smp_mb() cmm_mb()
#endif
#ifndef cmm_smp_rmb
#define cmm_smp_rmb() cmm_rmb()
#endif
#ifndef cmm_smp_wmb
#define cmm_smp_wmb() cmm_wmb()
#endif
#ifndef cmm_smp_mc
#define cmm_smp_mc() cmm_mc()
#endif
#ifndef cmm_smp_rmc
#define cmm_smp_rmc() cmm_rmc()
#endif
#ifndef cmm_smp_wmc
#define cmm_smp_wmc() cmm_wmc()
#endif
#ifndef cmm_smp_read_barrier_depends
#define cmm_smp_read_barrier_depends() cmm_read_barrier_depends()
#endif
#else
#ifndef cmm_smp_mb
#define cmm_smp_mb() cmm_barrier()
#endif
#ifndef cmm_smp_rmb
#define cmm_smp_rmb() cmm_barrier()
#endif
#ifndef cmm_smp_wmb
#define cmm_smp_wmb() cmm_barrier()
#endif
#ifndef cmm_smp_mc
#define cmm_smp_mc() cmm_barrier()
#endif
#ifndef cmm_smp_rmc
#define cmm_smp_rmc() cmm_barrier()
#endif
#ifndef cmm_smp_wmc
#define cmm_smp_wmc() cmm_barrier()
#endif
#ifndef cmm_smp_read_barrier_depends
#define cmm_smp_read_barrier_depends()
#endif
#endif

#ifndef caa_cpu_relax
#define caa_cpu_relax() cmm_barrier()
#endif
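
/*
 * Intended-use sketch (an assumption; flag is a placeholder and
 * CMM_LOAD_SHARED() is the accessor mentioned above): busy-wait loops
 * call caa_cpu_relax() on every iteration so that architectures
 * overriding it can emit a pause/yield hint instead of just a compiler
 * barrier.
 *
 *	while (!CMM_LOAD_SHARED(flag))
 *		caa_cpu_relax();
 */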

#ifndef HAS_CAA_GET_CYCLES
#define HAS_CAA_GET_CYCLES

#if defined(__APPLE__)

#include <mach/mach.h>
#include <mach/clock.h>
#include <mach/mach_time.h>
#include <time.h>
#include <stdint.h>

typedef uint64_t caa_cycles_t;

static inline caa_cycles_t caa_get_cycles (void)
{
	mach_timespec_t ts = { 0, 0 };
	static clock_serv_t clock_service;

	if (caa_unlikely(!clock_service)) {
		if (host_get_clock_service(mach_host_self(),
				SYSTEM_CLOCK, &clock_service))
			return -1ULL;
	}
	if (caa_unlikely(clock_get_time(clock_service, &ts)))
		return -1ULL;
	return ((uint64_t) ts.tv_sec * 1000000000ULL) + ts.tv_nsec;
}

#elif defined(CONFIG_RCU_HAVE_CLOCK_GETTIME)

#include <time.h>
#include <stdint.h>

typedef uint64_t caa_cycles_t;

static inline caa_cycles_t caa_get_cycles (void)
{
	struct timespec ts;

	if (caa_unlikely(clock_gettime(CLOCK_MONOTONIC, &ts)))
		return -1ULL;
	return ((uint64_t) ts.tv_sec * 1000000000ULL) + ts.tv_nsec;
}

#else

#error caa_get_cycles() not implemented for this platform.

#endif

#endif /* HAS_CAA_GET_CYCLES */
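
/*
 * Usage sketch (an assumption; do_work() is a placeholder and
 * <inttypes.h> would be needed for PRIu64): the generic
 * caa_get_cycles() above returns monotonic nanoseconds rather than raw
 * CPU cycles, so a difference of two readings is an elapsed time.
 *
 *	caa_cycles_t start = caa_get_cycles();
 *	do_work();
 *	printf("elapsed: %" PRIu64 " ns\n", caa_get_cycles() - start);
 */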

#ifdef __cplusplus
}
#endif

#endif /* _URCU_ARCH_GENERIC_H */