Add ACCESS_ONCE to _STORE_SHARED
[urcu.git] / urcu.h
#ifndef _URCU_H
#define _URCU_H

/*
 * urcu.h
 *
 * Userspace RCU header
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Credits to Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * for inspiration coming from the Linux kernel RCU and rcu-preempt.
 *
 * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
 * and rcu_dereference primitives come from the Linux kernel.
 *
 * Distributed under GPLv2
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdlib.h>
#include <pthread.h>

/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Assume SMP machine, given we don't have this information */
#define CONFIG_SMP 1


#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_mc()	mc()
#define smp_rmc()	rmc()
#define smp_wmc()	wmc()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_mc()	barrier()
#define smp_rmc()	barrier()
#define smp_wmc()	barrier()
#endif

#include "arch.h"

/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()

/*
 * Prevent the compiler from merging or refetching accesses. The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering. One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time. Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x)	(*(volatile typeof(x) *)&(x))

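/*
 * Usage sketch (illustrative, not part of this header's API): polling a
 * flag set by another thread. ACCESS_ONCE() forces the compiler to
 * reload "stop" on each iteration rather than hoisting the load out of
 * the loop; "stop" and do_work() are hypothetical.
 *
 *	extern int stop;
 *
 *	while (!ACCESS_ONCE(stop))
 *		do_work();
 */
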
/*
 * Identify a shared load. A smp_rmc() or smp_mc() should come before the load.
 */
#define _LOAD_SHARED(p)		ACCESS_ONCE(p)

/*
 * Load data from shared memory, doing a cache flush if required.
 */
#define LOAD_SHARED(p) \
	({ \
		smp_rmc(); \
		_LOAD_SHARED(p); \
	})


/*
 * Identify a shared store. A smp_wmc() or smp_mc() should follow the store.
 */
#define _STORE_SHARED(x, v) \
	do { \
		ACCESS_ONCE(x) = (v); \
	} while (0)

/*
 * Store v into x, where x is located in shared memory. Performs the required
 * cache flush after writing.
 */
#define STORE_SHARED(x, v) \
	do { \
		_STORE_SHARED(x, v); \
		smp_wmc(); \
	} while (0)

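/*
 * Pairing sketch (illustrative): STORE_SHARED() on the writer side is
 * matched by LOAD_SHARED() on the reader side, so the smp_wmc() after
 * the store pairs with the smp_rmc() before the load. The variable
 * "shared_count" is hypothetical.
 *
 *	long shared_count;
 *
 *	writer:  STORE_SHARED(shared_count, 42);
 *	reader:  long v = LOAD_SHARED(shared_count);
 */
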
/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section. This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p)     ({ \
				typeof(p) _________p1 = LOAD_SHARED(p); \
				smp_read_barrier_depends(); \
				(_________p1); \
				})

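/*
 * Example (sketch; "struct foo" and the global pointer "gp" are
 * hypothetical): fetch the protected pointer once, then work on the
 * local copy for the rest of the critical section.
 *
 *	struct foo *p = rcu_dereference(gp);
 *	if (p)
 *		use(p->data);
 */
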
#define SIGURCU SIGUSR1

/*
 * If a reader is really non-cooperative and refuses to commit its
 * urcu_active_readers count to memory (there is no barrier in the reader
 * per se), kick it after a few loops waiting for it.
 */
#define KICK_READER_LOOPS 10000

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>

#define YIELD_READ	(1 << 0)
#define YIELD_WRITE	(1 << 1)

/* Updates without DEBUG_FULL_MB are much slower. Account this in the delay */
#ifdef DEBUG_FULL_MB
/* maximum sleep delay, in us */
#define MAX_SLEEP 50
#else
#define MAX_SLEEP 30000
#endif

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_init(void)
{
	rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif

#ifdef DEBUG_FULL_MB
static inline void reader_barrier(void)
{
	smp_mb();
}
#else
static inline void reader_barrier(void)
{
	barrier();
}
#endif

/*
 * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use
 * a full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
 */
#define RCU_GP_COUNT		(1UL << 0)
/* Use the amount of bits equal to half of the architecture long size */
#define RCU_GP_CTR_BIT		(1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)

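/*
 * Worked example: on a 64-bit architecture, sizeof(long) == 8, so
 * RCU_GP_CTR_BIT == 1UL << 32 and RCU_GP_CTR_NEST_MASK == 0xffffffff.
 * The low 32 bits of a reader's counter hold its read-side nesting
 * count (in steps of RCU_GP_COUNT), and bit 32 records which
 * grace-period phase it observed. On 32-bit, the split is 16/16.
 */
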
/*
 * Global quiescent period counter with low-order bits unused.
 * Using an int rather than a char to eliminate false register dependencies
 * causing stalls on some architectures.
 */
extern long urcu_gp_ctr;

extern long __thread urcu_active_readers;

static inline int rcu_old_gp_ongoing(long *value)
{
	long v;

	if (value == NULL)
		return 0;
	/*
	 * Make sure both tests below are done on the same version of *value
	 * to ensure consistency.
	 */
	v = LOAD_SHARED(*value);
	return (v & RCU_GP_CTR_NEST_MASK) &&
		 ((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}

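/*
 * Reading the test above: a reader holds up the writer only if it is
 * inside a read-side critical section (nest bits non-zero) AND its
 * snapshot of the phase bit differs from the current urcu_gp_ctr.
 * For instance, if the global phase bit is set while a reader's value
 * is RCU_GP_COUNT (nesting of 1, phase bit clear), both conditions
 * hold and the writer keeps waiting.
 */
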
static inline void rcu_read_lock(void)
{
	long tmp;

	tmp = urcu_active_readers;
	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
	/*
	 * The data dependency "read urcu_gp_ctr, write urcu_active_readers"
	 * serializes those two memory operations. The memory barrier in the
	 * signal handler ensures we receive the proper memory commit barriers
	 * required by _STORE_SHARED and _LOAD_SHARED whenever communication
	 * with the writer is needed.
	 */
	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
		_STORE_SHARED(urcu_active_readers, _LOAD_SHARED(urcu_gp_ctr));
	else
		_STORE_SHARED(urcu_active_readers, tmp + RCU_GP_COUNT);
	/*
	 * Increment active readers count before accessing the pointer.
	 * See force_mb_all_threads().
	 */
	reader_barrier();
}

static inline void rcu_read_unlock(void)
{
	reader_barrier();
	/*
	 * Finish using rcu before decrementing the pointer.
	 * See force_mb_all_threads().
	 */
	_STORE_SHARED(urcu_active_readers, urcu_active_readers - RCU_GP_COUNT);
}

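/*
 * Read-side sketch (hypothetical reader thread; assumes it called
 * urcu_register_thread() beforehand). Nesting is permitted: an inner
 * rcu_read_lock() merely adds RCU_GP_COUNT to the nesting count.
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	rcu_read_lock();	(nested: count becomes 2)
 *	...
 *	rcu_read_unlock();
 *	rcu_read_unlock();
 */
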
/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections. Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment. More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */

#define rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		STORE_SHARED(p, v); \
	})

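/*
 * Write-side sketch ("struct foo" and "gp" are hypothetical): fully
 * initialize the new structure before publishing it; the wmb() in
 * rcu_assign_pointer() keeps the initialization ordered before the
 * pointer store that makes it visible to readers.
 *
 *	struct foo *new = malloc(sizeof(*new));
 *	new->data = 42;
 *	rcu_assign_pointer(gp, new);
 */
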
#define rcu_xchg_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		xchg(p, v); \
	})

extern void synchronize_rcu(void);

/*
 * Exchanges the pointer and waits for quiescent state.
 * The pointer returned can be freed.
 */
#define urcu_publish_content(p, v) \
	({ \
		void *oldptr; \
		oldptr = rcu_xchg_pointer(p, v); \
		synchronize_rcu(); \
		oldptr; \
	})

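/*
 * Update sketch (hypothetical "gp" as above): urcu_publish_content()
 * returns the old version only after all pre-existing readers have
 * left their critical sections, so it can be freed immediately.
 *
 *	struct foo *old = urcu_publish_content((void **)&gp, new);
 *	free(old);
 */
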
/*
 * Reader thread registration.
 */
extern void urcu_register_thread(void);
extern void urcu_unregister_thread(void);

#endif /* _URCU_H */