#ifndef _URCU_H
#define _URCU_H

/*
 * urcu.h
 *
 * Userspace RCU header
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Credits to Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * for inspiration coming from the Linux kernel RCU and rcu-preempt.
 *
 * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
 * and rcu_dereference primitives come from the Linux kernel.
 *
 * Distributed under GPLv2
 */

#include <stdlib.h>
#include <pthread.h>

/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")

#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* x86 32/64 specific */
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence":::"memory")

/* Assume an SMP machine, given we don't have this information */
#define CONFIG_SMP 1

#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#else
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#endif

static inline void atomic_inc(int *v)
{
	asm volatile("lock; incl %0"
		     : "+m" (*v));
}

#define xchg(ptr, v) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))

struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the "volatile" attribute is necessary,
 * but strictly speaking the primitive is invalid: *ptr is an output
 * argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	switch (size) {
	case 1:
		asm volatile("xchgb %b0,%1"
			     : "=q" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 2:
		asm volatile("xchgw %w0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 4:
		asm volatile("xchgl %k0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 8:
		asm volatile("xchgq %0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	}
	return x;
}

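/*
 * Usage sketch (illustrative, not part of the original header; "flag" is a
 * hypothetical variable): xchg() atomically stores the new value and returns
 * the previous one, dispatching on operand size at compile time.
 *
 *	static int flag;
 *
 *	if (xchg(&flag, 1) == 0) {
 *		// this thread performed the 0 -> 1 transition
 *	}
 */
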
/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()

/*
 * Prevent the compiler from merging or refetching accesses. The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering. One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time. Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

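/*
 * Usage sketch (illustrative; "stop" is a hypothetical flag set by another
 * thread): ACCESS_ONCE() forces a real load on each iteration, preventing
 * the compiler from hoisting the read out of the loop.
 *
 *	extern int stop;
 *
 *	while (!ACCESS_ONCE(stop))
 *		do_work();
 */
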
/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section. This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p) ({ \
		typeof(p) _________p1 = ACCESS_ONCE(p); \
		smp_read_barrier_depends(); \
		(_________p1); \
	})

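/*
 * Usage sketch (illustrative; "struct foo" and the shared pointer "gp" are
 * hypothetical): fetch the protected pointer exactly once, then work on the
 * local copy. The fetch must occur inside a read-side critical section.
 *
 *	struct foo *p = rcu_dereference(gp);
 *
 *	if (p)
 *		do_something(p->data);
 */
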
#define SIGURCU SIGUSR1

/*
 * If a reader is really non-cooperative and refuses to commit its
 * urcu_active_readers count to memory (there is no barrier in the reader
 * per se), kick it after a few loops waiting for it.
 */
#define KICK_READER_LOOPS 10000

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>

#define YIELD_READ (1 << 0)
#define YIELD_WRITE (1 << 1)

/* Updates without DEBUG_FULL_MB are much slower. Account for this in the delay. */
#ifdef DEBUG_FULL_MB
/* maximum sleep delay, in us */
#define MAX_SLEEP 50
#else
#define MAX_SLEEP 30000
#endif

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_init(void)
{
	rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif

#ifdef DEBUG_FULL_MB
static inline void read_barrier(void)
{
	smp_mb();
}
#else
static inline void read_barrier(void)
{
	barrier();
}
#endif

/*
 * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can
 * use a full 8-bit, 16-bit or 32-bit bitmask for the lower-order bits.
 */
#define RCU_GP_COUNT (1UL << 0)
/* Use the number of bits equal to half the architecture's long size */
#define RCU_GP_CTR_BIT (1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)

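/*
 * Worked example (illustrative, not part of the original header): on a
 * 64-bit architecture, sizeof(long) == 8, so RCU_GP_CTR_BIT == 1UL << 32.
 * The low 32 bits of a reader's counter hold its nesting count and bit 32
 * holds the grace-period phase, so one load observes both consistently.
 */
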
/*
 * Global quiescent period counter with low-order bits unused.
 * Using an int rather than a char to eliminate false register dependencies
 * causing stalls on some architectures.
 */
extern long urcu_gp_ctr;

extern long __thread urcu_active_readers;

static inline int rcu_old_gp_ongoing(long *value)
{
	long v;

	if (value == NULL)
		return 0;
	/*
	 * Make sure both tests below are done on the same version of *value
	 * to ensure consistency.
	 */
	v = ACCESS_ONCE(*value);
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}

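/*
 * Reasoning sketch (not part of the original header): the snapshot v blocks
 * the grace period only if both conditions hold: a non-zero nest count
 * (low-order bits) means the reader is inside a read-side critical section,
 * and a phase bit differing from the current urcu_gp_ctr means that section
 * began before the current grace period.
 */
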
static inline void rcu_read_lock(void)
{
	long tmp;

	tmp = urcu_active_readers;
	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
	/*
	 * The data dependency "read urcu_gp_ctr, write urcu_active_readers"
	 * serializes those two memory operations.
	 */
	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
		urcu_active_readers = urcu_gp_ctr;
	else
		urcu_active_readers = tmp + RCU_GP_COUNT;
	/*
	 * Increment active readers count before accessing the pointer.
	 * See force_mb_all_threads().
	 */
	read_barrier();
}

static inline void rcu_read_unlock(void)
{
	read_barrier();
	/*
	 * Finish using RCU before decrementing the active readers count.
	 * See force_mb_all_threads().
	 */
	urcu_active_readers -= RCU_GP_COUNT;
}

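/*
 * Read-side critical sections may nest; the low-order nesting count bits
 * track the depth (illustrative sketch, not part of the original header):
 *
 *	rcu_read_lock();
 *	rcu_read_lock();	// nested: only bumps the nest count
 *	rcu_read_unlock();
 *	rcu_read_unlock();	// outermost unlock ends the critical section
 */
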
/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections. Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment. More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */

#define rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		(p) = (v); \
	})

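/*
 * Writer-side usage sketch (illustrative; "struct foo", "gp" and "new_foo"
 * are hypothetical names): initialize the structure fully, then publish it.
 * The barrier in rcu_assign_pointer() keeps the initialization ordered
 * before the pointer assignment.
 *
 *	struct foo *new_foo = malloc(sizeof(*new_foo));
 *
 *	new_foo->data = 42;
 *	rcu_assign_pointer(gp, new_foo);
 */
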
#define rcu_xchg_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		xchg(p, v); \
	})

extern void synchronize_rcu(void);

/*
 * Exchanges the pointer and waits for a quiescent state.
 * The pointer returned can then be freed.
 */
#define urcu_publish_content(p, v) \
	({ \
		void *oldptr; \
		oldptr = rcu_xchg_pointer(p, v); \
		synchronize_rcu(); \
		oldptr; \
	})

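/*
 * Update sketch (illustrative; "gp" and "new_foo" are hypothetical): the old
 * structure may be freed as soon as urcu_publish_content() returns, because
 * synchronize_rcu() has waited out all pre-existing readers.
 *
 *	struct foo *old = urcu_publish_content(&gp, new_foo);
 *
 *	free(old);
 */
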
/*
 * Reader thread registration.
 */
extern void urcu_register_thread(void);
extern void urcu_unregister_thread(void);
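
/*
 * Reader thread lifecycle sketch (illustrative; "reader_thread" and "stop"
 * are hypothetical): every thread taking read-side critical sections must
 * register itself first and unregister before exiting.
 *
 *	void *reader_thread(void *arg)
 *	{
 *		urcu_register_thread();
 *		while (!ACCESS_ONCE(stop)) {
 *			rcu_read_lock();
 *			// ... rcu_dereference() protected data ...
 *			rcu_read_unlock();
 *		}
 *		urcu_unregister_thread();
 *		return NULL;
 *	}
 */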

#endif /* _URCU_H */