#ifndef _URCU_H
#define _URCU_H

/*
 * urcu.h
 *
 * Userspace RCU header
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Credit to Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * for inspiration coming from the Linux kernel RCU and rcu-preempt.
 *
 * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
 * and rcu_dereference primitives come from the Linux kernel.
 *
 * Distributed under GPLv2
 */

#include <stdlib.h>
#include <pthread.h>

/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/*
 * Assume the architecture has coherent caches. Blackfin will want this unset.
 */
#define CONFIG_HAVE_MEM_COHERENCY 1

/* Assume P4 or newer */
#define CONFIG_HAVE_FENCE 1

/* Assume SMP machine, given we don't have this information */
#define CONFIG_SMP 1

#ifdef CONFIG_HAVE_MEM_COHERENCY
/*
 * Caches are coherent, no need to flush them.
 */
#define mc()	barrier()
#define rmc()	barrier()
#define wmc()	barrier()
#else
#error "The architecture must create its own cache flush primitives"
#define mc()	arch_cache_flush()
#define rmc()	arch_cache_flush_read()
#define wmc()	arch_cache_flush_write()
#endif

#ifdef CONFIG_HAVE_MEM_COHERENCY

/* x86 32/64 specific */
#ifdef CONFIG_HAVE_FENCE
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence"::: "memory")
#else
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb()	asm volatile("lock; addl $0,0(%%esp)":::"memory")
#define rmb()	asm volatile("lock; addl $0,0(%%esp)":::"memory")
#define wmb()	asm volatile("lock; addl $0,0(%%esp)"::: "memory")
#endif

#else /* !CONFIG_HAVE_MEM_COHERENCY */

/*
 * Without cache coherency, the memory barriers become cache flushes.
 */
#define mb()	mc()
#define rmb()	rmc()
#define wmb()	wmc()

#endif /* !CONFIG_HAVE_MEM_COHERENCY */

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_mc()	mc()
#define smp_rmc()	rmc()
#define smp_wmc()	wmc()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_mc()	barrier()
#define smp_rmc()	barrier()
#define smp_wmc()	barrier()
#endif

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	asm volatile("rep; nop" ::: "memory");
}

static inline void cpu_relax(void)
{
	rep_nop();
}

static inline void atomic_inc(int *v)
{
	asm volatile("lock; incl %0"
		     : "+m" (*v));
}

#define xchg(ptr, v) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))

struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 * but generally the primitive is invalid; *ptr is an output argument. --ANK
 * x is considered local, ptr is considered remote.
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	switch (size) {
	case 1:
		asm volatile("xchgb %b0,%1"
			     : "=q" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 2:
		asm volatile("xchgw %w0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 4:
		asm volatile("xchgl %k0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 8:
		asm volatile("xchgq %0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	}
	smp_wmc();
	return x;
}

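/*
 * Illustrative sketch only: xchg() atomically swaps a new value into a
 * shared location and returns the previous contents. The variable names
 * below (shared_word, new_val) are hypothetical, not part of this header.
 *
 *	static unsigned long shared_word;
 *
 *	unsigned long new_val = 42;
 *	unsigned long old_val = xchg(&shared_word, new_val);
 *	// old_val now holds whatever shared_word contained before the swap.
 */
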
/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()

/*
 * Prevent the compiler from merging or refetching accesses. The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering. One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time. Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x)	(*(volatile typeof(x) *)&(x))

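/*
 * Illustrative sketch only: ACCESS_ONCE() forces the compiler to emit one
 * load per loop iteration instead of hoisting the read out of the loop.
 * The flag name `stop_requested` is hypothetical.
 *
 *	extern int stop_requested;
 *
 *	while (!ACCESS_ONCE(stop_requested))
 *		cpu_relax();
 */
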
/*
 * Load data from shared memory, doing a cache flush if required.
 */
#define LOAD_SHARED(p)		({ \
				smp_rmc(); \
				typeof(p) _________p1 = ACCESS_ONCE(p); \
				(_________p1); \
				})

/*
 * Store v into x, where x is located in shared memory. Performs the required
 * cache flush after writing.
 */
#define STORE_SHARED(x, v) \
	do { \
		(x) = (v); \
		smp_wmc(); \
	} while (0)

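/*
 * Illustrative sketch, assuming a hypothetical `data_ready` flag shared
 * between two threads: the writer publishes with STORE_SHARED() (which
 * performs the cache flush when needed) and the reader polls with
 * LOAD_SHARED().
 *
 *	writer:	prepare_data();
 *		STORE_SHARED(data_ready, 1);
 *
 *	reader:	while (!LOAD_SHARED(data_ready))
 *			cpu_relax();
 *		consume_data();
 *
 * Note that LOAD_SHARED/STORE_SHARED only handle cache coherency; any
 * memory ordering still has to come from the smp_*() barriers above.
 */
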
/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section. This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p)	({ \
				typeof(p) _________p1 = LOAD_SHARED(p); \
				smp_read_barrier_depends(); \
				(_________p1); \
				})

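/*
 * Illustrative read-side sketch (the structure and pointer names are
 * hypothetical, not part of this header): a registered reader thread
 * brackets its accesses with rcu_read_lock()/rcu_read_unlock(), defined
 * below, and fetches the shared pointer through rcu_dereference().
 *
 *	struct mydata {
 *		int value;
 *	};
 *	extern struct mydata *shared_data;	// updated with rcu_assign_pointer()
 *
 *	int read_value(void)
 *	{
 *		struct mydata *p;
 *		int v = -1;
 *
 *		rcu_read_lock();
 *		p = rcu_dereference(shared_data);
 *		if (p)
 *			v = p->value;
 *		rcu_read_unlock();
 *		return v;
 *	}
 */
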
#define SIGURCU SIGUSR1

/*
 * If a reader is really non-cooperative and refuses to commit its
 * urcu_active_readers count to memory (there is no barrier in the reader
 * per se), kick it after a few loops of waiting for it.
 */
#define KICK_READER_LOOPS 10000

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>

#define YIELD_READ	(1 << 0)
#define YIELD_WRITE	(1 << 1)

/* Updates without DEBUG_FULL_MB are much slower. Account for this in the delay. */
#ifdef DEBUG_FULL_MB
/* maximum sleep delay, in us */
#define MAX_SLEEP 50
#else
#define MAX_SLEEP 30000
#endif

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_init(void)
{
	rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif

#ifdef DEBUG_FULL_MB
static inline void reader_barrier(void)
{
	smp_mb();
}
#else
static inline void reader_barrier(void)
{
	barrier();
}
#endif

/*
 * The trick here is that the shift used for RCU_GP_CTR_BIT must be a
 * multiple of 8 so we can use a full 8-bit, 16-bit or 32-bit bitmask for
 * the lower order bits.
 */
#define RCU_GP_COUNT		(1UL << 0)
/* Use a number of bits equal to half of the architecture's long size */
#define RCU_GP_CTR_BIT		(1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)

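/*
 * Worked example, derived from the definitions above and assuming a 64-bit
 * long: RCU_GP_CTR_BIT is 1UL << 32 and RCU_GP_CTR_NEST_MASK is
 * 0x00000000ffffffff. The per-thread urcu_active_readers word therefore
 * packs two fields:
 *
 *	bits 0..31	read-side critical section nesting count,
 *			advanced by RCU_GP_COUNT on each nested rcu_read_lock()
 *	bit 32		snapshot of the global grace-period parity
 *			(urcu_gp_ctr & RCU_GP_CTR_BIT) taken by the outermost
 *			rcu_read_lock()
 *
 * With a 32-bit long the same layout holds, using 16-bit halves.
 */
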
/*
 * Global quiescent period counter with low-order bits unused.
 * Using an int rather than a char to eliminate false register dependencies
 * causing stalls on some architectures.
 */
extern long urcu_gp_ctr;

extern long __thread urcu_active_readers;

static inline int rcu_old_gp_ongoing(long *value)
{
	long v;

	if (value == NULL)
		return 0;
	/*
	 * Make sure both tests below are done on the same version of *value
	 * to ensure consistency.
	 */
	v = LOAD_SHARED(*value);
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}

static inline void rcu_read_lock(void)
{
	long tmp;

	tmp = urcu_active_readers;
	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
	/*
	 * The data dependency "read urcu_gp_ctr, write urcu_active_readers"
	 * serializes those two memory operations. We are not using STORE_SHARED
	 * and LOAD_SHARED here (although we should) because the writer will
	 * wake us up with a signal which does a flush in its handler to perform
	 * the urcu_gp_ctr re-read and the urcu_active_readers commit to main
	 * memory.
	 */
	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
		urcu_active_readers = ACCESS_ONCE(urcu_gp_ctr);
	else
		urcu_active_readers = tmp + RCU_GP_COUNT;
	/*
	 * Increment active readers count before accessing the pointer.
	 * See force_mb_all_threads().
	 */
	reader_barrier();
}

static inline void rcu_read_unlock(void)
{
	reader_barrier();
	/*
	 * Finish using rcu before decrementing the active readers count.
	 * See force_mb_all_threads().
	 */
	urcu_active_readers -= RCU_GP_COUNT;
}

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections. Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment. More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */

#define rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		(p) = (v); \
		smp_wmc(); \
	})

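/*
 * Illustrative write-side sketch (types and names are hypothetical): the
 * writer fully initializes the new structure before publishing it with
 * rcu_assign_pointer(), so concurrent readers using rcu_dereference()
 * observe either the old pointer or a fully initialized new one.
 *
 *	struct mydata *p = malloc(sizeof(*p));
 *
 *	p->value = 42;				// initialize first ...
 *	rcu_assign_pointer(shared_data, p);	// ... then publish
 */
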
#define rcu_xchg_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		xchg(p, v); \
	})

extern void synchronize_rcu(void);

/*
 * Exchanges the pointer and waits for a quiescent state.
 * The pointer returned can then be freed.
 */
#define urcu_publish_content(p, v) \
	({ \
		void *oldptr; \
		oldptr = rcu_xchg_pointer(p, v); \
		synchronize_rcu(); \
		oldptr; \
	})

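/*
 * Illustrative update sketch (hypothetical names): publish a new version of
 * the shared structure and reclaim the previous one once all pre-existing
 * readers have finished.
 *
 *	struct mydata *newp = malloc(sizeof(*newp));
 *	struct mydata *oldp;
 *
 *	newp->value = 43;
 *	oldp = urcu_publish_content(&shared_data, newp);
 *	free(oldp);	// safe: synchronize_rcu() has already completed
 */
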
/*
 * Reader thread registration.
 */
extern void urcu_register_thread(void);
extern void urcu_unregister_thread(void);

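/*
 * Illustrative reader thread lifetime sketch (the thread function, shutdown
 * flag and read_value() helper are hypothetical): each thread that uses
 * rcu_read_lock()/rcu_read_unlock() must register itself so the writer can
 * account for it, and must unregister before exiting.
 *
 *	void *reader_thread(void *arg)
 *	{
 *		urcu_register_thread();
 *		while (!ACCESS_ONCE(shutdown))
 *			read_value();	// read-side critical sections
 *		urcu_unregister_thread();
 *		return NULL;
 *	}
 */
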
#endif /* _URCU_H */