Add Promela model
[urcu.git] / urcu.h

#ifndef _URCU_H
#define _URCU_H

/*
 * urcu.h
 *
 * Userspace RCU header
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Credit to Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * for inspiration drawn from the Linux kernel RCU and rcu-preempt.
 *
 * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
 * and rcu_dereference primitives come from the Linux kernel.
 *
 * Distributed under GPLv2
 */

#include <stdlib.h>
#include <pthread.h>

/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")

#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* x86 32/64 specific */
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence" ::: "memory")

static inline void atomic_inc(int *v)
{
	asm volatile("lock; incl %0"
		     : "+m" (*v));
}

#define xchg(ptr, v) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))

struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 * but generally the primitive is invalid; *ptr is the output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	switch (size) {
	case 1:
		asm volatile("xchgb %b0,%1"
			     : "=q" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 2:
		asm volatile("xchgw %w0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 4:
		asm volatile("xchgl %k0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 8:
		asm volatile("xchgq %0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	}
	return x;
}

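/*
 * Illustrative sketch (guard macro and names below are hypothetical, not
 * part of the original source): xchg() atomically swaps in a new value and
 * returns the previous content of the location.
 */
#ifdef URCU_EXAMPLE_XCHG
static inline unsigned long example_counter_reset(unsigned long *counter)
{
	/* Atomically fetch the old count and reset the counter to zero. */
	return xchg(counter, 0UL);
}
#endif /* URCU_EXAMPLE_XCHG */
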
/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()

/*
 * Prevent the compiler from merging or refetching accesses. The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering. One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time. Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section. This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p) \
	({ \
		typeof(p) _________p1 = ACCESS_ONCE(p); \
		smp_read_barrier_depends(); \
		(_________p1); \
	})

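/*
 * Illustrative sketch (guard macro and names are hypothetical): fetching an
 * RCU-protected pointer with rcu_dereference(). In real code this load is
 * done between rcu_read_lock() and rcu_read_unlock(), defined further below.
 */
#ifdef URCU_EXAMPLE_DEREF
static inline int example_read_value(int **head)
{
	int *p;

	/* Reads *head exactly once; p may then be dereferenced safely. */
	p = rcu_dereference(*head);
	return p ? *p : -1;
}
#endif /* URCU_EXAMPLE_DEREF */
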
#define SIGURCU SIGUSR1

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>

#define YIELD_READ (1 << 0)
#define YIELD_WRITE (1 << 1)

/* Updates without DEBUG_FULL_MB are much slower. Account for this in the delay. */
#ifdef DEBUG_FULL_MB
/* maximum sleep delay, in us */
#define MAX_SLEEP 50
#else
#define MAX_SLEEP 30000
#endif

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_init(void)
{
	rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif

#ifdef DEBUG_FULL_MB
static inline void read_barrier(void)
{
	mb();
}
#else
static inline void read_barrier(void)
{
	barrier();
}
#endif

/*
 * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can
 * use a full 8-bit, 16-bit or 32-bit bitmask for the lower-order bits.
 */
#define RCU_GP_COUNT (1UL << 0)
/* Use a number of bits equal to half of the architecture's long size */
#define RCU_GP_CTR_BIT (1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK (RCU_GP_CTR_BIT - 1)

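/*
 * Worked example (illustrative): on an LP64 architecture, sizeof(long) == 8,
 * so RCU_GP_CTR_BIT == 1UL << 32. The low 32 bits of a reader's counter,
 * selected by RCU_GP_CTR_NEST_MASK, hold its read-side nesting count, while
 * bit 32 holds the grace-period phase copied from urcu_gp_ctr.
 */
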
/*
 * Global quiescent period counter with low-order bits unused.
 * Using an int rather than a char to eliminate false register dependencies
 * causing stalls on some architectures.
 */
extern long urcu_gp_ctr;

extern long __thread urcu_active_readers;

static inline int rcu_old_gp_ongoing(long *value)
{
	long v;

	if (value == NULL)
		return 0;
	debug_yield_write();
	/*
	 * Make sure both tests below are done on the same version of *value
	 * to ensure consistency.
	 */
	v = ACCESS_ONCE(*value);
	debug_yield_write();
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}
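
/*
 * In other words: a reader still holds back the old grace period only if
 * (1) its nesting count is non-zero, i.e. it is inside a read-side critical
 * section, and (2) the phase bit it snapshotted differs from the current
 * urcu_gp_ctr, i.e. the critical section began before this grace period.
 */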

static inline void rcu_read_lock(void)
{
	long tmp;

	debug_yield_read();
	tmp = urcu_active_readers;
	debug_yield_read();
	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
		urcu_active_readers = urcu_gp_ctr;
	else
		urcu_active_readers = tmp + RCU_GP_COUNT;
	debug_yield_read();
	/*
	 * Increment active readers count before accessing the pointer.
	 * See force_mb_all_threads().
	 */
	read_barrier();
	debug_yield_read();
}

static inline void rcu_read_unlock(void)
{
	debug_yield_read();
	read_barrier();
	debug_yield_read();
	/*
	 * Finish using the RCU-protected data before decrementing the
	 * active readers count. See force_mb_all_threads().
	 */
	urcu_active_readers -= RCU_GP_COUNT;
	debug_yield_read();
}
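
/*
 * Illustrative sketch (guard macro and names are hypothetical): read-side
 * critical sections may nest. Only the outermost rcu_read_lock() snapshots
 * urcu_gp_ctr; inner ones merely increment the nesting count.
 */
#ifdef URCU_EXAMPLE_READER
static inline int example_nested_read(int **head)
{
	int *p, v = -1;

	rcu_read_lock();
	rcu_read_lock();	/* nested: bumps the nesting count only */
	p = rcu_dereference(*head);
	if (p)
		v = *p;
	rcu_read_unlock();
	rcu_read_unlock();
	return v;
}
#endif /* URCU_EXAMPLE_READER */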

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections. Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment. More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */

#define rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		(p) = (v); \
	})
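
/*
 * Illustrative sketch (guard macro and names are hypothetical): initialize
 * the structure completely, then publish it. The wmb() issued by
 * rcu_assign_pointer() keeps the initializing store ordered before the
 * pointer assignment.
 */
#ifdef URCU_EXAMPLE_ASSIGN
static inline void example_publish(int **head, int *newp, int value)
{
	*newp = value;			/* initialize before publication */
	rcu_assign_pointer(*head, newp);
}
#endif /* URCU_EXAMPLE_ASSIGN */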

#define rcu_xchg_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || \
		    ((v) != NULL)) \
			wmb(); \
		xchg(p, v); \
	})

extern void synchronize_rcu(void);

/*
 * Exchanges the pointer and waits for a quiescent state.
 * The old pointer returned can then safely be freed.
 */
#define urcu_publish_content(p, v) \
	({ \
		void *oldptr; \
		debug_yield_write(); \
		oldptr = rcu_xchg_pointer(p, v); \
		synchronize_rcu(); \
		oldptr; \
	})

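/*
 * Illustrative sketch (guard macro and names are hypothetical): replace the
 * published pointer and reclaim the old copy once all pre-existing readers
 * have exited their critical sections.
 */
#ifdef URCU_EXAMPLE_PUBLISH
static inline void example_replace(int **head, int *newp)
{
	int *old;

	old = urcu_publish_content(head, newp);
	free(old);	/* safe: synchronize_rcu() has already waited */
}
#endif /* URCU_EXAMPLE_PUBLISH */
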
/*
 * Reader thread registration.
 */
extern void urcu_register_thread(void);
extern void urcu_unregister_thread(void);
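
/*
 * Illustrative sketch (guard macro and names are hypothetical): every reader
 * thread registers itself before its first read-side critical section and
 * unregisters before exiting, so synchronize_rcu() knows which threads to
 * wait for.
 */
#ifdef URCU_EXAMPLE_THREAD
extern int example_stop;	/* hypothetical termination flag */

static void *example_reader_thread(void *arg)
{
	(void)arg;
	urcu_register_thread();
	while (!ACCESS_ONCE(example_stop)) {
		rcu_read_lock();
		/* ... access RCU-protected data via rcu_dereference() ... */
		rcu_read_unlock();
	}
	urcu_unregister_thread();
	return NULL;
}
#endif /* URCU_EXAMPLE_THREAD */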

#endif /* _URCU_H */