#ifndef _URCU_H
#define _URCU_H

/*
 * urcu.h
 *
 * Userspace RCU header
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Credits to Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * for inspiration coming from the Linux kernel RCU and rcu-preempt.
 *
 * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
 * and rcu_dereference primitives come from the Linux kernel.
 *
 * Distributed under GPLv2
 */

#include <stdlib.h>

/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")

/* x86 32/64 specific */
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence" ::: "memory")

static inline void atomic_inc(int *v)
{
	asm volatile("lock; incl %0"
		     : "+m" (*v));
}

/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()

/*
 * Prevent the compiler from merging or refetching accesses. The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering. One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time. Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
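
/*
 * Illustrative sketch of the ordering property described above; the
 * variables "shared_a" and "shared_b" are hypothetical. Because the
 * two reads sit in separate C statements, the compiler may neither
 * merge them nor reorder them with respect to each other:
 *
 *	a = ACCESS_ONCE(shared_a);
 *	b = ACCESS_ONCE(shared_b);
 */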

/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section. This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p)	({				\
				typeof(p) _________p1 = ACCESS_ONCE(p); \
				smp_read_barrier_depends();	\
				(_________p1);			\
				})
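
/*
 * Illustrative sketch ("struct foo" and the global "gp" are
 * hypothetical names used only in these examples): fetch an
 * RCU-protected pointer inside a read-side critical section, then
 * read through the local copy:
 *
 *	struct foo *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		use(p->data);
 *	rcu_read_unlock();
 */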

#define SIGURCU SIGUSR1

#ifdef DEBUG_YIELD
#include <sched.h>
#include <time.h>	/* time(), used by debug_yield_init() */
#include <pthread.h>	/* pthread_self(), used by debug_yield_init() */

#define YIELD_READ	(1 << 0)
#define YIELD_WRITE	(1 << 1)

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			sched_yield();
}

static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			sched_yield();
}

static inline void debug_yield_init(void)
{
	rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif

/*
 * Limiting the nesting level to 256 to keep instructions small in the read
 * fast-path.
 */
#define RCU_GP_COUNT		(1U << 0)
#define RCU_GP_CTR_BIT		(1U << 8)
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)
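
/*
 * Worked example of the counter layout (illustrative): bits 0-7 hold
 * the read-side nesting count and bit 8 holds the grace-period phase.
 * A reader nested two levels deep while the global phase bit is set
 * stores RCU_GP_CTR_BIT | 2, i.e. 0x102, in its urcu_active_readers.
 */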

/* Global quiescent period counter with low-order bits unused. */
extern int urcu_gp_ctr;

extern int __thread urcu_active_readers;

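/*
 * Returns non-zero when the reader whose counter snapshot is *value is
 * inside a read-side critical section (non-zero nesting count) that
 * started before the current grace-period phase (phase bit differs
 * from urcu_gp_ctr), i.e. a reader the writer must still wait for.
 */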
static inline int rcu_old_gp_ongoing(int *value)
{
	int v;

	if (value == NULL)
		return 0;
	debug_yield_write();
	v = ACCESS_ONCE(*value);
	debug_yield_write();
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ ACCESS_ONCE(urcu_gp_ctr)) & RCU_GP_CTR_BIT);
}

static inline void rcu_read_lock(void)
{
	int tmp;

	debug_yield_read();
	tmp = urcu_active_readers;
	debug_yield_read();
	if (!(tmp & RCU_GP_CTR_NEST_MASK))
		urcu_active_readers = urcu_gp_ctr + RCU_GP_COUNT;
	else
		urcu_active_readers = tmp + RCU_GP_COUNT;
	debug_yield_read();
	/*
	 * Increment active readers count before accessing the pointer.
	 * See force_mb_all_threads().
	 */
	barrier();
	debug_yield_read();
}

static inline void rcu_read_unlock(void)
{
	debug_yield_read();
	barrier();
	debug_yield_read();
	/*
	 * Finish using rcu before decrementing the active readers count.
	 * See force_mb_all_threads().
	 */
	urcu_active_readers -= RCU_GP_COUNT;
	debug_yield_read();
}
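
/*
 * Illustrative sketch: read-side critical sections nest (up to the
 * 256 levels noted above), so the inner lock/unlock pair below merely
 * adjusts the nesting count ("gp" as in the earlier sketch):
 *
 *	rcu_read_lock();
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	rcu_read_unlock();
 *	rcu_read_unlock();
 */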

/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections. Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment. More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */

#define rcu_assign_pointer(p, v)		\
	({					\
		if (!__builtin_constant_p(v) ||	\
		    ((v) != NULL))		\
			wmb();			\
		(p) = (v);			\
	})
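
/*
 * Illustrative update-side sketch ("struct foo" and "gp" are the same
 * hypothetical names as above): fully initialize the structure, then
 * publish it; the wmb() in rcu_assign_pointer() keeps the stores to
 * the structure ordered before the pointer store:
 *
 *	struct foo *new = malloc(sizeof(*new));
 *	new->data = 42;
 *	rcu_assign_pointer(gp, new);
 */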

extern void *urcu_publish_content(void **ptr, void *new);
extern void synchronize_rcu(void);
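
/*
 * Illustrative deferred-free sketch (hypothetical names as above):
 * swap in the new pointer, wait until all pre-existing readers are
 * done, then reclaim the old structure. urcu_publish_content()
 * bundles the pointer exchange with the grace-period wait and
 * returns the old pointer:
 *
 *	struct foo *old = gp;
 *	rcu_assign_pointer(gp, new);
 *	synchronize_rcu();
 *	free(old);
 */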

/*
 * Reader thread registration.
 */
extern void urcu_register_thread(void);
extern void urcu_unregister_thread(void);
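
/*
 * Illustrative reader-thread lifecycle (the thread function and the
 * "done" flag are hypothetical): each reader thread registers itself
 * before its first read-side critical section and unregisters before
 * exiting:
 *
 *	void *reader_thread(void *arg)
 *	{
 *		urcu_register_thread();
 *		while (!done) {
 *			rcu_read_lock();
 *			...
 *			rcu_read_unlock();
 *		}
 *		urcu_unregister_thread();
 *		return NULL;
 *	}
 */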

#endif /* _URCU_H */