/*
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Credits to Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * for inspiration coming from the Linux kernel RCU and rcu-preempt.
 *
 * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
 * and rcu_dereference primitives come from the Linux kernel.
 *
 * Distributed under GPLv2.
 */

/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Assume P4 or newer */
#define CONFIG_HAS_FENCE 1

/* x86 32/64 specific */
#ifdef CONFIG_HAS_FENCE
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence"::: "memory")
#else
/*
 * Some non-Intel clones support out of order store. wmb() ceases to be a
 * nop for these.
 */
#define mb()	asm volatile("lock; addl $0,0(%%esp)":::"memory")
#define rmb()	asm volatile("lock; addl $0,0(%%esp)":::"memory")
#define wmb()	asm volatile("lock; addl $0,0(%%esp)"::: "memory")
#endif

/* Assume SMP machine, given we don't have this information */
#define CONFIG_SMP 1

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif

static inline void atomic_inc(int *v)
{
	asm volatile("lock; incl %0"
		     : "+m" (*v));
}

#define xchg(ptr, v) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))

struct __xchg_dummy {
	unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 * but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				   int size)
{
	switch (size) {
	case 1:
		asm volatile("xchgb %b0,%1"
			     : "=q" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 2:
		asm volatile("xchgw %w0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 4:
		asm volatile("xchgl %k0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	case 8:
		asm volatile("xchgq %0,%1"
			     : "=r" (x)
			     : "m" (*__xg(ptr)), "0" (x)
			     : "memory");
		break;
	}
	return x;
}

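/*
 * Usage sketch (not part of the original header; the "state" variable is
 * hypothetical): atomically swap in a new value and get the old one back,
 * with full lock-prefixed ordering on x86.
 *
 *	static int state;
 *	int old = xchg(&state, 1);
 */
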
/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()

/*
 * Prevent the compiler from merging or refetching accesses.  The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering.  One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time.  Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

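/*
 * Minimal usage sketch (the "ready" and "data" variables are hypothetical,
 * not part of this header): reading both through ACCESS_ONCE() in separate
 * C statements keeps the compiler from merging, refetching or reordering
 * the two loads with respect to each other.
 *
 *	int r, d;
 *	r = ACCESS_ONCE(ready);
 *	if (r)
 *		d = ACCESS_ONCE(data);
 *
 * This constrains the compiler only; CPU-level ordering still requires the
 * rmb()/wmb() primitives above.
 */
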
/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section.  This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */
#define rcu_dereference(p)	({ \
				typeof(p) _________p1 = ACCESS_ONCE(p); \
				smp_read_barrier_depends(); \
				(_________p1); \
				})

#define SIGURCU SIGUSR1

/*
 * If a reader is really non-cooperative and refuses to commit its
 * urcu_active_readers count to memory (there is no barrier in the reader
 * per se), kick it after a few loops waiting for it.
 */
#define KICK_READER_LOOPS 10000

#ifdef DEBUG_YIELD
#include <stdlib.h>
#include <sched.h>
#include <time.h>
#include <pthread.h>
#include <unistd.h>

#define YIELD_READ	(1 << 0)
#define YIELD_WRITE	(1 << 1)

/* Updates without DEBUG_FULL_MB are much slower. Account this in the delay */
/* maximum sleep delay, in us */
#define MAX_SLEEP 30000

extern unsigned int yield_active;
extern unsigned int __thread rand_yield;

static inline void debug_yield_read(void)
{
	if (yield_active & YIELD_READ)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_write(void)
{
	if (yield_active & YIELD_WRITE)
		if (rand_r(&rand_yield) & 0x1)
			usleep(rand_r(&rand_yield) % MAX_SLEEP);
}

static inline void debug_yield_init(void)
{
	rand_yield = time(NULL) ^ pthread_self();
}
#else
static inline void debug_yield_read(void)
{
}

static inline void debug_yield_write(void)
{
}

static inline void debug_yield_init(void)
{
}
#endif

#ifdef DEBUG_FULL_MB
static inline void read_barrier()
{
	smp_mb();
}
#else
static inline void read_barrier()
{
	barrier();
}
#endif

/*
 * The trick here is that RCU_GP_CTR_BIT must be a multiple of 8 so we can use a
 * full 8-bits, 16-bits or 32-bits bitmask for the lower order bits.
 */
#define RCU_GP_COUNT		(1UL << 0)
/* Use the amount of bits equal to half of the architecture long size */
#define RCU_GP_CTR_BIT		(1UL << (sizeof(long) << 2))
#define RCU_GP_CTR_NEST_MASK	(RCU_GP_CTR_BIT - 1)

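/*
 * Worked example (not in the original header): on a 64-bit architecture,
 * sizeof(long) == 8, so RCU_GP_CTR_BIT == 1UL << 32 and
 * RCU_GP_CTR_NEST_MASK == 0xffffffff.  The low 32 bits of
 * urcu_active_readers count read-side critical section nesting (incremented
 * by RCU_GP_COUNT), while bit 32 records which grace period phase of
 * urcu_gp_ctr the outermost rcu_read_lock() snapshotted.
 */
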
/*
 * Global quiescent period counter with low-order bits unused.
 * Using an int rather than a char to eliminate false register dependencies
 * causing stalls on some architectures.
 */
extern long urcu_gp_ctr;

extern long __thread urcu_active_readers;

static inline int rcu_old_gp_ongoing(long *value)
{
	long v;

	if (value == NULL)
		return 0;
	/*
	 * Make sure both tests below are done on the same version of *value
	 * to ensure consistency.
	 */
	v = ACCESS_ONCE(*value);
	return (v & RCU_GP_CTR_NEST_MASK) &&
		((v ^ urcu_gp_ctr) & RCU_GP_CTR_BIT);
}

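/*
 * Note on the test above: a reader only blocks the grace period if it is
 * currently inside a read-side critical section (non-zero nesting count in
 * the low-order bits) AND the urcu_gp_ctr snapshot it took on the outermost
 * rcu_read_lock() has a different RCU_GP_CTR_BIT parity than the current
 * global counter, i.e. it started before the ongoing grace period phase.
 */
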
static inline void rcu_read_lock(void)
{
	long tmp;

	tmp = urcu_active_readers;
	/* urcu_gp_ctr = RCU_GP_COUNT | (~RCU_GP_CTR_BIT or RCU_GP_CTR_BIT) */
	/* The data dependency "read urcu_gp_ctr, write urcu_active_readers",
	 * serializes those two memory operations. */
	if (likely(!(tmp & RCU_GP_CTR_NEST_MASK)))
		urcu_active_readers = ACCESS_ONCE(urcu_gp_ctr);
	else
		urcu_active_readers = tmp + RCU_GP_COUNT;
	/*
	 * Increment active readers count before accessing the pointer.
	 * See force_mb_all_threads().
	 */
	read_barrier();
}

static inline void rcu_read_unlock(void)
{
	read_barrier();
	/*
	 * Finish using rcu before decrementing the pointer.
	 * See force_mb_all_threads().
	 */
	urcu_active_readers -= RCU_GP_COUNT;
}

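/*
 * Reader-side usage sketch (the "struct foo" type, "foo_ptr" global and
 * use() function are hypothetical, not part of this header).  The reader
 * thread must have been registered with urcu_register_thread(), declared at
 * the end of this file.
 *
 *	struct foo *f;
 *
 *	rcu_read_lock();
 *	f = rcu_dereference(foo_ptr);
 *	if (f != NULL)
 *		use(f->field);
 *	rcu_read_unlock();
 */
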
/**
 * rcu_assign_pointer - assign (publicize) a pointer to a newly
 * initialized structure that will be dereferenced by RCU read-side
 * critical sections.  Returns the value assigned.
 *
 * Inserts memory barriers on architectures that require them
 * (pretty much all of them other than x86), and also prevents
 * the compiler from reordering the code that initializes the
 * structure after the pointer assignment.  More importantly, this
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */
#define rcu_assign_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || ((v) != NULL)) \
			wmb(); \
		(p) = (v); \
	})

#define rcu_xchg_pointer(p, v) \
	({ \
		if (!__builtin_constant_p(v) || ((v) != NULL)) \
			wmb(); \
		xchg(p, v); \
	})

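/*
 * Writer-side usage sketch (the "struct foo" type and "foo_ptr" global are
 * hypothetical, not part of this header): fully initialize the new object
 * before publishing it, so readers doing rcu_dereference(foo_ptr) never see
 * a partially initialized structure.
 *
 *	struct foo *new = malloc(sizeof(*new));
 *	new->field = 42;
 *	rcu_assign_pointer(foo_ptr, new);
 */
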
extern void synchronize_rcu(void);

/*
 * Exchanges the pointer and waits for quiescent state.
 * The pointer returned can be freed.
 */
#define urcu_publish_content(p, v) \
	({ \
		void *oldptr; \
		oldptr = rcu_xchg_pointer(p, v); \
		synchronize_rcu(); \
		oldptr; \
	})

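/*
 * Usage sketch (hypothetical "foo_ptr" global and "new" pointer): atomically
 * replace the published structure, then free the previous copy once all
 * pre-existing readers are done with it.
 *
 *	struct foo *old;
 *
 *	old = urcu_publish_content(&foo_ptr, new);
 *	free(old);
 */
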
/*
 * Reader thread registration.
 */
extern void urcu_register_thread(void);
extern void urcu_unregister_thread(void);