#ifndef _URCU_H
#define _URCU_H

/*
 * urcu.h
 *
 * Userspace RCU header
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 *
 * Credits to Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 * for inspiration coming from the Linux kernel RCU and rcu-preempt.
 *
 * The barrier, mb, rmb, wmb, atomic_inc, smp_read_barrier_depends, ACCESS_ONCE
 * and rcu_dereference primitives come from the Linux kernel.
 *
 * Distributed under GPLv2
 */

/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")

/* x86 32/64 specific */
#define mb() asm volatile("mfence":::"memory")
#define rmb() asm volatile("lfence":::"memory")
#define wmb() asm volatile("sfence" ::: "memory")

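/*
 * Illustrative sketch (not part of the original header): the usual
 * pairing of the barriers above. The writer orders its data store
 * before the store to a ready flag with wmb(); the reader orders the
 * flag load before the data load with rmb(). "data", "ready",
 * compute() and consume() are placeholder names for this example.
 *
 *	// writer side
 *	data = compute();
 *	wmb();		// make the data store visible before the flag store
 *	ready = 1;
 *
 *	// reader side
 *	if (ready) {
 *		rmb();	// order the flag load before the data load
 *		consume(data);
 *	}
 */
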
static inline void atomic_inc(int *v)
{
	asm volatile("lock; incl %0"
		     : "+m" (*v));
}

/* Nop everywhere except on alpha. */
#define smp_read_barrier_depends()

/*
 * Prevent the compiler from merging or refetching accesses. The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering. One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * This macro does absolutely -nothing- to prevent the CPU from reordering,
 * merging, or refetching absolutely anything at any time. Its main intended
 * use is to mediate communication between process-level code and irq/NMI
 * handlers, all running on the same CPU.
 */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

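/*
 * Illustrative sketch (not part of the original header): without
 * ACCESS_ONCE(), the compiler is free to hoist the load of a shared
 * flag out of a polling loop and spin on a stale register value.
 * Forcing a volatile access makes it re-read memory on each iteration.
 * "stop_requested" is a placeholder name.
 *
 *	extern int stop_requested;	// written by another thread
 *
 *	static void wait_for_stop(void)
 *	{
 *		while (!ACCESS_ONCE(stop_requested))
 *			barrier();
 *	}
 *
 * As the comment above notes, this constrains the compiler only; it
 * does not order anything at the CPU level.
 */
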
/**
 * rcu_dereference - fetch an RCU-protected pointer in an
 * RCU read-side critical section. This pointer may later
 * be safely dereferenced.
 *
 * Inserts memory barriers on architectures that require them
 * (currently only the Alpha), and, more importantly, documents
 * exactly which pointers are protected by RCU.
 */

#define rcu_dereference(p)	({ \
				typeof(p) _________p1 = ACCESS_ONCE(p); \
				smp_read_barrier_depends(); \
				(_________p1); \
				})

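/*
 * Illustrative sketch (not part of the original header): a reader
 * fetches an RCU-protected pointer once with rcu_dereference() and then
 * works on that local copy instead of re-reading the global pointer.
 * "rcu_pointer" and "struct mystruct" are placeholder names.
 *
 *	struct mystruct *local_ptr;
 *
 *	local_ptr = rcu_dereference(rcu_pointer);
 *	if (local_ptr)
 *		use_fields(local_ptr);
 *
 * The enclosing read-side critical section (rcu_read_lock() /
 * rcu_read_unlock() below) is what keeps local_ptr valid.
 */
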
#define SIGURCU SIGUSR1

/* Global quiescent period parity */
extern int urcu_qparity;

extern int __thread urcu_active_readers[2];

static inline int get_urcu_qparity(void)
{
	return urcu_qparity;
}

/*
 * Returns the parity used for this read-side critical section, which must
 * be passed to the matching rcu_read_unlock().
 */
static inline int rcu_read_lock(void)
{
	int urcu_parity = get_urcu_qparity();
	urcu_active_readers[urcu_parity]++;
	/*
	 * Increment active readers count before accessing the pointer.
	 * See force_mb_all_threads().
	 */
	barrier();
	return urcu_parity;
}

static inline void rcu_read_unlock(int urcu_parity)
{
	barrier();
	/*
	 * Finish using rcu before decrementing the active readers count.
	 * See force_mb_all_threads().
	 */
	urcu_active_readers[urcu_parity]--;
}

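/*
 * Illustrative sketch (not part of the original header): a read-side
 * critical section keeps the parity returned by rcu_read_lock() and
 * hands it back to rcu_read_unlock(). "rcu_pointer" and "struct
 * mystruct" are placeholder names.
 *
 *	struct mystruct *p;
 *	int parity;
 *
 *	parity = rcu_read_lock();
 *	p = rcu_dereference(rcu_pointer);
 *	if (p)
 *		read_fields(p);
 *	rcu_read_unlock(parity);
 *
 * The pointer obtained inside the critical section must not be used
 * after rcu_read_unlock(), since an updater may then reclaim it.
 */
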
extern void rcu_write_lock(void);
extern void rcu_write_unlock(void);

extern void *urcu_publish_content(void **ptr, void *new);

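/*
 * Illustrative sketch (not part of the original header), assuming
 * urcu_publish_content() exchanges *ptr with the new pointer, waits for
 * pre-existing readers to finish, and returns the old pointer so that
 * the caller may free it; see urcu.c for the authoritative semantics.
 * "rcu_pointer" and "struct mystruct" are placeholder names.
 *
 *	struct mystruct *new_data, *old_data;
 *
 *	new_data = malloc(sizeof(*new_data));
 *	new_data->field = 42;
 *	old_data = urcu_publish_content((void **)&rcu_pointer, new_data);
 *	free(old_data);		// no reader can still reference it
 */
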
/*
 * Reader thread registration.
 */
extern void urcu_register_thread(void);
extern void urcu_unregister_thread(void);
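
/*
 * Illustrative sketch (not part of the original header): each reader
 * thread registers itself before entering read-side critical sections
 * and unregisters before exiting, presumably so the updater can track
 * this thread's urcu_active_readers counters. "reader_thread" is a
 * placeholder pthread start routine.
 *
 *	void *reader_thread(void *arg)
 *	{
 *		urcu_register_thread();
 *		// ... read-side critical sections as shown above ...
 *		urcu_unregister_thread();
 *		return NULL;
 *	}
 */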

#endif /* _URCU_H */