#ifndef _URCU_POINTER_STATIC_H
#define _URCU_POINTER_STATIC_H

/*
 * urcu/static/urcu-pointer.h
 *
 * Userspace RCU header. Operations on pointers.
 *
 * TO BE INCLUDED ONLY IN CODE THAT IS TO BE RECOMPILED ON EACH LIBURCU
 * RELEASE. See urcu.h for linking dynamically with the userspace rcu library.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <urcu/system.h>
#include <urcu/uatomic.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * _rcu_dereference - reads (copies) an RCU-protected pointer into a local
 * variable, within an RCU read-side critical section. The pointer can later
 * be safely dereferenced within the critical section.
 *
 * This ensures that the pointer copy is invariant throughout the whole
 * critical section.
 *
 * Inserts memory barriers on architectures that require them (currently only
 * Alpha) and documents which pointers are protected by RCU.
 *
 * The compiler memory barrier in CMM_LOAD_SHARED() ensures that value-speculative
 * optimizations (e.g. VSS: Value Speculation Scheduling) do not perform the
 * data read before the pointer read by speculating the value of the pointer.
 * Correct ordering is ensured because the pointer is read as a volatile access.
 * This acts as a global side-effect operation, which forbids reordering of
 * dependent memory operations. Note that such concerns about dependency-breaking
 * optimizations will eventually be taken care of by the "memory_order_consume"
 * addition to the forthcoming C++ standard.
 *
 * Should match rcu_assign_pointer() or rcu_xchg_pointer().
 *
 * This macro is less than 10 lines long. The intent is that this macro
 * meets the 10-line criterion in LGPL, allowing this function to be
 * expanded directly in non-LGPL code.
 */
#define _rcu_dereference(p)	({					\
				__typeof__(p) _________p1 = CMM_LOAD_SHARED(p); \
				cmm_smp_read_barrier_depends();		\
				(_________p1);				\
				})

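/*
 * Illustrative usage sketch (not part of this header): "struct foo", the
 * global pointer "foo_gp" and do_something() are hypothetical placeholders;
 * rcu_read_lock() and rcu_read_unlock() come from the liburcu flavor in use.
 *
 *	struct foo {
 *		int value;
 *	};
 *	extern struct foo *foo_gp;	// RCU-protected global pointer
 *
 *	rcu_read_lock();
 *	{
 *		struct foo *f = _rcu_dereference(foo_gp);
 *
 *		if (f)
 *			do_something(f->value);	// safe while the read-side
 *						// critical section is open
 *	}
 *	rcu_read_unlock();
 */
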
/**
 * _rcu_cmpxchg_pointer - same as rcu_assign_pointer, but only performs the
 * assignment if the pointer holds the value expected by "old". If it
 * succeeds, returns the previous pointer to the data structure, which can be
 * safely freed after waiting for a quiescent state using synchronize_rcu().
 * If it fails (unexpected value), returns the current (unexpected) pointer,
 * which should not be freed!
 *
 * This macro is less than 10 lines long. The intent is that this macro
 * meets the 10-line criterion in LGPL, allowing this function to be
 * expanded directly in non-LGPL code.
 */
#define _rcu_cmpxchg_pointer(p, old, _new)			\
	({							\
		__typeof__(*p) _________pold = (old);		\
		__typeof__(*p) _________pnew = (_new);		\
		if (!__builtin_constant_p(_new) ||		\
		    ((_new) != NULL))				\
			cmm_wmb();				\
		uatomic_cmpxchg(p, _________pold, _________pnew); \
	})

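/*
 * Illustrative usage sketch (not part of this header): replaces "old_node"
 * with "new_node" only if "foo_gp" still points to "old_node", then reclaims
 * the displaced structure after a grace period. The foo_gp/old_node/new_node
 * names are hypothetical placeholders; synchronize_rcu() is provided by the
 * liburcu flavor in use.
 *
 *	struct foo *ret;
 *
 *	ret = _rcu_cmpxchg_pointer(&foo_gp, old_node, new_node);
 *	if (ret == old_node) {
 *		synchronize_rcu();	// wait for pre-existing readers
 *		free(old_node);		// now safe to reclaim
 *	}
 *	// on failure, ret is the current (unexpected) pointer: do not free it
 */
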
/**
 * _rcu_xchg_pointer - same as rcu_assign_pointer, but returns the previous
 * pointer to the data structure, which can be safely freed after waiting for a
 * quiescent state using synchronize_rcu().
 *
 * This macro is less than 10 lines long. The intent is that this macro
 * meets the 10-line criterion in LGPL, allowing this function to be
 * expanded directly in non-LGPL code.
 */
#define _rcu_xchg_pointer(p, v)				\
	({						\
		__typeof__(*p) _________pv = (v);	\
		if (!__builtin_constant_p(v) ||		\
		    ((v) != NULL))			\
			cmm_wmb();			\
		uatomic_xchg(p, _________pv);		\
	})

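/*
 * Illustrative usage sketch (not part of this header): unconditionally
 * publishes "new_node" and reclaims the previously published structure after
 * a grace period. The foo_gp/new_node names are hypothetical placeholders;
 * synchronize_rcu() is provided by the liburcu flavor in use.
 *
 *	struct foo *old_node;
 *
 *	old_node = _rcu_xchg_pointer(&foo_gp, new_node);
 *	synchronize_rcu();	// no reader can still hold a reference
 *	free(old_node);
 */
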
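/*
 * _rcu_set_pointer - helper underlying _rcu_assign_pointer(). Takes the
 * address of the RCU-protected pointer. A write barrier (cmm_wmb()) is
 * emitted before the store unless the stored value is a constant NULL, in
 * which case there is no newly initialized data to order against; the store
 * itself is performed with uatomic_set().
 */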
#define _rcu_set_pointer(p, v)				\
	do {						\
		__typeof__(*p) _________pv = (v);	\
		if (!__builtin_constant_p(v) ||		\
		    ((v) != NULL))			\
			cmm_wmb();			\
		uatomic_set(p, _________pv);		\
	} while (0)

/**
 * _rcu_assign_pointer - assign (publicize) a pointer to a new data structure
 * meant to be read by RCU read-side critical sections.
 *
 * Documents which pointers will be dereferenced by RCU read-side critical
 * sections and adds the required memory barriers on architectures requiring
 * them. It also makes sure the compiler does not reorder code initializing the
 * data structure before its publication.
 *
 * Should match rcu_dereference().
 *
 * This macro is less than 10 lines long. The intent is that this macro
 * meets the 10-line criterion in LGPL, allowing this function to be
 * expanded directly in non-LGPL code.
 */
#define _rcu_assign_pointer(p, v)	_rcu_set_pointer(&(p), v)

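/*
 * Illustrative usage sketch (not part of this header): fully initialize the
 * new structure, then publish it; the write barrier issued by the macro
 * ensures readers that dereference "foo_gp" observe the initialized fields.
 * The struct foo/foo_gp names are hypothetical placeholders.
 *
 *	struct foo *new_node = malloc(sizeof(*new_node));
 *
 *	new_node->value = 42;			// initialize first
 *	_rcu_assign_pointer(foo_gp, new_node);	// then publish to readers
 */
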
#ifdef __cplusplus
}
#endif

#endif /* _URCU_POINTER_STATIC_H */