/*
 * urcu-qsbr.c
 *
 * Userspace RCU QSBR library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>

#define BUILD_QSBR_LIB
#include "urcu-qsbr-static.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu-qsbr.h"

void __attribute__((destructor)) rcu_exit(void);

static pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;

int gp_futex;

/*
 * Global grace period counter.
 */
unsigned long urcu_gp_ctr = RCU_GP_ONLINE;

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
struct urcu_reader __thread urcu_reader;

#ifdef DEBUG_YIELD
unsigned int yield_active;
unsigned int __thread rand_yield;
#endif

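/*
 * Registry of all reader threads registered through rcu_register_thread().
 * Additions, removals and traversals are protected by urcu_mutex.
 */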
static LIST_HEAD(registry);

static void internal_urcu_lock(void)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(&urcu_mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR) {
			printf("ret = %d, errno = %d\n", ret, errno);
			perror("Error in pthread mutex lock");
			exit(-1);
		}
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void internal_urcu_unlock(void)
{
	int ret;

	ret = pthread_mutex_unlock(&urcu_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

/*
 * synchronize_rcu() waiting. Single thread.
 */
static void wait_gp(struct urcu_reader *index)
{
	uatomic_dec(&gp_futex);
	smp_mb(); /* Write futex before read reader_gp */
	if (!rcu_gp_ongoing(&index->ctr)) {
		/* Read reader_gp before write futex */
		smp_mb();
		/* Callbacks are queued, don't wait. */
		uatomic_set(&gp_futex, 0);
	} else {
		/* Read reader_gp before read futex */
		smp_rmb();
		if (uatomic_read(&gp_futex) == -1)
			futex(&gp_futex, FUTEX_WAIT, -1,
			      NULL, NULL, 0);
	}
}

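/*
 * Wait until every registered reader has passed through a quiescent state
 * (or gone offline) for the current grace period. Each reader's counter is
 * busy-waited on, falling back to sleeping on gp_futex via wait_gp() after
 * RCU_QS_ACTIVE_ATTEMPTS unsuccessful attempts.
 */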
static void wait_for_quiescent_state(void)
{
	struct urcu_reader *index;

	if (list_empty(&registry))
		return;
	/*
	 * Wait for each thread urcu_reader.ctr count to become 0.
	 */
	list_for_each_entry(index, &registry, head) {
		int wait_loops = 0;

		while (rcu_gp_ongoing(&index->ctr)) {
			if (wait_loops++ == RCU_QS_ACTIVE_ATTEMPTS) {
				wait_gp(index);
			} else {
#ifndef HAS_INCOHERENT_CACHES
				cpu_relax();
#else /* #ifndef HAS_INCOHERENT_CACHES */
				smp_mb();
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
			}
		}
	}
}

/*
 * Use a two sub-phase algorithm for architectures whose long is smaller than
 * 64 bits, to ensure we do not encounter an overflow bug.
 */

#if (BITS_PER_LONG < 64)
/*
 * called with urcu_mutex held.
 */
static void switch_next_urcu_qparity(void)
{
	STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR);
}

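/*
 * Grace period, 32-bit case: toggle the grace-period parity bit, wait for
 * readers still running in the previous parity, then toggle and wait again.
 * Relying on a parity bit rather than a free-running counter keeps the
 * scheme correct despite possible counter overflow on 32-bit longs.
 */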
void synchronize_rcu(void)
{
	unsigned long was_online;

	was_online = urcu_reader.ctr;

	/*
	 * All threads should read qparity before accessing the data
	 * structure that the new ptr points to.
	 */
	/* Write new ptr before changing the qparity */
	smp_mb();

	/*
	 * Mark the writer thread offline to make sure we don't wait for
	 * our own quiescent state. This allows using synchronize_rcu() in
	 * threads registered as readers.
	 */
	if (was_online)
		STORE_SHARED(urcu_reader.ctr, 0);

	internal_urcu_lock();

	switch_next_urcu_qparity();	/* 0 -> 1 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 0 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 0 */

	/*
	 * Must finish waiting for quiescent state for parity 0 before
	 * committing qparity update to memory. Failure to do so could result
	 * in the writer waiting forever while new readers are always
	 * accessing data (no progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	switch_next_urcu_qparity();	/* 1 -> 0 */

	/*
	 * Must commit qparity update to memory before waiting for parity
	 * 1 quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data (no
	 * progress).
	 * Ensured by STORE_SHARED and LOAD_SHARED.
	 */

	/*
	 * Wait for previous parity to be empty of readers.
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 1 */

	internal_urcu_unlock();

	/*
	 * Finish waiting for reader threads before letting the old ptr be
	 * freed.
	 */
	if (was_online)
		_STORE_SHARED(urcu_reader.ctr, LOAD_SHARED(urcu_gp_ctr));
	smp_mb();
}
#else /* !(BITS_PER_LONG < 64) */
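/*
 * Grace period, 64-bit case: a 64-bit grace-period counter cannot
 * realistically overflow, so a single counter increment followed by a single
 * wait for all readers is sufficient.
 */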
void synchronize_rcu(void)
{
	unsigned long was_online;

	was_online = urcu_reader.ctr;

	/*
	 * Mark the writer thread offline to make sure we don't wait for
	 * our own quiescent state. This allows using synchronize_rcu() in
	 * threads registered as readers.
	 */
	smp_mb();
	if (was_online)
		STORE_SHARED(urcu_reader.ctr, 0);

	internal_urcu_lock();
	STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr + RCU_GP_CTR);
	wait_for_quiescent_state();
	internal_urcu_unlock();

	if (was_online)
		_STORE_SHARED(urcu_reader.ctr, LOAD_SHARED(urcu_gp_ctr));
	smp_mb();
}
#endif /* !(BITS_PER_LONG < 64) */

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

void *rcu_dereference(void *p)
{
	return _rcu_dereference(p);
}

void *rcu_assign_pointer_sym(void **p, void *v)
{
	wmb();
	return STORE_SHARED(p, v);
}

void *rcu_cmpxchg_pointer_sym(void **p, void *old, void *_new)
{
	wmb();
	return uatomic_cmpxchg(p, old, _new);
}

void *rcu_xchg_pointer_sym(void **p, void *v)
{
	wmb();
	return uatomic_xchg(p, v);
}

void *rcu_publish_content_sym(void **p, void *v)
{
	void *oldptr;

	oldptr = _rcu_xchg_pointer(p, v);
	synchronize_rcu();
	return oldptr;
}

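/*
 * Updater-side usage sketch (illustration only: "struct foo", "global_foo"
 * and the "value" field are hypothetical, and rcu_xchg_pointer() is the
 * wrapper macro expected from urcu-qsbr.h):
 *
 *	struct foo *new_foo = malloc(sizeof(*new_foo)), *old_foo;
 *
 *	new_foo->value = 42;
 *	old_foo = rcu_xchg_pointer(&global_foo, new_foo);
 *	synchronize_rcu();      (wait for pre-existing readers to finish)
 *	free(old_foo);
 */
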
void rcu_quiescent_state(void)
{
	_rcu_quiescent_state();
}

void rcu_thread_offline(void)
{
	_rcu_thread_offline();
}

void rcu_thread_online(void)
{
	_rcu_thread_online();
}

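/*
 * Reader-side usage sketch (illustration only; "need_to_block" is a
 * hypothetical condition): a reader thread registers itself, announces
 * quiescent states periodically, and goes offline around long blocking
 * calls so writers do not wait on it:
 *
 *	rcu_register_thread();
 *	for (;;) {
 *		rcu_read_lock();
 *		... access RCU-protected data ...
 *		rcu_read_unlock();
 *		rcu_quiescent_state();      (report a quiescent state)
 *		if (need_to_block) {
 *			rcu_thread_offline();
 *			... block ...
 *			rcu_thread_online();
 *		}
 *	}
 *	rcu_unregister_thread();
 */
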
void rcu_register_thread(void)
{
	urcu_reader.tid = pthread_self();
	assert(urcu_reader.ctr == 0);

	internal_urcu_lock();
	list_add(&urcu_reader.head, &registry);
	internal_urcu_unlock();
	_rcu_thread_online();
}

void rcu_unregister_thread(void)
{
	/*
	 * We have to make the thread offline, otherwise we end up
	 * deadlocking with a waiting writer.
	 */
	_rcu_thread_offline();
	internal_urcu_lock();
	list_del(&urcu_reader.head);
	internal_urcu_unlock();
}

void rcu_exit(void)
{
	assert(list_empty(&registry));
}