Fix: call_rcu: teardown default call_rcu worker on application exit
[urcu.git] / src / urcu-qsbr.c
/*
 * urcu-qsbr.c
 *
 * Userspace RCU QSBR library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#define URCU_NO_COMPAT_IDENTIFIERS
#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>

#include <urcu/wfcqueue.h>
#include <urcu/map/urcu-qsbr.h>
#define BUILD_QSBR_LIB
#include <urcu/static/urcu-qsbr.h>
#include <urcu/pointer.h>
#include <urcu/tls-compat.h>

#include "urcu-die.h"
#include "urcu-wait.h"
#include "urcu-utils.h"

#define URCU_API_MAP
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include <urcu/urcu-qsbr.h>
#define _LGPL_SOURCE

void __attribute__((destructor)) urcu_qsbr_exit(void);
static void urcu_call_rcu_exit(void);

/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held for the whole duration of the grace-period
 * wait: it is sporadically released, then reacquired, between
 * iterations on the registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;
struct urcu_gp urcu_qsbr_gp = { .ctr = URCU_QSBR_GP_ONLINE };
URCU_ATTR_ALIAS("urcu_qsbr_gp") extern struct urcu_gp rcu_gp_qsbr;

/*
 * Active attempts to check for reader Q.S. before calling futex().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
DEFINE_URCU_TLS(struct urcu_qsbr_reader, urcu_qsbr_reader);
DEFINE_URCU_TLS_ALIAS(struct urcu_qsbr_reader, urcu_qsbr_reader, rcu_reader_qsbr);

static CDS_LIST_HEAD(registry);
/*
 * Queue of threads awaiting a grace period. Contains
 * struct gp_waiters_thread objects.
 */
static DEFINE_URCU_WAIT_QUEUE(gp_waiters);
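
/*
 * Descriptive note (not from upstream): the first thread to enqueue
 * itself on gp_waiters becomes the grace-period leader. It moves all
 * queued waiters onto a local list, runs a single grace period on
 * their behalf, then wakes them all, batching concurrent
 * synchronize_rcu() calls into one registry scan.
 */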

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}

/*
 * synchronize_rcu() waiting. Single thread.
 */
static void wait_gp(void)
{
	/* Read reader_gp before read futex */
	cmm_smp_rmb();
	while (uatomic_read(&urcu_qsbr_gp.futex) == -1) {
		if (!futex_noasync(&urcu_qsbr_gp.futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
			/*
			 * Prior wakeups queued by unrelated code
			 * using the same address can cause futex wait to
			 * return 0 even though the futex value is still
			 * -1 (spurious wakeups). Check the value again
			 * in user-space to validate whether it really
			 * differs from -1.
			 */
			continue;
		}
		switch (errno) {
		case EAGAIN:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break; /* Get out of switch. Check again. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}
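
/*
 * Descriptive note (not from upstream): wait_gp() pairs with the
 * reader-side wake-up path. After RCU_QS_ACTIVE_ATTEMPTS spins, the
 * grace-period waiter publishes futex == -1 and sets each remaining
 * reader's ->waiting flag, then blocks here. A reader that next
 * reports a quiescent state or goes offline while ->waiting is set is
 * expected to reset the futex and issue a FUTEX_WAKE, unblocking the
 * waiter.
 */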

/*
 * Always called with rcu_registry_lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct urcu_qsbr_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(urcu_qsbr_reader).ctr to either
	 * indicate quiescence (offline), or for them to observe the
	 * current urcu_qsbr_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;
		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
			uatomic_set(&urcu_qsbr_gp.futex, -1);
			/*
			 * Write futex before write waiting (the other side
			 * reads them in the opposite order).
			 */
			cmm_smp_wmb();
			cds_list_for_each_entry(index, input_readers, node) {
				_CMM_STORE_SHARED(index->waiting, 1);
			}
			/* Write futex before read reader_gp */
			cmm_smp_mb();
		}
		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (urcu_qsbr_reader_state(&index->ctr)) {
			case URCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case URCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case URCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				cmm_smp_mb();
				uatomic_set(&urcu_qsbr_gp.futex, 0);
			}
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				wait_gp();
			} else {
#ifndef HAS_INCOHERENT_CACHES
				caa_cpu_relax();
#else /* #ifndef HAS_INCOHERENT_CACHES */
				cmm_smp_mb();
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
			}
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
	}
}
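
/*
 * Descriptive note (not from upstream): with 32-bit counters, callers
 * invoke wait_for_readers() twice per grace period. The first pass
 * parks readers seen with the current counter snapshot on
 * cur_snap_readers; after the parity flip, the second pass waits for
 * those same readers, so counter overflow cannot be mistaken for
 * quiescence.
 */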

/*
 * On architectures where the long type is smaller than 64 bits, use a
 * two-phase algorithm (wait on each counter parity in turn) so that
 * reader counter overflow cannot be mistaken for quiescence.
 */

#if (CAA_BITS_PER_LONG < 64)
void urcu_qsbr_synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	unsigned long was_online;
	DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
	struct urcu_waiters waiters;

	was_online = urcu_qsbr_read_ongoing();

	/*
	 * All threads should read qparity before accessing data structure
	 * where new ptr points to. In the "then" case, rcu_thread_offline
	 * includes a memory barrier.
	 *
	 * Mark the writer thread offline to make sure we don't wait for
	 * our own quiescent state. This allows using synchronize_rcu()
	 * in threads registered as readers.
	 */
	if (was_online)
		urcu_qsbr_thread_offline();
	else
		cmm_smp_mb();

	/*
	 * Add ourselves to the gp_waiters queue of threads awaiting a
	 * grace period. Proceed to perform the grace period only if we
	 * are the first thread added into the queue.
	 */
	if (urcu_wait_add(&gp_waiters, &wait) != 0) {
		/* Not first in queue: will be awakened by another thread. */
		urcu_adaptative_busy_wait(&wait);
		goto gp_end;
	}
	/* We won't need to wake ourselves up */
	urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);

	mutex_lock(&rcu_gp_lock);

	/*
	 * Move all waiters into our local queue.
	 */
	urcu_move_waiters(&waiters, &gp_waiters);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Must finish waiting for quiescent state for original parity
	 * before committing next urcu_qsbr_gp.ctr update to memory. Failure
	 * to do so could result in the writer waiting forever while new
	 * readers are always accessing data (no progress). Enforce
	 * compiler-order of load URCU_TLS(urcu_qsbr_reader).ctr before store
	 * to urcu_qsbr_gp.ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(urcu_qsbr_gp.ctr, urcu_qsbr_gp.ctr ^ URCU_QSBR_GP_CTR);

	/*
	 * Must commit urcu_qsbr_gp.ctr update to memory before waiting for
	 * quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data
	 * (no progress). Enforce compiler-order of store to urcu_qsbr_gp.ctr
	 * before load URCU_TLS(urcu_qsbr_reader).ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	urcu_wake_all_waiters(&waiters);
gp_end:
	/*
	 * Finish waiting for reader threads before letting the old ptr
	 * be freed.
	 */
	if (was_online)
		urcu_qsbr_thread_online();
	else
		cmm_smp_mb();
}
#else /* !(CAA_BITS_PER_LONG < 64) */
void urcu_qsbr_synchronize_rcu(void)
{
	CDS_LIST_HEAD(qsreaders);
	unsigned long was_online;
	DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
	struct urcu_waiters waiters;

	was_online = urcu_qsbr_read_ongoing();

	/*
	 * Mark the writer thread offline to make sure we don't wait for
	 * our own quiescent state. This allows using synchronize_rcu()
	 * in threads registered as readers.
	 */
	if (was_online)
		urcu_qsbr_thread_offline();
	else
		cmm_smp_mb();

	/*
	 * Add ourselves to the gp_waiters queue of threads awaiting a
	 * grace period. Proceed to perform the grace period only if we
	 * are the first thread added into the queue.
	 */
	if (urcu_wait_add(&gp_waiters, &wait) != 0) {
		/* Not first in queue: will be awakened by another thread. */
		urcu_adaptative_busy_wait(&wait);
		goto gp_end;
	}
	/* We won't need to wake ourselves up */
	urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);

	mutex_lock(&rcu_gp_lock);

	/*
	 * Move all waiters into our local queue.
	 */
	urcu_move_waiters(&waiters, &gp_waiters);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/* Increment current G.P. */
	CMM_STORE_SHARED(urcu_qsbr_gp.ctr, urcu_qsbr_gp.ctr + URCU_QSBR_GP_CTR);

	/*
	 * Must commit urcu_qsbr_gp.ctr update to memory before waiting for
	 * quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data
	 * (no progress). Enforce compiler-order of store to urcu_qsbr_gp.ctr
	 * before load URCU_TLS(urcu_qsbr_reader).ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe the new count or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	urcu_wake_all_waiters(&waiters);
gp_end:
	if (was_online)
		urcu_qsbr_thread_online();
	else
		cmm_smp_mb();
}
#endif /* !(CAA_BITS_PER_LONG < 64) */
URCU_ATTR_ALIAS("urcu_qsbr_synchronize_rcu")
void synchronize_rcu_qsbr();
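
/*
 * Example (illustrative sketch, not part of upstream): typical
 * updater-side use of this flavor, assuming `gp_ptr' is a pointer
 * published with rcu_assign_pointer() and dereferenced by readers
 * under the QSBR read-side primitives:
 *
 *	struct mydata *old = rcu_dereference(gp_ptr);
 *
 *	rcu_assign_pointer(gp_ptr, new);	// publish replacement
 *	urcu_qsbr_synchronize_rcu();		// wait for all readers
 *	free(old);				// safe: no reader holds `old'
 */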

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void urcu_qsbr_read_lock(void)
{
	_urcu_qsbr_read_lock();
}
URCU_ATTR_ALIAS("urcu_qsbr_read_lock") void rcu_read_lock_qsbr();

void urcu_qsbr_read_unlock(void)
{
	_urcu_qsbr_read_unlock();
}
URCU_ATTR_ALIAS("urcu_qsbr_read_unlock") void rcu_read_unlock_qsbr();

int urcu_qsbr_read_ongoing(void)
{
	return _urcu_qsbr_read_ongoing();
}
URCU_ATTR_ALIAS("urcu_qsbr_read_ongoing")
void rcu_read_ongoing_qsbr();

void urcu_qsbr_quiescent_state(void)
{
	_urcu_qsbr_quiescent_state();
}
URCU_ATTR_ALIAS("urcu_qsbr_quiescent_state")
void rcu_quiescent_state_qsbr();

void urcu_qsbr_thread_offline(void)
{
	_urcu_qsbr_thread_offline();
}
URCU_ATTR_ALIAS("urcu_qsbr_thread_offline")
void rcu_thread_offline_qsbr();

void urcu_qsbr_thread_online(void)
{
	_urcu_qsbr_thread_online();
}
URCU_ATTR_ALIAS("urcu_qsbr_thread_online")
void rcu_thread_online_qsbr();

void urcu_qsbr_register_thread(void)
{
	URCU_TLS(urcu_qsbr_reader).tid = pthread_self();
	assert(URCU_TLS(urcu_qsbr_reader).ctr == 0);

	mutex_lock(&rcu_registry_lock);
	assert(!URCU_TLS(urcu_qsbr_reader).registered);
	URCU_TLS(urcu_qsbr_reader).registered = 1;
	cds_list_add(&URCU_TLS(urcu_qsbr_reader).node, &registry);
	mutex_unlock(&rcu_registry_lock);
	_urcu_qsbr_thread_online();
}
URCU_ATTR_ALIAS("urcu_qsbr_register_thread")
void rcu_register_thread_qsbr();

void urcu_qsbr_unregister_thread(void)
{
	/*
	 * We have to make the thread offline, otherwise we end up
	 * deadlocking with a waiting writer.
	 */
	_urcu_qsbr_thread_offline();
	assert(URCU_TLS(urcu_qsbr_reader).registered);
	URCU_TLS(urcu_qsbr_reader).registered = 0;
	mutex_lock(&rcu_registry_lock);
	cds_list_del(&URCU_TLS(urcu_qsbr_reader).node);
	mutex_unlock(&rcu_registry_lock);
}
URCU_ATTR_ALIAS("urcu_qsbr_unregister_thread")
void rcu_unregister_thread_qsbr();
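
/*
 * Example (illustrative sketch, not part of upstream): typical QSBR
 * reader-thread lifecycle. Unlike the other urcu flavors, each reader
 * must announce its own quiescent states; the read-side lock/unlock
 * primitives compile down to no-ops between those announcements.
 *
 *	urcu_qsbr_register_thread();
 *	while (!done) {
 *		urcu_qsbr_read_lock();
 *		// ... rcu_dereference() protected data ...
 *		urcu_qsbr_read_unlock();
 *		urcu_qsbr_quiescent_state();	// report Q.S. periodically
 *	}
 *	urcu_qsbr_unregister_thread();
 */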

void urcu_qsbr_exit(void)
{
	/*
	 * Assertion disabled because call_rcu threads are now rcu
	 * readers, and left running at exit.
	 * assert(cds_list_empty(&registry));
	 */
	urcu_call_rcu_exit();
}
URCU_ATTR_ALIAS("urcu_qsbr_exit") void rcu_exit_qsbr();
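
/*
 * Descriptive note (not from upstream comments): urcu_qsbr_exit() runs
 * as a library destructor at application exit. Per the change this
 * file belongs to, urcu_call_rcu_exit() (defined in
 * urcu-call-rcu-impl.h, included below) tears down the default
 * call_rcu worker thread at that point.
 */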

DEFINE_RCU_FLAVOR(rcu_flavor);
DEFINE_RCU_FLAVOR_ALIAS(rcu_flavor, alias_rcu_flavor);

#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"