Fix: urcu: futex wait: handle spurious futex wakeups
[urcu.git] / src / urcu.c
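This commit makes wait_gp() tolerant to spurious futex wakeups:
FUTEX_WAIT can return 0 while rcu_gp.futex is still -1 (for instance
when unrelated wakeups were queued earlier on the same address), so the
futex value is now re-checked in user-space in a loop before concluding
that the grace-period state has changed.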
/*
 * urcu.c
 *
 * Userspace RCU library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#define URCU_NO_COMPAT_IDENTIFIERS
#define _BSD_SOURCE
#define _LGPL_SOURCE
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <stdbool.h>
#include <poll.h>

#include <urcu/config.h>
#include <urcu/arch.h>
#include <urcu/wfcqueue.h>
#include <urcu/map/urcu.h>
#include <urcu/static/urcu.h>
#include <urcu/pointer.h>
#include <urcu/tls-compat.h>

#include "urcu-die.h"
#include "urcu-wait.h"
#include "urcu-utils.h"

#define URCU_API_MAP
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include <urcu/urcu.h>
#define _LGPL_SOURCE

/*
 * If a reader is really non-cooperative and refuses to commit its
 * rcu_active_readers count to memory (there is no barrier in the reader
 * per se), kick it after 10 loops waiting for it.
 */
#define KICK_READER_LOOPS	10

/*
 * Active attempts to check for reader Q.S. before calling futex().
 */
#define RCU_QS_ACTIVE_ATTEMPTS	100

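/*
 * Together, these two constants bound the adaptative waiting strategy
 * used by wait_for_readers() below: the writer first spins up to
 * RCU_QS_ACTIVE_ATTEMPTS iterations over the registry, then blocks on
 * rcu_gp.futex; on incoherent-cache systems, stubborn readers are
 * additionally kicked with an explicit barrier every KICK_READER_LOOPS
 * futex waits.
 */
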
/* If the headers do not support the membarrier system call, fall back on RCU_MB */
#ifdef __NR_membarrier
# define membarrier(...)	syscall(__NR_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif

enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY				= 0,
	MEMBARRIER_CMD_SHARED				= (1 << 0),
	/* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
	/* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
	MEMBARRIER_CMD_PRIVATE_EXPEDITED		= (1 << 3),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED	= (1 << 4),
};

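/*
 * A note on flavors, summarizing the conditional compilation below:
 * this file is built once per flavor, selecting exactly one of
 * RCU_MEMBARRIER (smp_mb_master() uses the membarrier(2) system call
 * when available), RCU_MB (memory barriers on both the read side and
 * the write side), or RCU_SIGNAL (the writer forces memory barriers in
 * readers by sending them SIGRCU).
 */
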
#ifdef RCU_MEMBARRIER
static int init_done;
static int urcu_memb_has_sys_membarrier_private_expedited;

#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
/*
 * Explicitly initialize to zero because we can't alias a non-static
 * uninitialized variable.
 */
int urcu_memb_has_sys_membarrier = 0;
URCU_ATTR_ALIAS("urcu_memb_has_sys_membarrier")
extern int rcu_has_sys_membarrier_memb;
#endif

void __attribute__((constructor)) rcu_init(void);
#endif

#ifdef RCU_MB
void rcu_init(void)
{
}
URCU_ATTR_ALIAS(urcu_stringify(rcu_init))
void alias_rcu_init(void);
#endif

#ifdef RCU_SIGNAL
static int init_done;

void __attribute__((constructor)) rcu_init(void);
void __attribute__((destructor)) rcu_exit(void);
#endif

/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held all the way through grace-period completion:
 * it is sporadically released between iterations over the registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;
struct urcu_gp rcu_gp = { .ctr = URCU_GP_COUNT };
URCU_ATTR_ALIAS(urcu_stringify(rcu_gp))
extern struct urcu_gp alias_rcu_gp;

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
DEFINE_URCU_TLS(struct urcu_reader, rcu_reader);
DEFINE_URCU_TLS_ALIAS(struct urcu_reader, rcu_reader, alias_rcu_reader);

static CDS_LIST_HEAD(registry);

/*
 * Queue of threads waiting their turn to wait for a grace period.
 * Contains struct gp_waiters_thread objects.
 */
static DEFINE_URCU_WAIT_QUEUE(gp_waiters);

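/*
 * Note on the DISTRUST_SIGNALS_EXTREME build below: a blocking
 * pthread_mutex_lock() could deadlock with force_mb_all_readers() if
 * the lock holder is waiting for us to acknowledge a barrier request.
 * The trylock loop therefore keeps servicing our own need_mb flag
 * while attempting to take the mutex.
 */
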
static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		if (CMM_LOAD_SHARED(URCU_TLS(rcu_reader).need_mb)) {
			cmm_smp_mb();
			_CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
			cmm_smp_mb();
		}
		(void) poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}

#ifdef RCU_MEMBARRIER
static void smp_mb_master(void)
{
	if (caa_likely(urcu_memb_has_sys_membarrier)) {
		if (membarrier(urcu_memb_has_sys_membarrier_private_expedited ?
				MEMBARRIER_CMD_PRIVATE_EXPEDITED :
				MEMBARRIER_CMD_SHARED, 0))
			urcu_die(errno);
	} else {
		cmm_smp_mb();
	}
}
#endif

#ifdef RCU_MB
static void smp_mb_master(void)
{
	cmm_smp_mb();
}
#endif

#ifdef RCU_SIGNAL
static void force_mb_all_readers(void)
{
	struct urcu_reader *index;

	/*
	 * Ask each thread to execute a cmm_smp_mb() so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (cds_list_empty(&registry))
		return;
	/*
	 * pthread_kill has a cmm_smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * safe and don't assume anything: we use cmm_smp_mc() to make sure the
	 * cache flush is enforced.
	 */
	cds_list_for_each_entry(index, &registry, node) {
		CMM_STORE_SHARED(index->need_mb, 1);
		pthread_kill(index->tid, SIGRCU);
	}
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 *
	 * Note that the pthread_kill() will never be executed on systems
	 * that correctly deliver signals in a timely manner.  However, it
	 * is not uncommon for kernels to have bugs that can result in
	 * lost or unduly delayed signals.
	 *
	 * If you are seeing the below pthread_kill() executing much at
	 * all, we suggest testing the underlying kernel and filing the
	 * relevant bug report.  For Linux kernels, we recommend getting
	 * the Linux Test Project (LTP).
	 */
	cds_list_for_each_entry(index, &registry, node) {
		while (CMM_LOAD_SHARED(index->need_mb)) {
			pthread_kill(index->tid, SIGRCU);
			(void) poll(NULL, 0, 1);
		}
	}
	cmm_smp_mb();	/* read ->need_mb before ending the barrier */
}

static void smp_mb_master(void)
{
	force_mb_all_readers();
}
#endif /* #ifdef RCU_SIGNAL */

/*
 * synchronize_rcu() waiting. Single thread.
 * Always called with rcu_registry_lock held. Releases this lock and
 * grabs it again. Holds the lock when it returns.
 */
static void wait_gp(void)
{
	/*
	 * Read reader_gp before read futex. smp_mb_master() needs to
	 * be called with the rcu registry lock held in RCU_SIGNAL
	 * flavor.
	 */
	smp_mb_master();
	/* Temporarily unlock the registry lock. */
	mutex_unlock(&rcu_registry_lock);
	while (uatomic_read(&rcu_gp.futex) == -1) {
		if (!futex_async(&rcu_gp.futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
			/*
			 * Wakeups queued earlier by unrelated code
			 * using the same address can cause futex wait
			 * to return 0 even though the futex value is
			 * still -1 (spurious wakeups). Check the value
			 * again in user-space to validate whether it
			 * really differs from -1.
			 */
			continue;
		}
		switch (errno) {
		case EAGAIN:
			/* Value already changed. */
			goto end;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. Check again. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
end:
	/*
	 * Re-lock the registry lock before the next loop.
	 */
	mutex_lock(&rcu_registry_lock);
}

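/*
 * Futex protocol recap (the read-side half lives in the urcu/static
 * headers): wait_for_readers() decrements rcu_gp.futex to -1 before
 * blocking in wait_gp(); a reader exiting its outermost critical
 * section which observes the futex at -1 sets it back to 0 and issues
 * a FUTEX_WAKE; wait_for_readers() also stores 0 itself once it sees
 * no pre-existing reader left in the registry. wait_gp() may therefore
 * only return once the value has actually left -1, which is why a
 * spurious FUTEX_WAIT return of 0 loops back to re-check the value.
 */
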
/*
 * Always called with rcu_registry lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct urcu_reader *index, *tmp;
#ifdef HAS_INCOHERENT_CACHES
	unsigned int wait_gp_loops = 0;
#endif /* HAS_INCOHERENT_CACHES */

	/*
	 * Wait for each thread's URCU_TLS(rcu_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;
		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
			uatomic_dec(&rcu_gp.futex);
			/* Write futex before read reader_gp */
			smp_mb_master();
		}

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (urcu_common_reader_state(&rcu_gp, &index->ctr)) {
			case URCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case URCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case URCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

#ifndef HAS_INCOHERENT_CACHES
		if (cds_list_empty(input_readers)) {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master();
				uatomic_set(&rcu_gp.futex, 0);
			}
			break;
		} else {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* wait_gp unlocks/locks registry lock. */
				wait_gp();
			} else {
				/* Temporarily unlock the registry lock. */
				mutex_unlock(&rcu_registry_lock);
				caa_cpu_relax();
				/*
				 * Re-lock the registry lock before the
				 * next loop.
				 */
				mutex_lock(&rcu_registry_lock);
			}
		}
#else /* #ifndef HAS_INCOHERENT_CACHES */
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * URCU_TLS(rcu_reader).ctr update to memory if we wait
		 * for too long.
		 */
		if (cds_list_empty(input_readers)) {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master();
				uatomic_set(&rcu_gp.futex, 0);
			}
			break;
		} else {
			if (wait_gp_loops == KICK_READER_LOOPS) {
				smp_mb_master();
				wait_gp_loops = 0;
			}
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* wait_gp unlocks/locks registry lock. */
				wait_gp();
				wait_gp_loops++;
			} else {
				/* Temporarily unlock the registry lock. */
				mutex_unlock(&rcu_registry_lock);
				caa_cpu_relax();
				/*
				 * Re-lock the registry lock before the
				 * next loop.
				 */
				mutex_lock(&rcu_registry_lock);
			}
		}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
	}
}

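/*
 * The three lists passed to wait_for_readers() implement the two-phase
 * grace period driven by synchronize_rcu() below: the first pass moves
 * readers still active in the current parity phase into
 * cur_snap_readers and quiescent ones into qsreaders; after the parity
 * flip, the second pass (with cur_snap_readers set to NULL) drains
 * everything into qsreaders, which is finally spliced back into the
 * registry.
 */
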
void synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
	struct urcu_waiters waiters;

	/*
	 * Add ourselves to the gp_waiters queue of threads waiting their
	 * turn to wait for a grace period. Proceed to perform the grace
	 * period only if we are the first thread added into the queue.
	 * The implicit memory barrier before urcu_wait_add()
	 * orders prior memory accesses of threads put into the wait
	 * queue before their insertion into the wait queue.
	 */
	if (urcu_wait_add(&gp_waiters, &wait) != 0) {
		/* Not first in queue: will be awakened by another thread. */
		urcu_adaptative_busy_wait(&wait);
		/* Order following memory accesses after grace period. */
		cmm_smp_mb();
		return;
	}
	/* We won't need to wake ourselves up */
	urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);

	mutex_lock(&rcu_gp_lock);

	/*
	 * Move all waiters into our local queue.
	 */
	urcu_move_waiters(&waiters, &gp_waiters);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * All threads should read qparity before accessing data structure
	 * where new ptr points to. Must be done within rcu_registry_lock
	 * because it iterates on reader threads.
	 */
	/* Write new ptr before changing the qparity */
	smp_mb_master();

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Must finish waiting for quiescent state for original parity before
	 * committing next rcu_gp.ctr update to memory. Failure to do so could
	 * result in the writer waiting forever while new readers are always
	 * accessing data (no progress). Enforce compiler-order of load
	 * URCU_TLS(rcu_reader).ctr before store to rcu_gp.ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr ^ URCU_GP_CTR_PHASE);

	/*
	 * Must commit rcu_gp.ctr update to memory before waiting for quiescent
	 * state. Failure to do so could result in the writer waiting forever
	 * while new readers are always accessing data (no progress). Enforce
	 * compiler-order of store to rcu_gp.ctr before load rcu_reader ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr
	 * be freed. Must be done within rcu_registry_lock because it
	 * iterates on reader threads.
	 */
	smp_mb_master();
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);

	/*
	 * Wakeup waiters only after we have completed the grace period
	 * and have ensured the memory barriers at the end of the grace
	 * period have been issued.
	 */
	urcu_wake_all_waiters(&waiters);
}
URCU_ATTR_ALIAS(urcu_stringify(synchronize_rcu))
void alias_synchronize_rcu();

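/*
 * Minimal usage sketch for this flavor's public API (illustrative
 * only, not part of this file's build; gp, new_data and use() are
 * hypothetical):
 *
 *	// Reader thread:
 *	rcu_register_thread();
 *	rcu_read_lock();
 *	struct data *p = rcu_dereference(gp);
 *	if (p)
 *		use(p);
 *	rcu_read_unlock();
 *	rcu_unregister_thread();
 *
 *	// Updater thread:
 *	struct data *old = rcu_xchg_pointer(&gp, new_data);
 *	synchronize_rcu();	// wait for pre-existing readers
 *	free(old);
 */
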
/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}
URCU_ATTR_ALIAS(urcu_stringify(rcu_read_lock))
void alias_rcu_read_lock();

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}
URCU_ATTR_ALIAS(urcu_stringify(rcu_read_unlock))
void alias_rcu_read_unlock();

int rcu_read_ongoing(void)
{
	return _rcu_read_ongoing();
}
URCU_ATTR_ALIAS(urcu_stringify(rcu_read_ongoing))
void alias_rcu_read_ongoing();

void rcu_register_thread(void)
{
	URCU_TLS(rcu_reader).tid = pthread_self();
	assert(URCU_TLS(rcu_reader).need_mb == 0);
	assert(!(URCU_TLS(rcu_reader).ctr & URCU_GP_CTR_NEST_MASK));

	mutex_lock(&rcu_registry_lock);
	assert(!URCU_TLS(rcu_reader).registered);
	URCU_TLS(rcu_reader).registered = 1;
	rcu_init();	/* In case gcc does not support constructor attribute */
	cds_list_add(&URCU_TLS(rcu_reader).node, &registry);
	mutex_unlock(&rcu_registry_lock);
}
URCU_ATTR_ALIAS(urcu_stringify(rcu_register_thread))
void alias_rcu_register_thread();

void rcu_unregister_thread(void)
{
	mutex_lock(&rcu_registry_lock);
	assert(URCU_TLS(rcu_reader).registered);
	URCU_TLS(rcu_reader).registered = 0;
	cds_list_del(&URCU_TLS(rcu_reader).node);
	mutex_unlock(&rcu_registry_lock);
}
URCU_ATTR_ALIAS(urcu_stringify(rcu_unregister_thread))
void alias_rcu_unregister_thread();

#ifdef RCU_MEMBARRIER

#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
static
void rcu_sys_membarrier_status(bool available)
{
	if (!available)
		abort();
}
#else
static
void rcu_sys_membarrier_status(bool available)
{
	if (!available)
		return;
	urcu_memb_has_sys_membarrier = 1;
}
#endif

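/*
 * Probe kernel membarrier(2) support: MEMBARRIER_CMD_QUERY returns a
 * mask of supported commands. Prefer the private-expedited command
 * (which requires a one-time registration) over the slower shared
 * command; if neither is available, smp_mb_master() above falls back
 * on cmm_smp_mb().
 */
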
static
void rcu_sys_membarrier_init(void)
{
	bool available = false;
	int mask;

	mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
	if (mask >= 0) {
		if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
			if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
				urcu_die(errno);
			urcu_memb_has_sys_membarrier_private_expedited = 1;
			available = true;
		} else if (mask & MEMBARRIER_CMD_SHARED) {
			available = true;
		}
	}
	rcu_sys_membarrier_status(available);
}

void rcu_init(void)
{
	if (init_done)
		return;
	init_done = 1;
	rcu_sys_membarrier_init();
}
URCU_ATTR_ALIAS(urcu_stringify(rcu_init))
void alias_rcu_init(void);
#endif

#ifdef RCU_SIGNAL
static void sigrcu_handler(int signo __attribute__((unused)),
		siginfo_t *siginfo __attribute__((unused)),
		void *context __attribute__((unused)))
{
	/*
	 * Executing this cmm_smp_mb() is the only purpose of this signal
	 * handler. On the thread it runs on, it promotes the compiler
	 * barrier (cmm_barrier()) paired with the rcu read side into a
	 * real memory barrier (cmm_smp_mb()).
	 */
	cmm_smp_mb();
	_CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
	cmm_smp_mb();
}

/*
 * rcu_init constructor. Called when the library is linked, but also when
 * reader threads are calling rcu_register_thread().
 * Should only be called by a single thread at a given time. This is ensured by
 * holding the rcu_registry_lock from rcu_register_thread() or by running
 * at library load time, which should not be executed by multiple
 * threads nor concurrently with rcu_register_thread() anyway.
 */
void rcu_init(void)
{
	struct sigaction act;
	int ret;

	if (init_done)
		return;
	init_done = 1;

	act.sa_sigaction = sigrcu_handler;
	act.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGRCU, &act, NULL);
	if (ret)
		urcu_die(errno);
}
URCU_ATTR_ALIAS(urcu_stringify(rcu_init))
void alias_rcu_init(void);

void rcu_exit(void)
{
	/*
	 * Don't unregister the SIGRCU signal handler anymore, because
	 * call_rcu threads could still be using it shortly before the
	 * application exits.
	 * Assertion disabled because call_rcu threads are now rcu
	 * readers, and left running at exit.
	 * assert(cds_list_empty(&registry));
	 */
}
URCU_ATTR_ALIAS(urcu_stringify(rcu_exit))
void alias_rcu_exit(void);

#endif /* #ifdef RCU_SIGNAL */

DEFINE_RCU_FLAVOR(rcu_flavor);
DEFINE_RCU_FLAVOR_ALIAS(rcu_flavor, alias_rcu_flavor);

#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"