/*
 * Userspace RCU library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */
#include "urcu/wfcqueue.h"
#include "urcu/map/urcu.h"
#include "urcu/static/urcu.h"
#include "urcu-pointer.h"
#include "urcu/tls-compat.h"

#include "urcu-die.h"
#include "urcu-wait.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include "urcu.h"
#define _LGPL_SOURCE
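/*
 * Note: when _LGPL_SOURCE is defined, urcu.h maps the rcu_* API to the
 * static inlines from urcu/static/urcu.h. Including it with _LGPL_SOURCE
 * undefined instead declares the out-of-line functions, so this file
 * emits the wrapper symbols that non-LGPL-compatible applications link
 * against (see the "Library wrappers" section near the end of this file).
 */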
/*
 * If a reader is really non-cooperative and refuses to commit its
 * rcu_active_readers count to memory (there is no barrier in the reader
 * per se), kick it after 10 loops of waiting for it.
 */
#define KICK_READER_LOOPS 10
/*
 * Active attempts to check for reader quiescent states before calling futex().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100
/*
 * RCU_MEMBARRIER is only possibly available on Linux.
 */
#if defined(RCU_MEMBARRIER) && defined(__linux__)
#include <urcu/syscall-compat.h>
#endif

/* If the headers do not support SYS_membarrier, fall back on RCU_MB */
#ifdef SYS_membarrier
# define membarrier(...)	syscall(SYS_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif

#define MEMBARRIER_EXPEDITED	(1 << 0)
#define MEMBARRIER_DELAYED	(1 << 1)
#define MEMBARRIER_QUERY	(1 << 16)
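/*
 * Availability of sys_membarrier() is detected at runtime: rcu_init()
 * below issues a MEMBARRIER_QUERY call and sets rcu_has_sys_membarrier
 * on success, so the same binary transparently falls back on per-reader
 * memory barriers when running on a kernel without the system call.
 */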
#ifdef RCU_MEMBARRIER
static int init_done;
int rcu_has_sys_membarrier;

void __attribute__((constructor)) rcu_init(void);
#endif

#ifdef RCU_MB
void rcu_init(void)
{
}
#endif

#ifdef RCU_SIGNAL
static int init_done;

void __attribute__((constructor)) rcu_init(void);
void __attribute__((destructor)) rcu_exit(void);
#endif
/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held all the way through the grace period: it is
 * sporadically released between iterations on the registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;

struct rcu_gp rcu_gp = { .ctr = RCU_GP_COUNT };
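/*
 * For orientation, a rough sketch of the reader-side fast path that pairs
 * with rcu_gp.ctr (the real code lives in urcu/static/urcu.h; this is an
 * approximation, not the authoritative implementation):
 *
 *	unsigned long tmp = URCU_TLS(rcu_reader).ctr;
 *
 *	if (!(tmp & RCU_GP_CTR_NEST_MASK))	// outermost critical section
 *		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr,
 *				  _CMM_LOAD_SHARED(rcu_gp.ctr));
 *	else					// nested: just bump nest count
 *		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).ctr,
 *				  tmp + RCU_GP_COUNT);
 *
 * The writer flips RCU_GP_CTR_PHASE in rcu_gp.ctr and waits until each
 * registered reader is either quiescent or has observed the new phase.
 */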
/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
DEFINE_URCU_TLS(struct rcu_reader, rcu_reader);

static CDS_LIST_HEAD(registry);
/*
 * Queue keeping threads awaiting a grace period. Contains
 * struct gp_waiters_thread objects.
 */
static DEFINE_URCU_WAIT_QUEUE(gp_waiters);
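/*
 * Grace periods are batched: the first thread to add itself to gp_waiters
 * becomes the grace-period leader and performs synchronize_rcu() on behalf
 * of every thread queued behind it; the others simply sleep until the
 * leader wakes them (see synchronize_rcu() below).
 */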
static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		if (CMM_LOAD_SHARED(URCU_TLS(rcu_reader).need_mb)) {
			cmm_smp_mb();
			_CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
			cmm_smp_mb();
		}
		(void) poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}
#ifdef RCU_MEMBARRIER
static void smp_mb_master(int group)
{
	if (caa_likely(rcu_has_sys_membarrier))
		(void) membarrier(MEMBARRIER_EXPEDITED);
	else
		cmm_smp_mb();
}
#endif
#ifdef RCU_MB
static void smp_mb_master(int group)
{
	cmm_smp_mb();
}
#endif
#ifdef RCU_SIGNAL
static void force_mb_all_readers(void)
{
	struct rcu_reader *index;

	/*
	 * Ask each thread to execute a cmm_smp_mb(), so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (cds_list_empty(&registry))
		return;
	/*
	 * pthread_kill has a cmm_smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and not assume anything: we use cmm_smp_mc() to make sure
	 * the cache flush is enforced.
	 */
	cds_list_for_each_entry(index, &registry, node) {
		CMM_STORE_SHARED(index->need_mb, 1);
		pthread_kill(index->tid, SIGRCU);
	}
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 *
	 * Note that the pthread_kill() will never be executed on systems
	 * that correctly deliver signals in a timely manner. However, it
	 * is not uncommon for kernels to have bugs that can result in
	 * lost or unduly delayed signals.
	 *
	 * If you are seeing the below pthread_kill() executing much at
	 * all, we suggest testing the underlying kernel and filing the
	 * relevant bug report. For Linux kernels, we recommend getting
	 * the Linux Test Project (LTP).
	 */
	cds_list_for_each_entry(index, &registry, node) {
		while (CMM_LOAD_SHARED(index->need_mb)) {
			pthread_kill(index->tid, SIGRCU);
			poll(NULL, 0, 1);
		}
	}
	cmm_smp_mb();	/* read ->need_mb before ending the barrier */
}
static void smp_mb_master(int group)
{
	force_mb_all_readers();
}
#endif /* #ifdef RCU_SIGNAL */
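/*
 * Summary of the three smp_mb_master() implementations above:
 * RCU_MEMBARRIER uses sys_membarrier() when the kernel provides it, so
 * readers only need compiler barriers; RCU_MB has readers issue real
 * memory barriers themselves; RCU_SIGNAL promotes the readers' compiler
 * barriers to memory barriers on demand by signalling every registered
 * reader thread.
 */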
/*
 * synchronize_rcu() waiting. Single thread.
 */
static void wait_gp(void)
{
	/* Read reader_gp before read futex */
	smp_mb_master(RCU_MB_GROUP);
	if (uatomic_read(&rcu_gp.futex) == -1)
		futex_async(&rcu_gp.futex, FUTEX_WAIT, -1,
			NULL, NULL, 0);
}
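/*
 * The matching wake-up runs on the reader side, in the outermost
 * rcu_read_unlock() (wake_up_gp() in urcu/static/urcu.h). Roughly, as a
 * sketch rather than the authoritative code:
 *
 *	if (caa_unlikely(uatomic_read(&rcu_gp.futex) == -1)) {
 *		uatomic_set(&rcu_gp.futex, 0);
 *		futex_async(&rcu_gp.futex, FUTEX_WAKE, 1, NULL, NULL, 0);
 *	}
 */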
/*
 * Always called with rcu_registry_lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct rcu_reader *index, *tmp;
#ifdef HAS_INCOHERENT_CACHES
	unsigned int wait_gp_loops = 0;
#endif /* HAS_INCOHERENT_CACHES */

	/*
	 * Wait for each thread URCU_TLS(rcu_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;
		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
			uatomic_dec(&rcu_gp.futex);
			/* Write futex before read reader_gp */
			smp_mb_master(RCU_MB_GROUP);
		}

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (rcu_reader_state(&index->ctr)) {
			case RCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case RCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case RCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

#ifndef HAS_INCOHERENT_CACHES
		if (cds_list_empty(input_readers)) {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master(RCU_MB_GROUP);
				uatomic_set(&rcu_gp.futex, 0);
			}
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
				wait_gp();
			else
				caa_cpu_relax();
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
#else /* #ifndef HAS_INCOHERENT_CACHES */
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * URCU_TLS(rcu_reader).ctr update to memory if we wait
		 * for too long.
		 */
		if (cds_list_empty(input_readers)) {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master(RCU_MB_GROUP);
				uatomic_set(&rcu_gp.futex, 0);
			}
			break;
		} else {
			if (wait_gp_loops == KICK_READER_LOOPS) {
				smp_mb_master(RCU_MB_GROUP);
				wait_gp_loops = 0;
			}
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				wait_gp();
				wait_gp_loops++;
			} else {
				caa_cpu_relax();
			}
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
	}
}
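/*
 * synchronize_rcu() below calls wait_for_readers() twice per grace
 * period: first over the whole registry, parking readers that already
 * observed the current rcu_gp.ctr value on cur_snap_readers; then, after
 * flipping the parity, over cur_snap_readers (passing NULL) until each
 * remaining reader either observes the new parity or becomes quiescent.
 */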
void synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
	struct urcu_waiters waiters;

	/*
	 * Add ourselves to the gp_waiters queue of threads awaiting a grace
	 * period. Proceed to perform the grace period only if we are the
	 * first thread added into the queue.
	 * The implicit memory barrier before urcu_wait_add()
	 * orders prior memory accesses of threads put into the wait
	 * queue before their insertion into the wait queue.
	 */
	if (urcu_wait_add(&gp_waiters, &wait) != 0) {
		/* Not first in queue: will be awakened by another thread. */
		urcu_adaptative_busy_wait(&wait);
		/* Order following memory accesses after grace period. */
		cmm_smp_mb();
		return;
	}
	/* We won't need to wake ourselves up */
	urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);

	mutex_lock(&rcu_gp_lock);

	/*
	 * Move all waiters into our local queue.
	 */
	urcu_move_waiters(&waiters, &gp_waiters);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * All threads should read qparity before accessing data structure
	 * where new ptr points to. Must be done within rcu_registry_lock
	 * because it iterates on reader threads.
	 */
	/* Write new ptr before changing the qparity */
	smp_mb_master(RCU_MB_GROUP);

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Must finish waiting for quiescent state for original parity before
	 * committing next rcu_gp.ctr update to memory. Failure to do so could
	 * result in the writer waiting forever while new readers are always
	 * accessing data (no progress). Enforce compiler-order of load
	 * URCU_TLS(rcu_reader).ctr before store to rcu_gp.ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr ^ RCU_GP_CTR_PHASE);

	/*
	 * Must commit rcu_gp.ctr update to memory before waiting for quiescent
	 * state. Failure to do so could result in the writer waiting forever
	 * while new readers are always accessing data (no progress). Enforce
	 * compiler-order of store to rcu_gp.ctr before load rcu_reader ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr
	 * be freed. Must be done within rcu_registry_lock because it
	 * iterates on reader threads.
	 */
	smp_mb_master(RCU_MB_GROUP);
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);

	/*
	 * Wakeup waiters only after we have completed the grace period
	 * and have ensured the memory barriers at the end of the grace
	 * period have been issued.
	 */
	urcu_wake_all_waiters(&waiters);
}
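/*
 * Illustrative updater-side usage (a sketch, not part of the library;
 * `global_ptr` and `struct mynode` are hypothetical application names):
 *
 *	struct mynode *old, *new;
 *
 *	new = malloc(sizeof(*new));
 *	new->value = 42;
 *	old = rcu_xchg_pointer(&global_ptr, new);	// publish new version
 *	synchronize_rcu();	// wait: no pre-existing reader can hold `old`
 *	free(old);		// now safe to reclaim
 */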
/*
 * Library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

int rcu_read_ongoing(void)
{
	return _rcu_read_ongoing();
}
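/*
 * Illustrative reader-side usage (a sketch, not part of the library;
 * `global_ptr`, `struct mynode` and `use()` are hypothetical):
 *
 *	struct mynode *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(global_ptr);
 *	if (p)
 *		use(p->value);
 *	rcu_read_unlock();
 */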
void rcu_register_thread(void)
{
	URCU_TLS(rcu_reader).tid = pthread_self();
	assert(URCU_TLS(rcu_reader).need_mb == 0);
	assert(!(URCU_TLS(rcu_reader).ctr & RCU_GP_CTR_NEST_MASK));

	mutex_lock(&rcu_registry_lock);
	rcu_init();	/* In case gcc does not support constructor attribute */
	cds_list_add(&URCU_TLS(rcu_reader).node, &registry);
	mutex_unlock(&rcu_registry_lock);
}
void rcu_unregister_thread(void)
{
	mutex_lock(&rcu_registry_lock);
	cds_list_del(&URCU_TLS(rcu_reader).node);
	mutex_unlock(&rcu_registry_lock);
}
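/*
 * A minimal reader-thread lifecycle, for illustration only (the thread
 * function name is hypothetical):
 *
 *	void *reader_thread(void *arg)
 *	{
 *		rcu_register_thread();
 *		// ... rcu_read_lock()/rcu_read_unlock() critical sections ...
 *		rcu_unregister_thread();
 *		return NULL;
 *	}
 */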
#ifdef RCU_MEMBARRIER
void rcu_init(void)
{
	if (init_done)
		return;
	init_done = 1;
	if (!membarrier(MEMBARRIER_EXPEDITED | MEMBARRIER_QUERY))
		rcu_has_sys_membarrier = 1;
}
#endif
#ifdef RCU_SIGNAL
static void sigrcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	/*
	 * Executing this cmm_smp_mb() is the only purpose of this signal
	 * handler. It punctually promotes cmm_barrier() into cmm_smp_mb()
	 * on every thread it is executed on.
	 */
	cmm_smp_mb();
	_CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
	cmm_smp_mb();
}
/*
 * rcu_init constructor. Called when the library is linked, but also when
 * reader threads are calling rcu_register_thread().
 * Should only be called by a single thread at a given time. This is ensured by
 * holding the rcu_registry_lock from rcu_register_thread() or by running
 * at library load time, which should not be executed by multiple
 * threads nor concurrently with rcu_register_thread() anyway.
 */
void rcu_init(void)
{
	struct sigaction act;
	int ret;

	if (init_done)
		return;
	init_done = 1;

	act.sa_sigaction = sigrcu_handler;
	act.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGRCU, &act, NULL);
	if (ret)
		urcu_die(errno);
}
void rcu_exit(void)
{
	/*
	 * Don't unregister the SIGRCU signal handler anymore, because
	 * call_rcu threads could still be using it shortly before the
	 * application exits.
	 * Assertion disabled because call_rcu threads are now rcu
	 * readers, and left running at exit.
	 * assert(cds_list_empty(&registry));
	 */
}

#endif /* #ifdef RCU_SIGNAL */
DEFINE_RCU_FLAVOR(rcu_flavor);

#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"