/*
 * urcu-qsbr.c
 *
 * Userspace RCU QSBR library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */
#include <pthread.h>
#include <assert.h>
#include <errno.h>
#include <poll.h>

#include "urcu/wfcqueue.h"
#include "urcu/map/urcu-qsbr.h"
#define BUILD_QSBR_LIB
#include "urcu/static/urcu-qsbr.h"
#include "urcu-pointer.h"
#include "urcu/tls-compat.h"

#include "urcu-die.h"
#include "urcu-wait.h"

/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include "urcu-qsbr.h"
#define _LGPL_SOURCE
void __attribute__((destructor)) rcu_exit(void);
/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held through the entire grace-period wait: it is
 * sporadically released between iterations on the registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;

struct rcu_gp rcu_gp = { .ctr = RCU_GP_ONLINE };
/*
 * Active attempts to check for reader Q.S. before calling futex().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
DEFINE_URCU_TLS(struct rcu_reader, rcu_reader);

static CDS_LIST_HEAD(registry);

/*
 * Queue of threads waiting for a grace period. Contains
 * struct gp_waiters_thread objects.
 */
static DEFINE_URCU_WAIT_QUEUE(gp_waiters);
static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}
/*
 * synchronize_rcu() waiting. Single thread.
 */
static void wait_gp(void)
{
	/* Read reader_gp before read futex */
	cmm_smp_rmb();
	if (uatomic_read(&rcu_gp.futex) != -1)
		return;
	while (futex_noasync(&rcu_gp.futex, FUTEX_WAIT, -1,
			NULL, NULL, 0)) {
		switch (errno) {
		case EWOULDBLOCK:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}
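
/*
 * For context, wait_gp() pairs with a wake-up path on the reader side.
 * A rough sketch of that pairing, assuming the wake-up helper lives in
 * urcu/static/urcu-qsbr.h (sketch only, not the verbatim header code):
 *
 *	if (caa_unlikely(_CMM_LOAD_SHARED(URCU_TLS(rcu_reader).waiting))) {
 *		_CMM_STORE_SHARED(URCU_TLS(rcu_reader).waiting, 0);
 *		cmm_smp_mb();
 *		if (uatomic_read(&rcu_gp.futex) == -1) {
 *			uatomic_set(&rcu_gp.futex, 0);
 *			futex_noasync(&rcu_gp.futex, FUTEX_WAKE, 1,
 *					NULL, NULL, 0);
 *		}
 *	}
 *
 * The reader writes "waiting" and the futex in the opposite order of
 * the grace-period side, which is why wait_for_readers() below needs
 * its write/read barriers around the futex accesses.
 */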
/*
 * Always called with rcu_registry lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct rcu_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(rcu_reader).ctr to either
	 * indicate quiescence (offline), or for them to observe the
	 * current rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;
		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
			uatomic_set(&rcu_gp.futex, -1);
			/*
			 * Write futex before write waiting (the other side
			 * reads them in the opposite order).
			 */
			cmm_smp_wmb();
			cds_list_for_each_entry(index, input_readers, node) {
				_CMM_STORE_SHARED(index->waiting, 1);
			}
			/* Write futex before read reader_gp */
			cmm_smp_mb();
		}
		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (rcu_reader_state(&index->ctr)) {
			case RCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case RCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case RCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				cmm_smp_mb();
				uatomic_set(&rcu_gp.futex, 0);
			}
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				wait_gp();
			} else {
#ifndef HAS_INCOHERENT_CACHES
				caa_cpu_relax();
#else /* #ifndef HAS_INCOHERENT_CACHES */
				cmm_smp_mb();
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
			}
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
	}
}
/*
 * Use a two-subphase algorithm on architectures whose long size is
 * smaller than 64 bits, to ensure we do not encounter an overflow bug.
 */

#if (CAA_BITS_PER_LONG < 64)
void synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	unsigned long was_online;
	DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
	struct urcu_waiters waiters;

	was_online = rcu_read_ongoing();

	/* All threads should read qparity before accessing data structure
	 * where the new ptr points to.  In the "then" case, rcu_thread_offline
	 * includes a memory barrier.
	 *
	 * Mark the writer thread offline to make sure we don't wait for
	 * our own quiescent state. This allows using synchronize_rcu()
	 * in threads registered as readers.
	 */
	if (was_online)
		rcu_thread_offline();
	else
		cmm_smp_mb();

	/*
	 * Add ourself to gp_waiters queue of threads awaiting to wait
	 * for a grace period. Proceed to perform the grace period only
	 * if we are the first thread added into the queue.
	 */
	if (urcu_wait_add(&gp_waiters, &wait) != 0) {
		/* Not first in queue: will be awakened by another thread. */
		urcu_adaptative_busy_wait(&wait);
		goto gp_end;
	}
	/* We won't need to wake ourself up */
	urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);

	mutex_lock(&rcu_gp_lock);

	/*
	 * Move all waiters into our local queue.
	 */
	urcu_move_waiters(&waiters, &gp_waiters);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Must finish waiting for quiescent state for original parity
	 * before committing next rcu_gp.ctr update to memory. Failure
	 * to do so could result in the writer waiting forever while new
	 * readers are always accessing data (no progress).  Enforce
	 * compiler-order of load URCU_TLS(rcu_reader).ctr before store
	 * to rcu_gp.ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr ^ RCU_GP_CTR);

	/*
	 * Must commit rcu_gp.ctr update to memory before waiting for
	 * quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data
	 * (no progress). Enforce compiler-order of store to rcu_gp.ctr
	 * before load URCU_TLS(rcu_reader).ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	urcu_wake_all_waiters(&waiters);
gp_end:
	/*
	 * Finish waiting for reader threads before letting the old ptr
	 * be freed.
	 */
	if (was_online)
		rcu_thread_online();
	else
		cmm_smp_mb();
}
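
/*
 * Worked illustration of the two-subphase scheme (illustration only):
 * a free-running 32-bit counter could wrap, letting a stalled reader's
 * old ctr snapshot alias a fresh rcu_gp.ctr value and be misclassified
 * as current. Toggling a single parity bit avoids wraparound entirely,
 * at the cost of waiting in two subphases:
 *
 *	1) wait_for_readers(&registry, &cur_snap_readers, &qsreaders):
 *	   readers still active on the original parity are parked in
 *	   cur_snap_readers, quiescent ones in qsreaders;
 *	2) the parity flip (rcu_gp.ctr ^ RCU_GP_CTR) turns those parked
 *	   snapshots into "old" snapshots;
 *	3) wait_for_readers(&cur_snap_readers, NULL, &qsreaders): wait
 *	   until those readers observe the new parity or go offline.
 */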
#else /* !(CAA_BITS_PER_LONG < 64) */
void synchronize_rcu(void)
{
	CDS_LIST_HEAD(qsreaders);
	unsigned long was_online;
	DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
	struct urcu_waiters waiters;

	was_online = rcu_read_ongoing();

	/*
	 * Mark the writer thread offline to make sure we don't wait for
	 * our own quiescent state. This allows using synchronize_rcu()
	 * in threads registered as readers.
	 */
	if (was_online)
		rcu_thread_offline();
	else
		cmm_smp_mb();

	/*
	 * Add ourself to gp_waiters queue of threads awaiting to wait
	 * for a grace period. Proceed to perform the grace period only
	 * if we are the first thread added into the queue.
	 */
	if (urcu_wait_add(&gp_waiters, &wait) != 0) {
		/* Not first in queue: will be awakened by another thread. */
		urcu_adaptative_busy_wait(&wait);
		goto gp_end;
	}
	/* We won't need to wake ourself up */
	urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);

	mutex_lock(&rcu_gp_lock);

	/*
	 * Move all waiters into our local queue.
	 */
	urcu_move_waiters(&waiters, &gp_waiters);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/* Increment current G.P. */
	CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr + RCU_GP_CTR);

	/*
	 * Must commit rcu_gp.ctr update to memory before waiting for
	 * quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data
	 * (no progress). Enforce compiler-order of store to rcu_gp.ctr
	 * before load URCU_TLS(rcu_reader).ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new count or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	urcu_wake_all_waiters(&waiters);
gp_end:
	if (was_online)
		rcu_thread_online();
	else
		cmm_smp_mb();
}
#endif /* !(CAA_BITS_PER_LONG < 64) */
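
/*
 * Updater-side usage sketch (illustration only; shared_ptr, new_node
 * and old_node are hypothetical application variables):
 *
 *	old_node = rcu_dereference(shared_ptr);
 *	rcu_assign_pointer(shared_ptr, new_node);
 *	synchronize_rcu();	-- wait for pre-existing readers
 *	free(old_node);
 *
 * Once synchronize_rcu() returns, no registered reader can still hold
 * a reference obtained before the pointer was replaced.
 */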
/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

int rcu_read_ongoing(void)
{
	return _rcu_read_ongoing();
}

void rcu_quiescent_state(void)
{
	_rcu_quiescent_state();
}

void rcu_thread_offline(void)
{
	_rcu_thread_offline();
}

void rcu_thread_online(void)
{
	_rcu_thread_online();
}
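
/*
 * Reader-side usage sketch for the QSBR flavor (illustration only):
 * rcu_read_lock()/rcu_read_unlock() merely delimit the read-side
 * critical section; protection comes from the thread being online and
 * periodically announcing a quiescent state:
 *
 *	rcu_read_lock();
 *	v = rcu_dereference(shared_ptr)->value;
 *	rcu_read_unlock();
 *	...
 *	rcu_quiescent_state();	-- no RCU reference may be held here
 *
 * Threads about to block for a long time should instead bracket the
 * blocking period with rcu_thread_offline()/rcu_thread_online().
 */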
void rcu_register_thread(void)
{
	URCU_TLS(rcu_reader).tid = pthread_self();
	assert(URCU_TLS(rcu_reader).ctr == 0);

	mutex_lock(&rcu_registry_lock);
	assert(!URCU_TLS(rcu_reader).registered);
	URCU_TLS(rcu_reader).registered = 1;
	cds_list_add(&URCU_TLS(rcu_reader).node, &registry);
	mutex_unlock(&rcu_registry_lock);
	_rcu_thread_online();
}
void rcu_unregister_thread(void)
{
	/*
	 * We have to make the thread offline, otherwise we end up
	 * deadlocking with a waiting writer.
	 */
	_rcu_thread_offline();
	assert(URCU_TLS(rcu_reader).registered);
	URCU_TLS(rcu_reader).registered = 0;
	mutex_lock(&rcu_registry_lock);
	cds_list_del(&URCU_TLS(rcu_reader).node);
	mutex_unlock(&rcu_registry_lock);
}
void rcu_exit(void)
{
	/*
	 * Assertion disabled because call_rcu threads are now rcu
	 * readers, and left running at exit.
	 * assert(cds_list_empty(&registry));
	 */
}

DEFINE_RCU_FLAVOR(rcu_flavor);
#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"