/*
 * urcu-bp.c
 *
 * Userspace RCU library, "bulletproof" version.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#define _GNU_SOURCE
#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <sys/mman.h>

#include "urcu/arch.h"
#include "urcu/wfcqueue.h"
#include "urcu/map/urcu-bp.h"
#include "urcu/static/urcu-bp.h"
#include "urcu-pointer.h"
#include "urcu/tls-compat.h"

#include "urcu-die.h"

/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include "urcu-bp.h"
#define _LGPL_SOURCE

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS		MAP_ANON
#endif

#ifdef __linux__
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	return mremap(old_address, old_size, new_size, flags);
}
#else

#define MREMAP_MAYMOVE	1
#define MREMAP_FIXED	2

/*
 * mremap wrapper for non-Linux systems not allowing MAYMOVE.
 * This is not generic.
 */
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	assert(!(flags & MREMAP_MAYMOVE));

	return MAP_FAILED;
}
#endif

/* Sleep delay in ms */
#define RCU_SLEEP_DELAY_MS	10
#define INIT_NR_THREADS		8
#define ARENA_INIT_ALLOC		\
	sizeof(struct registry_chunk)	\
	+ INIT_NR_THREADS * sizeof(struct rcu_reader)
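
/*
 * Sizing example, with illustrative platform-dependent sizes: if struct
 * registry_chunk takes 48 bytes and struct rcu_reader takes 64 bytes,
 * ARENA_INIT_ALLOC evaluates to 48 + 8 * 64 = 560 bytes, which the mmap()
 * in expand_arena() rounds up to a whole page.
 */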

/*
 * Active attempts to check for reader Q.S. before calling sleep().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

static
int rcu_bp_refcount;

/* If the headers do not support the membarrier system call, fall back to smp_mb. */
#ifdef __NR_membarrier
# define membarrier(...)	syscall(__NR_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif

enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY = 0,
	MEMBARRIER_CMD_SHARED = (1 << 0),
};
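
/*
 * MEMBARRIER_CMD_QUERY returns a bitmask of commands supported by the
 * running kernel; MEMBARRIER_CMD_SHARED issues a memory barrier on all
 * running threads. When available, this lets smp_mb_master() below pair
 * with cheap compiler-only barriers on the read side instead of requiring
 * readers to emit full memory barriers themselves.
 */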

static
void __attribute__((constructor)) rcu_bp_init(void);
static
void __attribute__((destructor)) rcu_bp_exit(void);

int urcu_bp_has_sys_membarrier;

/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held all the way through the grace-period wait: it
 * is sporadically released between iterations on the registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int initialized;

static pthread_key_t urcu_bp_key;

struct rcu_gp rcu_gp = { .ctr = RCU_GP_COUNT };
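
/*
 * rcu_gp.ctr holds the global grace-period state: readers snapshot it when
 * entering an outermost read-side critical section, and synchronize_rcu()
 * toggles its RCU_GP_CTR_PHASE bit so that readers holding a pre-toggle
 * snapshot can be told apart from readers that started afterwards.
 */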

/*
 * Pointer to registry elements. Written to only by each individual reader. Read
 * by both the reader and the writers.
 */
DEFINE_URCU_TLS(struct rcu_reader *, rcu_reader);

static CDS_LIST_HEAD(registry);

struct registry_chunk {
	size_t data_len;		/* data length */
	size_t used;			/* amount of data used */
	struct cds_list_head node;	/* chunk_list node */
	char data[];			/* flexible array of rcu_reader slots */
};

struct registry_arena {
	struct cds_list_head chunk_list;
};

static struct registry_arena registry_arena = {
	.chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
};

/* Saved fork signal mask, protected by rcu_gp_lock */
static sigset_t saved_fork_signal_mask;

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}

static void smp_mb_master(void)
{
	if (caa_likely(urcu_bp_has_sys_membarrier))
		(void) membarrier(MEMBARRIER_CMD_SHARED, 0);
	else
		cmm_smp_mb();
}

/*
 * Always called with rcu_registry lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct rcu_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(rcu_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (rcu_reader_state(&index->ctr)) {
			case RCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case RCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case RCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
				(void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
			else
				caa_cpu_relax();
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
	}
}
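
/*
 * Reader classification used above: RCU_READER_INACTIVE means the thread is
 * outside any read-side critical section; RCU_READER_ACTIVE_CURRENT means it
 * entered after the latest phase flip; RCU_READER_ACTIVE_OLD means it still
 * holds a pre-flip snapshot, and is therefore the only state the grace
 * period must keep waiting on.
 */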

void synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);

	mutex_lock(&rcu_gp_lock);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/* All threads should read qparity before accessing data structure
	 * where new ptr points to. */
	/* Write new ptr before changing the qparity */
	smp_mb_master();

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr ^ RCU_GP_CTR_PHASE);

	/*
	 * Must commit qparity update to memory before waiting for other parity
	 * quiescent state. Failure to do so could result in the writer waiting
	 * forever while new readers are always accessing data (no progress).
	 * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
	 */

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr be
	 * freed.
	 */
	smp_mb_master();
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

int rcu_read_ongoing(void)
{
	return _rcu_read_ongoing();
}
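
/*
 * Illustrative usage sketch: a reader dereferences a shared pointer inside a
 * read-side critical section while an updater publishes a replacement and
 * reclaims the old object after a grace period. The names shared_ptr, old,
 * new and use() are hypothetical.
 *
 *	struct foo *p, *old, *new;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(shared_ptr);
 *	if (p)
 *		use(p);
 *	rcu_read_unlock();
 *
 *	old = rcu_xchg_pointer(&shared_ptr, new);
 *	synchronize_rcu();	// wait for pre-existing readers
 *	free(old);
 */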

/*
 * Only grow for now. If empty, allocate a ARENA_INIT_ALLOC sized chunk.
 * Else, try expanding the last chunk. If this fails, allocate a new
 * chunk twice as big as the last chunk.
 * Memory used by chunks _never_ moves. A chunk could theoretically be
 * freed when all "used" slots are released, but we don't do it at this
 * point.
 */
static
void expand_arena(struct registry_arena *arena)
{
	struct registry_chunk *new_chunk, *last_chunk;
	size_t old_chunk_len, new_chunk_len;

	/* No chunk yet: allocate the initial chunk. */
	if (cds_list_empty(&arena->chunk_list)) {
		assert(ARENA_INIT_ALLOC >=
			sizeof(struct registry_chunk)
			+ sizeof(struct rcu_reader));
		new_chunk_len = ARENA_INIT_ALLOC;
		new_chunk = mmap(NULL, new_chunk_len,
			PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE,
			-1, 0);
		if (new_chunk == MAP_FAILED)
			abort();
		memset(new_chunk, 0, new_chunk_len);
		new_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
		return;		/* We're done. */
	}

	/* Try expanding last chunk. */
	last_chunk = cds_list_entry(arena->chunk_list.prev,
		struct registry_chunk, node);
	old_chunk_len =
		last_chunk->data_len + sizeof(struct registry_chunk);
	new_chunk_len = old_chunk_len << 1;

	/* Don't allow memory mapping to move, just expand. */
	new_chunk = mremap_wrapper(last_chunk, old_chunk_len,
		new_chunk_len, 0);
	if (new_chunk != MAP_FAILED) {
		/* Should not have moved. */
		assert(new_chunk == last_chunk);
		memset((char *) last_chunk + old_chunk_len, 0,
			new_chunk_len - old_chunk_len);
		last_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		return;		/* We're done. */
	}

	/* Remap did not succeed, we need to add a new chunk. */
	new_chunk = mmap(NULL, new_chunk_len,
		PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE,
		-1, 0);
	if (new_chunk == MAP_FAILED)
		abort();
	memset(new_chunk, 0, new_chunk_len);
	new_chunk->data_len =
		new_chunk_len - sizeof(struct registry_chunk);
	cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
}
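
/*
 * Growth pattern: each successful expansion doubles the full mapping length
 * (header included), and mremap_wrapper() is called without MREMAP_MAYMOVE,
 * so an in-place expansion either succeeds at the same address or fails and
 * a fresh chunk is mmap()ed. Either way, registered rcu_reader slots keep
 * stable addresses, which the registry relies on.
 */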

static
struct rcu_reader *arena_alloc(struct registry_arena *arena)
{
	struct registry_chunk *chunk;
	struct rcu_reader *rcu_reader_reg;
	int expand_done = 0;	/* Only allow to expand once per alloc */
	size_t len = sizeof(struct rcu_reader);

retry:
	cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
		if (chunk->data_len - chunk->used < len)
			continue;
		/* Find spot */
		for (rcu_reader_reg = (struct rcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct rcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc) {
				rcu_reader_reg->alloc = 1;
				chunk->used += len;
				return rcu_reader_reg;
			}
		}
	}

	if (!expand_done) {
		expand_arena(arena);
		expand_done = 1;
		goto retry;
	}

	return NULL;
}
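
/*
 * Note: allocation is a first-fit scan over all chunks; a slot released by
 * cleanup_thread() (alloc == 0) is reused by the next registering thread.
 * expand_arena() is attempted at most once per call, so a second failing
 * scan returns NULL rather than looping.
 */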

/* Called with signals off and mutex locked */
static
void add_thread(void)
{
	struct rcu_reader *rcu_reader_reg;
	int ret;

	rcu_reader_reg = arena_alloc(&registry_arena);
	if (!rcu_reader_reg)
		abort();
	ret = pthread_setspecific(urcu_bp_key, rcu_reader_reg);
	if (ret)
		abort();

	/* Add to registry */
	rcu_reader_reg->tid = pthread_self();
	assert(rcu_reader_reg->ctr == 0);
	cds_list_add(&rcu_reader_reg->node, &registry);
	/*
	 * Reader threads are pointing to the reader registry. This is
	 * why its memory should never be relocated.
	 */
	URCU_TLS(rcu_reader) = rcu_reader_reg;
}

/* Called with mutex locked */
static
void cleanup_thread(struct registry_chunk *chunk,
		struct rcu_reader *rcu_reader_reg)
{
	rcu_reader_reg->ctr = 0;
	cds_list_del(&rcu_reader_reg->node);
	rcu_reader_reg->tid = 0;
	rcu_reader_reg->alloc = 0;
	chunk->used -= sizeof(struct rcu_reader);
}

static
struct registry_chunk *find_chunk(struct rcu_reader *rcu_reader_reg)
{
	struct registry_chunk *chunk;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		if (rcu_reader_reg < (struct rcu_reader *) &chunk->data[0])
			continue;
		if (rcu_reader_reg >= (struct rcu_reader *) &chunk->data[chunk->data_len])
			continue;
		return chunk;
	}
	return NULL;
}

/* Called with signals off and mutex locked */
static
void remove_thread(struct rcu_reader *rcu_reader_reg)
{
	cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
	URCU_TLS(rcu_reader) = NULL;
}

/* Disable signals, take mutex, add to registry */
void rcu_bp_register(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	/*
	 * Check if a signal concurrently registered our thread since
	 * the check in rcu_read_lock().
	 */
	if (URCU_TLS(rcu_reader))
		goto end;

	/*
	 * Take care of early registration before urcu_bp constructor.
	 */
	rcu_bp_init();

	mutex_lock(&rcu_registry_lock);
	add_thread();
	mutex_unlock(&rcu_registry_lock);
end:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
}
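
/*
 * This lazy registration is what makes the flavor "bulletproof": the
 * read-side fast path in urcu/static/urcu-bp.h checks URCU_TLS(rcu_reader)
 * and calls rcu_bp_register() on first use, so threads never need to
 * register explicitly before calling rcu_read_lock().
 */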

/* Disable signals, take mutex, remove from registry */
static
void rcu_bp_unregister(struct rcu_reader *rcu_reader_reg)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	mutex_lock(&rcu_registry_lock);
	remove_thread(rcu_reader_reg);
	mutex_unlock(&rcu_registry_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	rcu_bp_exit();
}

/*
 * Remove thread from the registry when it exits, and flag it as
 * destroyed so garbage collection can take care of it.
 */
static
void urcu_bp_thread_exit_notifier(void *rcu_key)
{
	rcu_bp_unregister(rcu_key);
}

static
void rcu_bp_init(void)
{
	mutex_lock(&init_lock);
	if (!rcu_bp_refcount++) {
		int ret;

		ret = pthread_key_create(&urcu_bp_key,
				urcu_bp_thread_exit_notifier);
		if (ret)
			abort();
		ret = membarrier(MEMBARRIER_CMD_QUERY, 0);
		if (ret >= 0 && (ret & MEMBARRIER_CMD_SHARED)) {
			urcu_bp_has_sys_membarrier = 1;
		}
		initialized = 1;
	}
	mutex_unlock(&init_lock);
}

static
void rcu_bp_exit(void)
{
	mutex_lock(&init_lock);
	if (!--rcu_bp_refcount) {
		struct registry_chunk *chunk, *tmp;
		int ret;

		cds_list_for_each_entry_safe(chunk, tmp,
				&registry_arena.chunk_list, node) {
			munmap(chunk, chunk->data_len
				+ sizeof(struct registry_chunk));
		}
		CDS_INIT_LIST_HEAD(&registry_arena.chunk_list);
		ret = pthread_key_delete(urcu_bp_key);
		if (ret)
			abort();
	}
	mutex_unlock(&init_lock);
}

/*
 * Holding the rcu_gp_lock and rcu_registry_lock across fork makes sure
 * fork() does not race with a concurrent thread executing with any of
 * those locks held. This ensures that the registry and data protected
 * by rcu_gp_lock are in a coherent state in the child.
 */
void rcu_bp_before_fork(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);
	mutex_lock(&rcu_gp_lock);
	mutex_lock(&rcu_registry_lock);
	saved_fork_signal_mask = oldmask;
}

void rcu_bp_after_fork_parent(void)
{
	sigset_t oldmask;
	int ret;

	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}

/*
 * Prune all entries from registry except our own thread. Fits the Linux
 * fork behavior. Called with rcu_gp_lock and rcu_registry_lock held.
 */
static
void urcu_bp_prune_registry(void)
{
	struct registry_chunk *chunk;
	struct rcu_reader *rcu_reader_reg;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		for (rcu_reader_reg = (struct rcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct rcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc)
				continue;
			if (rcu_reader_reg->tid == pthread_self())
				continue;
			cleanup_thread(chunk, rcu_reader_reg);
		}
	}
}

void rcu_bp_after_fork_child(void)
{
	sigset_t oldmask;
	int ret;

	urcu_bp_prune_registry();
	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
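
/*
 * Illustrative wiring, assuming the application forks while using urcu-bp:
 * the three handlers above are meant to wrap fork(), e.g. registered once
 * at startup with:
 *
 *	ret = pthread_atfork(rcu_bp_before_fork,
 *			rcu_bp_after_fork_parent,
 *			rcu_bp_after_fork_child);
 */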

void *rcu_dereference_sym_bp(void *p)
{
	return _rcu_dereference(p);
}

void *rcu_set_pointer_sym_bp(void **p, void *v)
{
	cmm_wmb();
	uatomic_set(p, v);
	return v;
}

void *rcu_xchg_pointer_sym_bp(void **p, void *v)
{
	cmm_wmb();
	return uatomic_xchg(p, v);
}

void *rcu_cmpxchg_pointer_sym_bp(void **p, void *old, void *_new)
{
	cmm_wmb();
	return uatomic_cmpxchg(p, old, _new);
}
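
/*
 * The cmm_wmb() in the publication wrappers above orders object
 * initialization before pointer publication, so a reader that loads the
 * new pointer through rcu_dereference() cannot observe pre-initialization
 * contents.
 */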

DEFINE_RCU_FLAVOR(rcu_flavor);

#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"