/*
 * SPDX-License-Identifier: LGPL-2.1-or-later
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * Userspace RCU library for LTTng-UST, derived from liburcu "bulletproof" version.
 */
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <stdbool.h>
#include <sys/mman.h>

#include <urcu/arch.h>
#include <urcu/wfcqueue.h>
#include <lttng/urcu/static/urcu-ust.h>
#include <lttng/urcu/pointer.h>
#include <urcu/tls-compat.h>

/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include <lttng/urcu/urcu-ust.h>
#define _LGPL_SOURCE

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS		MAP_ANON
#endif

#ifdef __linux__
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	return mremap(old_address, old_size, new_size, flags);
}
#else

#define MREMAP_MAYMOVE	1
#define MREMAP_FIXED	2

/*
 * mremap wrapper for non-Linux systems not allowing MAYMOVE.
 * This is not generic.
 */
static
void *mremap_wrapper(void *old_address __attribute__((unused)),
		size_t old_size __attribute__((unused)),
		size_t new_size __attribute__((unused)),
		int flags)
{
	assert(!(flags & MREMAP_MAYMOVE));

	return MAP_FAILED;
}
#endif
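
/*
 * Note: on non-Linux systems this wrapper always "fails" by returning
 * MAP_FAILED, which steers expand_arena() below toward its fallback path
 * of mmap()ing a brand new chunk instead of growing the last one in place.
 */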

/* Sleep delay in ms */
#define RCU_SLEEP_DELAY_MS	10
#define INIT_READER_COUNT	8

/*
 * Active attempts to check for reader Q.S. before calling sleep().
 */
#define RCU_QS_ACTIVE_ATTEMPTS	100

static
int lttng_ust_urcu_refcount;

/* If the headers do not support the membarrier system call, fall back to smp_mb. */
#ifdef __NR_membarrier
# define membarrier(...)	syscall(__NR_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif

enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY				= 0,
	MEMBARRIER_CMD_SHARED				= (1 << 0),
	/* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
	/* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
	MEMBARRIER_CMD_PRIVATE_EXPEDITED		= (1 << 3),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED	= (1 << 4),
};

static
void _lttng_ust_urcu_init(void)
	__attribute__((constructor));
static
void lttng_ust_urcu_exit(void)
	__attribute__((destructor));

#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
int lttng_ust_urcu_has_sys_membarrier;
#endif

/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held for the entire duration of the grace-period
 * wait: it is sporadically released between iterations on the
 * registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int initialized;

static pthread_key_t lttng_ust_urcu_key;

struct lttng_ust_urcu_gp lttng_ust_urcu_gp = { .ctr = LTTNG_UST_URCU_GP_COUNT };
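
/*
 * Sketch of the counter layout this file relies on (the constants
 * themselves come from lttng/urcu/static/urcu-ust.h): the low-order bits
 * of lttng_ust_urcu_gp.ctr count read-side nesting in
 * LTTNG_UST_URCU_GP_COUNT increments, while the
 * LTTNG_UST_URCU_GP_CTR_PHASE bit encodes the grace-period parity that
 * synchronize_rcu() flips below. Readers snapshot this counter into their
 * per-thread ->ctr on the outermost read_lock, which is what
 * lttng_ust_urcu_reader_state() inspects.
 */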

/*
 * Pointer to registry elements. Written to only by each individual reader. Read
 * by both the reader and the writers.
 */
DEFINE_URCU_TLS(struct lttng_ust_urcu_reader *, lttng_ust_urcu_reader);

static CDS_LIST_HEAD(registry);

struct registry_chunk {
	size_t capacity;		/* capacity of this chunk (in elements) */
	size_t used;			/* count of elements used */
	struct cds_list_head node;	/* chunk_list node */
	struct lttng_ust_urcu_reader readers[];
};

struct registry_arena {
	struct cds_list_head chunk_list;
};

static struct registry_arena registry_arena = {
	.chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
};

/* Saved fork signal mask, protected by rcu_gp_lock */
static sigset_t saved_fork_signal_mask;

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		abort();
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			abort();
		(void) poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
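
/*
 * Rationale (sketch): under DISTRUST_SIGNALS_EXTREME, the trylock/poll
 * loop above avoids sleeping inside pthread_mutex_lock() while signals
 * may be delivered, at the cost of polling every 10 ms. The default path
 * simply blocks, since this library disables signals around its registry
 * and grace-period critical sections anyway.
 */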

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		abort();
}

static void smp_mb_master(void)
{
	if (caa_likely(lttng_ust_urcu_has_sys_membarrier)) {
		if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
			abort();
	} else {
		cmm_smp_mb();
	}
}

/* Get the size of a chunk's allocation from its capacity (an element count). */
static size_t chunk_allocation_size(size_t capacity)
{
	return (capacity * sizeof(struct lttng_ust_urcu_reader)) +
		sizeof(struct registry_chunk);
}
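
/*
 * Worked example: the initial chunk holds INIT_READER_COUNT (8) readers,
 * so its mapping spans sizeof(struct registry_chunk) header bytes plus
 * 8 * sizeof(struct lttng_ust_urcu_reader) bytes for the flexible array.
 * Each doubling of the capacity roughly doubles that allocation size.
 */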

/*
 * Always called with rcu_registry lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
		struct cds_list_head *cur_snap_readers,
		struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct lttng_ust_urcu_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(lttng_ust_urcu_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * lttng_ust_urcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (lttng_ust_urcu_reader_state(&index->ctr)) {
			case LTTNG_UST_URCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* fall through */
			case LTTNG_UST_URCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case LTTNG_UST_URCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
				(void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
			else
				caa_cpu_relax();
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
	}
}
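
/*
 * How the two calls in synchronize_rcu() use these buckets (sketch): the
 * first pass moves readers already observing the current parity into
 * cur_snap_readers and quiescent readers into qsreaders; after the parity
 * flip, the second pass re-checks the cur_snap_readers list with
 * cur_snap_readers == NULL, so every remaining reader must either go
 * quiescent or observe the new parity before the grace period ends.
 */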

void lttng_ust_urcu_synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);

	mutex_lock(&rcu_gp_lock);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/* All threads should read qparity before accessing data structure
	 * where new ptr points to. */
	/* Write new ptr before changing the qparity */
	smp_mb_master();

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(lttng_ust_urcu_gp.ctr,
		lttng_ust_urcu_gp.ctr ^ LTTNG_UST_URCU_GP_CTR_PHASE);

	/*
	 * Must commit qparity update to memory before waiting for other parity
	 * quiescent state. Failure to do so could result in the writer waiting
	 * forever while new readers are always accessing data (no progress).
	 * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
	 */

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr be
	 * freed.
	 */
	smp_mb_master();
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void lttng_ust_urcu_read_lock(void)
{
	_lttng_ust_urcu_read_lock();
}

void lttng_ust_urcu_read_unlock(void)
{
	_lttng_ust_urcu_read_unlock();
}

int lttng_ust_urcu_read_ongoing(void)
{
	return _lttng_ust_urcu_read_ongoing();
}
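
/*
 * Illustrative usage sketch (not part of this library): a reader thread
 * registers itself once, then brackets each read-side critical section
 * with the wrappers above, while an updater publishes a new pointer and
 * waits for a grace period before reclaiming the old one.
 *
 *	lttng_ust_urcu_register_thread();
 *	lttng_ust_urcu_read_lock();
 *	...access the RCU-protected data structure...
 *	lttng_ust_urcu_read_unlock();
 *
 *	old = p;
 *	p = new;	(real code should publish via lttng/urcu/pointer.h)
 *	lttng_ust_urcu_synchronize_rcu();
 *	free(old);
 */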

/*
 * Only grow for now. If empty, allocate a chunk sized for
 * INIT_READER_COUNT readers. Else, try expanding the last chunk in
 * place. If this fails, allocate a new chunk twice as big as the last
 * chunk.
 * Memory used by chunks _never_ moves. A chunk could theoretically be
 * freed when all "used" slots are released, but we don't do it at this
 * point.
 */
static
void expand_arena(struct registry_arena *arena)
{
	struct registry_chunk *new_chunk, *last_chunk;
	size_t old_chunk_size_bytes, new_chunk_size_bytes, new_capacity;

	/* No chunk: allocate the first one. */
	if (cds_list_empty(&arena->chunk_list)) {
		new_chunk_size_bytes = chunk_allocation_size(INIT_READER_COUNT);
		new_chunk = (struct registry_chunk *) mmap(NULL,
			new_chunk_size_bytes,
			PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE,
			-1, 0);
		if (new_chunk == MAP_FAILED)
			abort();
		memset(new_chunk, 0, new_chunk_size_bytes);
		new_chunk->capacity = INIT_READER_COUNT;
		cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
		return;		/* We're done. */
	}

	/* Try expanding last chunk. */
	last_chunk = cds_list_entry(arena->chunk_list.prev,
		struct registry_chunk, node);
	old_chunk_size_bytes = chunk_allocation_size(last_chunk->capacity);
	new_capacity = last_chunk->capacity << 1;
	new_chunk_size_bytes = chunk_allocation_size(new_capacity);

	/* Don't allow memory mapping to move, just expand. */
	new_chunk = mremap_wrapper(last_chunk, old_chunk_size_bytes,
		new_chunk_size_bytes, 0);
	if (new_chunk != MAP_FAILED) {
		/* Should not have moved. */
		assert(new_chunk == last_chunk);
		memset((char *) last_chunk + old_chunk_size_bytes, 0,
			new_chunk_size_bytes - old_chunk_size_bytes);
		last_chunk->capacity = new_capacity;
		return;		/* We're done. */
	}

	/* Remap did not succeed, we need to add a new chunk. */
	new_chunk = (struct registry_chunk *) mmap(NULL,
		new_chunk_size_bytes,
		PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE,
		-1, 0);
	if (new_chunk == MAP_FAILED)
		abort();
	memset(new_chunk, 0, new_chunk_size_bytes);
	new_chunk->capacity = new_capacity;
	cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
}
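
/*
 * Growth example: starting from INIT_READER_COUNT, successive expansions
 * yield capacities of 8, 16, 32, ... readers, so the number of chunks
 * stays logarithmic in the number of reader threads while existing reader
 * slots never move in memory.
 */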

static
struct lttng_ust_urcu_reader *arena_alloc(struct registry_arena *arena)
{
	struct registry_chunk *chunk;
	int expand_done = 0;	/* Only allow to expand once per alloc */

retry:
	cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
		size_t spot_idx;

		/* Skip fully used chunks. */
		if (chunk->used == chunk->capacity) {
			continue;
		}

		/* Find a free spot. */
		for (spot_idx = 0; spot_idx < chunk->capacity; spot_idx++) {
			if (!chunk->readers[spot_idx].alloc) {
				chunk->readers[spot_idx].alloc = 1;
				chunk->used++;
				return &chunk->readers[spot_idx];
			}
		}
	}

	if (!expand_done) {
		expand_arena(arena);
		expand_done = 1;
		goto retry;
	}

	return NULL;
}

/* Called with signals off and mutex locked */
static
void add_thread(void)
{
	struct lttng_ust_urcu_reader *rcu_reader_reg;
	int ret;

	rcu_reader_reg = arena_alloc(&registry_arena);
	if (!rcu_reader_reg)
		abort();
	ret = pthread_setspecific(lttng_ust_urcu_key, rcu_reader_reg);
	if (ret)
		abort();

	/* Add to registry */
	rcu_reader_reg->tid = pthread_self();
	assert(rcu_reader_reg->ctr == 0);
	cds_list_add(&rcu_reader_reg->node, &registry);
	/*
	 * Reader threads are pointing to the reader registry. This is
	 * why its memory should never be relocated.
	 */
	URCU_TLS(lttng_ust_urcu_reader) = rcu_reader_reg;
}

/* Called with mutex locked */
static
void cleanup_thread(struct registry_chunk *chunk,
		struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	rcu_reader_reg->ctr = 0;
	cds_list_del(&rcu_reader_reg->node);
	rcu_reader_reg->tid = 0;
	rcu_reader_reg->alloc = 0;
	chunk->used--;
}

static
struct registry_chunk *find_chunk(struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	struct registry_chunk *chunk;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		if (rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->readers[0])
			continue;
		if (rcu_reader_reg >= (struct lttng_ust_urcu_reader *) &chunk->readers[chunk->capacity])
			continue;
		return chunk;
	}
	return NULL;
}

/* Called with signals off and mutex locked */
static
void remove_thread(struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
	URCU_TLS(lttng_ust_urcu_reader) = NULL;
}

/* Disable signals, take mutex, add to registry */
void lttng_ust_urcu_register(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	/*
	 * Check if a signal concurrently registered our thread since
	 * the check in rcu_read_lock().
	 */
	if (URCU_TLS(lttng_ust_urcu_reader))
		goto end;

	/*
	 * Take care of early registration before lttng_ust_urcu constructor.
	 */
	_lttng_ust_urcu_init();

	mutex_lock(&rcu_registry_lock);
	add_thread();
	mutex_unlock(&rcu_registry_lock);
end:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
}

void lttng_ust_urcu_register_thread(void)
{
	if (caa_unlikely(!URCU_TLS(lttng_ust_urcu_reader)))
		lttng_ust_urcu_register(); /* If not yet registered. */
}

/* Disable signals, take mutex, remove from registry */
static
void lttng_ust_urcu_unregister(struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	mutex_lock(&rcu_registry_lock);
	remove_thread(rcu_reader_reg);
	mutex_unlock(&rcu_registry_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	lttng_ust_urcu_exit();
}

/*
 * Remove thread from the registry when it exits, and flag it as
 * destroyed so garbage collection can take care of it.
 */
static
void lttng_ust_urcu_thread_exit_notifier(void *rcu_key)
{
	lttng_ust_urcu_unregister(rcu_key);
}

#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
static
void lttng_ust_urcu_sys_membarrier_status(bool available)
{
	if (!available)
		abort();
}
#else
static
void lttng_ust_urcu_sys_membarrier_status(bool available)
{
	if (!available)
		return;
	lttng_ust_urcu_has_sys_membarrier = 1;
}
#endif

static
void lttng_ust_urcu_sys_membarrier_init(void)
{
	bool available = false;
	int mask;

	mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
	if (mask >= 0) {
		if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
			if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
				abort();
			available = true;
		}
	}
	lttng_ust_urcu_sys_membarrier_status(available);
}

static
void _lttng_ust_urcu_init(void)
{
	mutex_lock(&init_lock);
	if (!lttng_ust_urcu_refcount++) {
		int ret;

		ret = pthread_key_create(&lttng_ust_urcu_key,
				lttng_ust_urcu_thread_exit_notifier);
		if (ret)
			abort();
		lttng_ust_urcu_sys_membarrier_init();
		initialized = 1;
	}
	mutex_unlock(&init_lock);
}

static
void lttng_ust_urcu_exit(void)
{
	mutex_lock(&init_lock);
	if (!--lttng_ust_urcu_refcount) {
		struct registry_chunk *chunk, *tmp;
		int ret;

		cds_list_for_each_entry_safe(chunk, tmp,
				&registry_arena.chunk_list, node) {
			munmap((void *) chunk,
				chunk_allocation_size(chunk->capacity));
		}
		CDS_INIT_LIST_HEAD(&registry_arena.chunk_list);
		ret = pthread_key_delete(lttng_ust_urcu_key);
		if (ret)
			abort();
	}
	mutex_unlock(&init_lock);
}

/*
 * Holding the rcu_gp_lock and rcu_registry_lock across fork will make
 * sure fork() does not race with a concurrent thread executing with
 * any of those locks held. This ensures that the registry and data
 * protected by rcu_gp_lock are in a coherent state in the child.
 */
void lttng_ust_urcu_before_fork(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);
	mutex_lock(&rcu_gp_lock);
	mutex_lock(&rcu_registry_lock);
	saved_fork_signal_mask = oldmask;
}
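
/*
 * Usage sketch (assumption: callers wire these handlers up themselves,
 * e.g. via pthread_atfork(), as this file does not install them):
 *
 *	pthread_atfork(lttng_ust_urcu_before_fork,
 *		lttng_ust_urcu_after_fork_parent,
 *		lttng_ust_urcu_after_fork_child);
 */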

void lttng_ust_urcu_after_fork_parent(void)
{
	sigset_t oldmask;
	int ret;

	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}

/*
 * Prune all entries from registry except our own thread. Fits the Linux
 * fork behavior. Called with rcu_gp_lock and rcu_registry_lock held.
 */
static
void lttng_ust_urcu_prune_registry(void)
{
	struct registry_chunk *chunk;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		size_t spot_idx;

		for (spot_idx = 0; spot_idx < chunk->capacity; spot_idx++) {
			struct lttng_ust_urcu_reader *reader = &chunk->readers[spot_idx];

			if (!reader->alloc)
				continue;
			if (reader->tid == pthread_self())
				continue;
			cleanup_thread(chunk, reader);
		}
	}
}

void lttng_ust_urcu_after_fork_child(void)
{
	sigset_t oldmask;
	int ret;

	lttng_ust_urcu_prune_registry();
	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}