// SPDX-FileCopyrightText: 2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
// SPDX-License-Identifier: LGPL-2.1-or-later
#include <inttypes.h>		/* For PRIu64 */
#include <stdint.h>		/* For uint64_t */
#include <stdio.h>		/* For printf() */
#include <stdlib.h>		/* For calloc()/free() */

#include <urcu/urcu-signal.h>	/* Signal-based RCU flavor */
#include <urcu/rculist.h>	/* List example */
#include <urcu/compiler.h>	/* For CAA_ARRAY_SIZE */
/*
 * Example showing how to use the signal-based Userspace RCU flavor.
 *
 * This is a mock-up example where updates and RCU traversals are
 * performed by the same thread to keep things simple on purpose.
 */
22 static CDS_LIST_HEAD(mylist
);
26 struct cds_list_head node
; /* linked-list chaining */
27 struct rcu_head rcu_head
; /* for call_rcu() */
31 int add_node(uint64_t v
)
35 node
= calloc(sizeof(*node
), 1);
39 cds_list_add_rcu(&node
->node
, &mylist
);
44 void rcu_free_node(struct rcu_head
*rh
)
46 struct mynode
*node
= caa_container_of(rh
, struct mynode
, rcu_head
);
53 uint64_t values
[] = { 42, 36, 24, };
56 struct mynode
*node
, *n
;
59 * Each thread need using RCU read-side need to be explicitly
62 urcu_signal_register_thread();
65 * Adding nodes to the linked-list. Safe against concurrent
66 * RCU traversals, require mutual exclusion with list updates.
68 for (i
= 0; i
< CAA_ARRAY_SIZE(values
); i
++) {
69 ret
= add_node(values
[i
]);
75 * We need to explicitly mark RCU read-side critical sections
76 * with rcu_read_lock() and rcu_read_unlock(). They can be
77 * nested. Those are no-ops for the QSBR flavor.
79 urcu_signal_read_lock();
82 * RCU traversal of the linked list.
84 cds_list_for_each_entry_rcu(node
, &mylist
, node
) {
85 printf("Value: %" PRIu64
"\n", node
->value
);
87 urcu_signal_read_unlock();
90 * Removing nodes from linked list. Safe against concurrent RCU
91 * traversals, require mutual exclusion with list updates.
93 cds_list_for_each_entry_safe(node
, n
, &mylist
, node
) {
94 cds_list_del_rcu(&node
->node
);
96 * call_rcu() will ensure that the handler
97 * "rcu_free_node" is executed after a grace period.
98 * call_rcu() can be called from RCU read-side critical
101 urcu_signal_call_rcu(&node
->rcu_head
, rcu_free_node
);
105 * We can also wait for a quiescent state by calling
106 * synchronize_rcu() rather than using call_rcu(). It is usually
107 * a slower approach than call_rcu(), because the latter can
108 * batch work. Moreover, call_rcu() can be called from a RCU
109 * read-side critical section, but synchronize_rcu() should not.
111 urcu_signal_synchronize_rcu();
116 * Waiting for previously called call_rcu handlers to complete
117 * before program exits, or in library destructors, is a good
120 urcu_signal_barrier();
123 urcu_signal_unregister_thread();