// SPDX-FileCopyrightText: 2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
//
// SPDX-License-Identifier: LGPL-2.1-or-later
#include <stdio.h>		/* For printf() */
#include <stdlib.h>		/* For calloc() and free() */
#include <stdint.h>		/* For uint64_t */
#include <inttypes.h>		/* For PRIu64 */

#include <urcu/urcu-mb.h>	/* Memory barrier RCU flavor */
#include <urcu/rculist.h>	/* List example */
#include <urcu/compiler.h>	/* For CAA_ARRAY_SIZE */
/*
 * Example showing how to use the memory-barrier-based Userspace RCU
 * flavor.
 *
 * This is a mock-up example where updates and RCU traversals are
 * performed by the same thread to keep things simple on purpose.
 */
static CDS_LIST_HEAD(mylist);
struct mynode {
	uint64_t value;
	struct cds_list_head node;	/* linked-list chaining */
	struct rcu_head rcu_head;	/* for call_rcu() */
};

static
int add_node(uint64_t v)
{
	struct mynode *node;

	node = calloc(1, sizeof(*node));
	if (!node)
		return -1;
	node->value = v;
	cds_list_add_rcu(&node->node, &mylist);
	return 0;
}
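
/*
 * Added commentary (not in the original example): cds_list_add_rcu()
 * publishes the new node with the memory ordering the flavor requires,
 * so a concurrent reader that can reach the node in the list is also
 * guaranteed to observe node->value as initialized above.
 */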

static
void rcu_free_node(struct rcu_head *rh)
{
	struct mynode *node = caa_container_of(rh, struct mynode, rcu_head);

	free(node);
}
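
/*
 * Added commentary: the callback only receives the embedded rcu_head,
 * so caa_container_of() is used above to recover the enclosing
 * struct mynode before freeing it.
 */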

int main(void)
{
	uint64_t values[] = { 42, 36, 24, };
	unsigned int i;
	int ret = 0;
	struct mynode *node, *n;

	/*
	 * Each thread using the RCU read-side needs to be explicitly
	 * registered.
	 */
	urcu_mb_register_thread();

	/*
	 * Add nodes to the linked list. This is safe against concurrent
	 * RCU traversals, but requires mutual exclusion with other list
	 * updates.
	 */
	for (i = 0; i < CAA_ARRAY_SIZE(values); i++) {
		ret = add_node(values[i]);
		if (ret)
			goto end;
	}

	/*
	 * We need to explicitly mark RCU read-side critical sections
	 * with rcu_read_lock() and rcu_read_unlock(). They can be
	 * nested. Those are no-ops for the QSBR flavor.
	 */
	urcu_mb_read_lock();

	/*
	 * RCU traversal of the linked list.
	 */
	cds_list_for_each_entry_rcu(node, &mylist, node) {
		printf("Value: %" PRIu64 "\n", node->value);
	}
	urcu_mb_read_unlock();
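
	/*
	 * Nesting sketch (added, not in the original example): read-side
	 * critical sections can nest, and the critical section only ends
	 * at the outermost unlock.
	 */
	urcu_mb_read_lock();
	urcu_mb_read_lock();	/* nested acquisition is allowed */
	urcu_mb_read_unlock();	/* still inside the critical section */
	urcu_mb_read_unlock();	/* outermost unlock ends the section */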

	/*
	 * Remove nodes from the linked list. This is safe against
	 * concurrent RCU traversals, but requires mutual exclusion with
	 * other list updates.
	 */
	cds_list_for_each_entry_safe(node, n, &mylist, node) {
		cds_list_del_rcu(&node->node);
		/*
		 * call_rcu() will ensure that the handler
		 * "rcu_free_node" is executed after a grace period.
		 * call_rcu() can be called from RCU read-side critical
		 * sections.
		 */
		urcu_mb_call_rcu(&node->rcu_head, rcu_free_node);
	}
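
	/*
	 * Added commentary: the rcu_head passed to call_rcu() is
	 * embedded in the node being reclaimed, so that memory must stay
	 * valid until rcu_free_node() runs after a grace period; here
	 * the callback itself frees the whole node.
	 */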

	/*
	 * We can also wait for a quiescent state by calling
	 * synchronize_rcu() rather than using call_rcu(). It is usually
	 * a slower approach than call_rcu(), because the latter can
	 * batch work. Moreover, call_rcu() can be called from an RCU
	 * read-side critical section, but synchronize_rcu() must not.
	 */
	urcu_mb_synchronize_rcu();
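
	/*
	 * Sketch (added, not in the original example) of the
	 * synchronize_rcu()-based reclaim pattern that the comment above
	 * describes, assuming exclusive update access to the list:
	 *
	 *	cds_list_del_rcu(&node->node);
	 *	urcu_mb_synchronize_rcu();	// wait for readers
	 *	free(node);			// now safe to reclaim
	 */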

end:
	/*
	 * Waiting for previously called call_rcu handlers to complete
	 * before the program exits, or in library destructors, is good
	 * practice.
	 */
	urcu_mb_barrier();

	urcu_mb_unregister_thread();
	return ret;
}