uatomic/x86: Remove redundant memory barriers
[urcu.git] / doc / examples / urcu-flavors / membarrier.c
CommitLineData
1c87adb3
MJ
1// SPDX-FileCopyrightText: 2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
56e47f8d
MD
4
5#include <unistd.h>
6#include <stdlib.h>
7#include <stdio.h>
8#include <stdint.h>
9#include <inttypes.h>
10
b9050d91 11#include <urcu/urcu-memb.h> /* sys_membarrier() RCU flavor */
56e47f8d
MD
12#include <urcu/rculist.h> /* List example */
13#include <urcu/compiler.h> /* For CAA_ARRAY_SIZE */
14
15/*
16 * Example showing how to use the sys_membarrier()-based Userspace RCU
17 * flavor.
18 *
19 * This is a mock-up example where updates and RCU traversals are
20 * performed by the same thread to keep things simple on purpose.
21 */
22
/*
 * Head of the RCU-protected linked list. Readers traverse it with
 * cds_list_for_each_entry_rcu(); updaters must mutually exclude each
 * other (this example is single-threaded, so no lock is taken).
 */
static CDS_LIST_HEAD(mylist);

/* One element of the RCU-protected list. */
struct mynode {
	uint64_t value;			/* payload printed during traversal */
	struct cds_list_head node;	/* linked-list chaining */
	struct rcu_head rcu_head;	/* for call_rcu() deferred reclaim */
};
30
31static
32int add_node(uint64_t v)
33{
34 struct mynode *node;
35
81270292 36 node = calloc(1, sizeof(*node));
56e47f8d
MD
37 if (!node)
38 return -1;
39 node->value = v;
40 cds_list_add_rcu(&node->node, &mylist);
41 return 0;
42}
43
44static
45void rcu_free_node(struct rcu_head *rh)
46{
47 struct mynode *node = caa_container_of(rh, struct mynode, rcu_head);
48
49 free(node);
50}
51
70469b43 52int main(void)
56e47f8d
MD
53{
54 uint64_t values[] = { 42, 36, 24, };
55 unsigned int i;
56 int ret;
57 struct mynode *node, *n;
58
59 /*
60 * Each thread need using RCU read-side need to be explicitly
61 * registered.
62 */
b9050d91 63 urcu_memb_register_thread();
56e47f8d
MD
64
65 /*
66 * Adding nodes to the linked-list. Safe against concurrent
67 * RCU traversals, require mutual exclusion with list updates.
68 */
69 for (i = 0; i < CAA_ARRAY_SIZE(values); i++) {
70 ret = add_node(values[i]);
71 if (ret)
72 goto end;
73 }
74
75 /*
76 * We need to explicitly mark RCU read-side critical sections
77 * with rcu_read_lock() and rcu_read_unlock(). They can be
78 * nested. Those are no-ops for the QSBR flavor.
79 */
b9050d91 80 urcu_memb_read_lock();
56e47f8d
MD
81
82 /*
83 * RCU traversal of the linked list.
84 */
85 cds_list_for_each_entry_rcu(node, &mylist, node) {
86 printf("Value: %" PRIu64 "\n", node->value);
87 }
b9050d91 88 urcu_memb_read_unlock();
56e47f8d
MD
89
90 /*
91 * Removing nodes from linked list. Safe against concurrent RCU
92 * traversals, require mutual exclusion with list updates.
93 */
94 cds_list_for_each_entry_safe(node, n, &mylist, node) {
95 cds_list_del_rcu(&node->node);
96 /*
97 * call_rcu() will ensure that the handler
98 * "rcu_free_node" is executed after a grace period.
99 * call_rcu() can be called from RCU read-side critical
100 * sections.
101 */
b9050d91 102 urcu_memb_call_rcu(&node->rcu_head, rcu_free_node);
56e47f8d
MD
103 }
104
d7818a6f
MD
105 /*
106 * We can also wait for a quiescent state by calling
107 * synchronize_rcu() rather than using call_rcu(). It is usually
108 * a slower approach than call_rcu(), because the latter can
109 * batch work. Moreover, call_rcu() can be called from a RCU
110 * read-side critical section, but synchronize_rcu() should not.
111 */
b9050d91 112 urcu_memb_synchronize_rcu();
d7818a6f 113
56e47f8d
MD
114 sleep(1);
115
116 /*
117 * Waiting for previously called call_rcu handlers to complete
118 * before program exits, or in library destructors, is a good
119 * practice.
120 */
b9050d91 121 urcu_memb_barrier();
56e47f8d
MD
122
123end:
b9050d91 124 urcu_memb_unregister_thread();
56e47f8d
MD
125 return ret;
126}
This page took 0.041279 seconds and 4 git commands to generate.