// SPDX-FileCopyrightText: 2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
//
// SPDX-License-Identifier: LGPL-2.1-or-later

#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#include <urcu/urcu-mb.h>	/* Memory barrier RCU flavor */
#include <urcu/rculist.h>	/* List example */
#include <urcu/compiler.h>	/* For CAA_ARRAY_SIZE() */

/*
 * Example showing how to use the memory-barrier-based Userspace RCU
 * flavor.
 *
 * This is a mock-up example where updates and RCU traversals are
 * performed by the same thread to keep things simple on purpose.
 */

static CDS_LIST_HEAD(mylist);

struct mynode {
	uint64_t value;
	struct cds_list_head node;	/* linked-list chaining */
	struct rcu_head rcu_head;	/* for call_rcu() */
};

/*
 * Allocate a new node, set its value, and add it at the head of the
 * RCU linked list.
 */
static
int add_node(uint64_t v)
{
	struct mynode *node;

	node = calloc(1, sizeof(*node));
	if (!node)
		return -1;
	node->value = v;
	cds_list_add_rcu(&node->node, &mylist);
	return 0;
}
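
/*
 * Note: in a multi-threaded program, list updates such as the
 * cds_list_add_rcu() above would need mutual exclusion between
 * updaters. A minimal sketch, assuming a hypothetical pthread mutex
 * named list_mutex protecting updates to mylist:
 *
 *	pthread_mutex_lock(&list_mutex);
 *	cds_list_add_rcu(&node->node, &mylist);
 *	pthread_mutex_unlock(&list_mutex);
 *
 * This example performs all updates from a single thread, so no
 * mutex is taken here.
 */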

/*
 * call_rcu() handler: free the node memory once a grace period has
 * elapsed.
 */
static
void rcu_free_node(struct rcu_head *rh)
{
	struct mynode *node = caa_container_of(rh, struct mynode, rcu_head);

	free(node);
}
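
/*
 * Illustrative addition (not part of the original example): a
 * read-side lookup. The lookup, and any use of the node it finds,
 * must stay within the read-side critical section, since a node may
 * be freed as soon as the section is left. Read-side critical
 * sections can be nested, so this helper may also be called from
 * within an enclosing critical section.
 */
static
int find_value(uint64_t v)
{
	struct mynode *node;
	int found = 0;

	urcu_mb_read_lock();
	cds_list_for_each_entry_rcu(node, &mylist, node) {
		if (node->value == v) {
			found = 1;
			break;
		}
	}
	urcu_mb_read_unlock();
	return found;
}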

int main(void)
{
	uint64_t values[] = { 42, 36, 24, };
	unsigned int i;
	int ret;
	struct mynode *node, *n;

	/*
	 * Each thread that uses RCU read-side critical sections needs
	 * to be explicitly registered.
	 */
	urcu_mb_register_thread();

	/*
	 * Add nodes to the linked list. This is safe against
	 * concurrent RCU traversals, but requires mutual exclusion
	 * with other list updates.
	 */
	for (i = 0; i < CAA_ARRAY_SIZE(values); i++) {
		ret = add_node(values[i]);
		if (ret)
			goto end;
	}

	/*
	 * RCU read-side critical sections need to be explicitly
	 * delimited with urcu_mb_read_lock() and
	 * urcu_mb_read_unlock(). They can be nested. (The equivalent
	 * calls are no-ops for the QSBR flavor.)
	 */
	urcu_mb_read_lock();

	/*
	 * RCU traversal of the linked list.
	 */
	cds_list_for_each_entry_rcu(node, &mylist, node) {
		printf("Value: %" PRIu64 "\n", node->value);
	}
	urcu_mb_read_unlock();
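
	/*
	 * Illustrative use of the find_value() helper added above
	 * (not part of the original example).
	 */
	if (find_value(24))
		printf("Value 24 is present in the list.\n");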

	/*
	 * Remove nodes from the linked list. This is safe against
	 * concurrent RCU traversals, but requires mutual exclusion
	 * with other list updates.
	 */
	cds_list_for_each_entry_safe(node, n, &mylist, node) {
		cds_list_del_rcu(&node->node);
		/*
		 * call_rcu() ensures that the handler
		 * "rcu_free_node" is executed after a grace period.
		 * call_rcu() can be called from RCU read-side
		 * critical sections.
		 */
		urcu_mb_call_rcu(&node->rcu_head, rcu_free_node);
	}

	/*
	 * We can also wait for a grace period by calling
	 * synchronize_rcu() rather than using call_rcu(). This is
	 * usually slower than call_rcu(), because the latter can
	 * batch work. Moreover, call_rcu() can be called from an RCU
	 * read-side critical section, but synchronize_rcu() must not
	 * be, since that would deadlock.
	 */
	urcu_mb_synchronize_rcu();
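
	/*
	 * For illustration, a sketch of the synchronize_rcu() based
	 * deletion pattern (not used by this example, which relies on
	 * call_rcu() above):
	 *
	 *	cds_list_del_rcu(&node->node);
	 *	urcu_mb_synchronize_rcu();
	 *	free(node);
	 */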

	/*
	 * Leave some time for the call_rcu worker thread to run the
	 * queued handlers. This is not required for correctness:
	 * urcu_mb_barrier() below is what guarantees their
	 * completion.
	 */
	sleep(1);

	/*
	 * Waiting for the completion of the handlers previously
	 * queued with call_rcu() before the program exits, or in
	 * library destructors, is good practice.
	 */
	urcu_mb_barrier();

end:
	urcu_mb_unregister_thread();
	return ret;
}