#ifndef _URCU_WAIT_H
#define _URCU_WAIT_H

/*
 * urcu-wait.h
 *
 * Userspace RCU library wait/wakeup management
 *
 * Copyright (c) 2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <assert.h>
#include <poll.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

#include <urcu/arch.h>
#include <urcu/compiler.h>
#include <urcu/futex.h>
#include <urcu/uatomic.h>
#include <urcu/wfstack.h>

/*
 * Number of busy-loop attempts before waiting on futex for grace period
 * batching.
 */
#define URCU_WAIT_ATTEMPTS 1000

enum urcu_wait_state {
	/* URCU_WAIT_WAITING is compared directly (the futex compares it). */
	URCU_WAIT_WAITING = 0,
	/* The non-zero states are used as bit masks. */
	URCU_WAIT_WAKEUP = (1 << 0),
	URCU_WAIT_RUNNING = (1 << 1),
	URCU_WAIT_TEARDOWN = (1 << 2),
};
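
/*
 * Typical lifecycle of a wait node's state, as driven by the helpers
 * below (an informal summary of one possible interleaving, not an
 * additional API guarantee):
 *
 *	waiter:	state = URCU_WAIT_WAITING, add self to wait queue
 *	waiter:	busy-wait, then futex wait, until state != URCU_WAIT_WAITING
 *	waker:	state = URCU_WAIT_WAKEUP; futex wake unless waiter is RUNNING
 *	waiter:	state |= URCU_WAIT_RUNNING
 *	waker:	state |= URCU_WAIT_TEARDOWN
 *	waiter:	observes TEARDOWN, may now free the wait node memory
 */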

struct urcu_wait_node {
	struct cds_wfs_node node;
	int32_t state;	/* enum urcu_wait_state */
};

#define URCU_WAIT_NODE_INIT(name, _state)		\
	{ .state = _state }

#define DEFINE_URCU_WAIT_NODE(name, state)		\
	struct urcu_wait_node name = URCU_WAIT_NODE_INIT(name, state)

#define DECLARE_URCU_WAIT_NODE(name)			\
	struct urcu_wait_node name

struct urcu_wait_queue {
	struct cds_wfs_stack stack;
};

#define URCU_WAIT_QUEUE_HEAD_INIT(name)			\
	{ .stack.head = CDS_WFS_END, .stack.lock = PTHREAD_MUTEX_INITIALIZER }

#define DECLARE_URCU_WAIT_QUEUE(name)			\
	struct urcu_wait_queue name

#define DEFINE_URCU_WAIT_QUEUE(name)			\
	struct urcu_wait_queue name = URCU_WAIT_QUEUE_HEAD_INIT(name)
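
/*
 * Example (a sketch, not part of this API): defining a file-scope wait
 * queue shared between waiters and their waker. The name "gp_waiters"
 * is illustrative only.
 *
 *	static DEFINE_URCU_WAIT_QUEUE(gp_waiters);
 */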

struct urcu_waiters {
	struct cds_wfs_head *head;
};

/*
 * Atomically add ourselves to a wait queue. Return false if the queue
 * was previously empty, true otherwise.
 * A full memory barrier is issued before the node is added to the wait
 * queue.
 */
static inline
bool urcu_wait_add(struct urcu_wait_queue *queue,
		struct urcu_wait_node *node)
{
	return cds_wfs_push(&queue->stack, &node->node);
}

/*
 * Atomically move all waiters from the wait queue into our local struct
 * urcu_waiters.
 */
static inline
void urcu_move_waiters(struct urcu_waiters *waiters,
		struct urcu_wait_queue *queue)
{
	waiters->head = __cds_wfs_pop_all(&queue->stack);
}
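
/*
 * Example (a sketch, not part of this API): waker-side batching. The
 * waker atomically empties the shared queue into a private snapshot,
 * completes the work the waiters are blocked on, then wakes the whole
 * snapshot. The queue name "gp_waiters" and the do_grace_period()
 * helper are illustrative only.
 *
 *	struct urcu_waiters waiters;
 *
 *	urcu_move_waiters(&waiters, &gp_waiters);
 *	do_grace_period();
 *	urcu_wake_all_waiters(&waiters);
 */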

static inline
void urcu_wait_set_state(struct urcu_wait_node *node,
		enum urcu_wait_state state)
{
	node->state = state;
}

static inline
void urcu_wait_node_init(struct urcu_wait_node *node,
		enum urcu_wait_state state)
{
	urcu_wait_set_state(node, state);
	cds_wfs_node_init(&node->node);
}

/*
 * Note: urcu_adaptative_wake_up needs the "wait" node to stay allocated
 * throughout its execution. In this scheme, the waiter owns the node
 * memory, and we only allow it to free this memory when it receives the
 * URCU_WAIT_TEARDOWN flag.
 */
static inline
void urcu_adaptative_wake_up(struct urcu_wait_node *wait)
{
	cmm_smp_mb();
	assert(uatomic_read(&wait->state) == URCU_WAIT_WAITING);
	uatomic_set(&wait->state, URCU_WAIT_WAKEUP);
	if (!(uatomic_read(&wait->state) & URCU_WAIT_RUNNING))
		futex_noasync(&wait->state, FUTEX_WAKE, 1, NULL, NULL, 0);
	/* Allow teardown of struct urcu_wait memory. */
	uatomic_or(&wait->state, URCU_WAIT_TEARDOWN);
}

/*
 * Caller must initialize "wait->state" to URCU_WAIT_WAITING before
 * passing its memory to the waker thread.
 */
static inline
void urcu_adaptative_busy_wait(struct urcu_wait_node *wait)
{
	unsigned int i;

	/* Load and test condition before reading state. */
	cmm_smp_rmb();
	for (i = 0; i < URCU_WAIT_ATTEMPTS; i++) {
		if (uatomic_read(&wait->state) != URCU_WAIT_WAITING)
			goto skip_futex_wait;
		caa_cpu_relax();
	}
	futex_noasync(&wait->state, FUTEX_WAIT,
		URCU_WAIT_WAITING, NULL, NULL, 0);
skip_futex_wait:

	/* Tell the waker thread that we are running. */
	uatomic_or(&wait->state, URCU_WAIT_RUNNING);

	/*
	 * Wait until the waker thread lets us know it's ok to tear down
	 * memory allocated for struct urcu_wait.
	 */
	for (i = 0; i < URCU_WAIT_ATTEMPTS; i++) {
		if (uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN)
			break;
		caa_cpu_relax();
	}
	while (!(uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN))
		poll(NULL, 0, 10);
	assert(uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN);
}
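
/*
 * Example (a sketch, not part of this API): waiter-side usage with a
 * stack-allocated node; the queue name "gp_waiters" is illustrative
 * only. The node may only go out of scope after
 * urcu_adaptative_busy_wait() returns, since the waker writes to it
 * until it sets URCU_WAIT_TEARDOWN.
 *
 *	struct urcu_wait_node wait;
 *
 *	urcu_wait_node_init(&wait, URCU_WAIT_WAITING);
 *	urcu_wait_add(&gp_waiters, &wait);
 *	urcu_adaptative_busy_wait(&wait);
 */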

static inline
void urcu_wake_all_waiters(struct urcu_waiters *waiters)
{
	struct cds_wfs_node *iter, *iter_n;

	/* Wake all waiters in our stack head. */
	cds_wfs_for_each_blocking_safe(waiters->head, iter, iter_n) {
		struct urcu_wait_node *wait_node =
			caa_container_of(iter, struct urcu_wait_node, node);

		/* Don't wake already running threads. */
		if (uatomic_read(&wait_node->state) & URCU_WAIT_RUNNING)
			continue;
		urcu_adaptative_wake_up(wait_node);
	}
}

#endif /* _URCU_WAIT_H */