#ifndef _URCU_WAITQUEUE_LIFO_H
#define _URCU_WAITQUEUE_LIFO_H

/*
 * urcu/waitqueue-lifo.h
 *
 * Userspace RCU library - wait queue scheme with LIFO semantic
 *
 * Copyright (c) 2012-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <assert.h>
#include <errno.h>
#include <poll.h>

#include <urcu/uatomic.h>
#include <urcu/lfstack.h>
#include <urcu/futex.h>
/*
 * Number of busy-loop attempts before waiting on futex for grace period
 * batching.
 */
#define URCU_WAIT_ATTEMPTS 1000
enum urcu_wait_state {
	/* URCU_WAIT_WAITING is compared directly (futex compares it). */
	URCU_WAIT_WAITING =	0,
	/* non-zero are used as masks. */
	URCU_WAIT_WAKEUP =	(1 << 0),
	URCU_WAIT_RUNNING =	(1 << 1),
	URCU_WAIT_TEARDOWN =	(1 << 2),
};
struct urcu_wait_node {
	struct cds_lfs_node node;
	int32_t state;	/* enum urcu_wait_state */
};
#define URCU_WAIT_NODE_INIT(name, _state)		\
	{ .state = _state }

#define DEFINE_URCU_WAIT_NODE(name, state)		\
	struct urcu_wait_node name = URCU_WAIT_NODE_INIT(name, state)

#define DECLARE_URCU_WAIT_NODE(name)			\
	struct urcu_wait_node name
struct urcu_wait_queue {
	struct __cds_lfs_stack stack;
};
#define URCU_WAIT_QUEUE_HEAD_INIT(name)			\
	{ .stack.head = CDS_LFS_END, }

#define DECLARE_URCU_WAIT_QUEUE(name)			\
	struct urcu_wait_queue name

#define DEFINE_URCU_WAIT_QUEUE(name)			\
	struct urcu_wait_queue name = URCU_WAIT_QUEUE_HEAD_INIT(name)
static inline
void urcu_wait_queue_init(struct urcu_wait_queue *queue)
{
	__cds_lfs_init(&queue->stack);
}
struct urcu_waiters {
	struct cds_lfs_head *head;
};
/*
 * Add ourselves atomically to a wait queue. Return 0 if the queue was
 * previously empty, else return 1.
 * A full memory barrier is issued before being added to the wait queue.
 */
static inline
bool urcu_wait_add(struct urcu_wait_queue *queue,
		struct urcu_wait_node *node)
{
	return cds_lfs_push(&queue->stack, &node->node);
}
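/*
 * Illustrative sketch (not part of this API; "global_queue" and
 * "do_work" are hypothetical names): the return value lets the first
 * thread added to an empty queue act as the worker on behalf of later
 * arrivals, which is how grace-period batching typically uses this
 * queue. The worker marks its own node as running so that a later
 * wake-all pass skips it:
 *
 *	struct urcu_wait_node my_wait;
 *
 *	urcu_wait_node_init(&my_wait, URCU_WAIT_WAITING);
 *	if (urcu_wait_add(&global_queue, &my_wait) != 0) {
 *		urcu_adaptative_busy_wait(&my_wait);
 *		return;
 *	}
 *	urcu_wait_set_state(&my_wait, URCU_WAIT_RUNNING);
 *	do_work();
 */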
/*
 * Atomically move all waiters from wait queue into our local struct
 * urcu_waiters.
 */
static inline
void urcu_move_waiters(struct urcu_waiters *waiters,
		struct urcu_wait_queue *queue)
{
	waiters->head = __cds_lfs_pop_all(&queue->stack);
}
static inline
void urcu_wait_set_state(struct urcu_wait_node *node,
		enum urcu_wait_state state)
{
	node->state = state;
}
static inline
void urcu_wait_or_state(struct urcu_wait_node *node,
		enum urcu_wait_state state)
{
	uatomic_or(&node->state, state);
}
static inline
void urcu_wait_node_init(struct urcu_wait_node *node,
		enum urcu_wait_state state)
{
	urcu_wait_set_state(node, state);
	cds_lfs_node_init(&node->node);
}
/*
 * Note: urcu_adaptative_wake_up needs "value" to stay allocated
 * throughout its execution. In this scheme, the waiter owns the node
 * memory, and we only allow it to free this memory when it receives the
 * URCU_WAIT_TEARDOWN flag.
 * Return true if wakeup is performed, false if thread was already
 * running.
 */
static inline
bool urcu_adaptative_wake_up(struct urcu_wait_node *wait)
{
	bool wakeup_performed = false;

	cmm_smp_mb();
	/*
	 * "or" of WAKEUP flag rather than "set" is useful for multiple
	 * concurrent wakeup sources. Note that "WAIT_TEARDOWN" becomes
	 * useless when we use multiple wakeup sources: lifetime of the
	 * "value" should then be handled by the caller.
	 */
	uatomic_or(&wait->state, URCU_WAIT_WAKEUP);
	if (!(uatomic_read(&wait->state) & URCU_WAIT_RUNNING)) {
		futex_noasync(&wait->state, FUTEX_WAKE, 1, NULL, NULL, 0);
		wakeup_performed = true;
	}
	/* Allow teardown of struct urcu_wait memory. */
	uatomic_or(&wait->state, URCU_WAIT_TEARDOWN);
	return wakeup_performed;
}
/*
 * Caller must initialize "value" to URCU_WAIT_WAITING before passing its
 * memory to the waker thread.
 */
static inline
void urcu_adaptative_busy_wait(struct urcu_wait_node *wait)
{
	unsigned int i;

	/* Load and test condition before read state */
	cmm_smp_rmb();
	for (i = 0; i < URCU_WAIT_ATTEMPTS; i++) {
		if (uatomic_read(&wait->state) != URCU_WAIT_WAITING)
			goto skip_futex_wait;
		caa_cpu_relax();
	}
	futex_noasync(&wait->state, FUTEX_WAIT,
		URCU_WAIT_WAITING, NULL, NULL, 0);
skip_futex_wait:

	/* Tell waker thread that we are running. */
	uatomic_or(&wait->state, URCU_WAIT_RUNNING);

	/*
	 * Wait until waker thread lets us know it's ok to tear down
	 * memory allocated for struct urcu_wait.
	 */
	for (i = 0; i < URCU_WAIT_ATTEMPTS; i++) {
		if (uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN)
			break;
		caa_cpu_relax();
	}
	while (!(uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN))
		poll(NULL, 0, 10);	/* Sleep between checks. */
	assert(uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN);
}
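/*
 * Illustrative sketch (hypothetical names, not part of this API):
 * because this function only returns once URCU_WAIT_TEARDOWN has been
 * observed, a stack-allocated wait node may safely go out of scope
 * right after it returns; the waker no longer touches the node at that
 * point:
 *
 *	{
 *		struct urcu_wait_node my_wait;
 *
 *		urcu_wait_node_init(&my_wait, URCU_WAIT_WAITING);
 *		(void) urcu_wait_add(&global_queue, &my_wait);
 *		urcu_adaptative_busy_wait(&my_wait);
 *	}
 */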
/*
 * Need mutual exclusion against other wakeup and move waiters
 * operations. It is provided by the caller.
 */
static inline
int urcu_dequeue_wake_single(struct urcu_wait_queue *queue)
{
	struct cds_lfs_node *node;
	struct urcu_wait_node *wait_node;
	int ret = 0;

	node = __cds_lfs_pop(&queue->stack);
	if (!node)
		return -ENOENT;	/* Queue is empty. */
	wait_node = caa_container_of(node, struct urcu_wait_node, node);
	CMM_STORE_SHARED(wait_node->node.next, NULL);
	/* Don't wake already running threads */
	if (!(wait_node->state & URCU_WAIT_RUNNING))
		ret = urcu_adaptative_wake_up(wait_node);
	return ret;
}
/*
 * Need mutual exclusion against other wakeup and move waiters
 * operations. It is provided by the caller.
 */
static inline
int urcu_dequeue_wake_n(struct urcu_wait_queue *queue, int n)
{
	int nr_wakeup = 0, i;

	for (i = 0; i < n; i++) {
		int ret;

		ret = urcu_dequeue_wake_single(queue);
		if (ret < 0)
			break;	/* Queue is empty. */
		if (ret > 0)
			nr_wakeup++;
	}
	return nr_wakeup;
}
static inline
int urcu_wake_all_waiters(struct urcu_waiters *waiters)
{
	struct cds_lfs_node *iter, *iter_n;
	int nr_wakeup = 0;

	/* Wake all waiters in our stack head */
	cds_lfs_for_each_safe(waiters->head, iter, iter_n) {
		struct urcu_wait_node *wait_node =
			caa_container_of(iter, struct urcu_wait_node, node);

		CMM_STORE_SHARED(wait_node->node.next, NULL);
		/* Don't wake already running threads */
		if (wait_node->state & URCU_WAIT_RUNNING)
			continue;
		if (urcu_adaptative_wake_up(wait_node))
			nr_wakeup++;
	}
	return nr_wakeup;
}
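/*
 * Illustrative sketch (hypothetical names, not part of this API): the
 * worker thread grabs the whole LIFO while holding the caller-provided
 * mutual exclusion ("worker_mutex" here), then wakes every dequeued
 * waiter once the work they are blocked on has completed:
 *
 *	struct urcu_waiters waiters;
 *
 *	pthread_mutex_lock(&worker_mutex);
 *	urcu_move_waiters(&waiters, &global_queue);
 *	do_work();
 *	pthread_mutex_unlock(&worker_mutex);
 *	(void) urcu_wake_all_waiters(&waiters);
 */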
#endif /* _URCU_WAITQUEUE_LIFO_H */