#ifndef _URCU_WAITQUEUE_LIFO_H
#define _URCU_WAITQUEUE_LIFO_H

/*
 * urcu/waitqueue-lifo.h
 *
 * Userspace RCU library - wait queue scheme with LIFO semantic
 *
 * Copyright (c) 2012-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 */

#include <assert.h>
#include <errno.h>
#include <poll.h>
#include <stdbool.h>
#include <stdint.h>

#include <urcu/uatomic.h>
#include <urcu/lfstack.h>
#include <urcu/futex.h>

/*
 * Number of busy-loop attempts before waiting on futex for grace period
 * batching.
 */
#define URCU_WAIT_ATTEMPTS 1000

enum urcu_wait_state {
	/* URCU_WAIT_WAITING is compared directly (futex compares it). */
	URCU_WAIT_WAITING =	0,
	/* Non-zero values are used as bitmasks. */
	URCU_WAIT_WAKEUP =	(1 << 0),
	URCU_WAIT_RUNNING =	(1 << 1),
	URCU_WAIT_TEARDOWN =	(1 << 2),
};

struct urcu_wait_node {
	struct cds_lfs_node node;
	int32_t state;		/* enum urcu_wait_state */
	bool in_waitqueue;
};

#define URCU_WAIT_NODE_INIT(name, _state)		\
	{ .state = _state }

#define DEFINE_URCU_WAIT_NODE(name, state)		\
	struct urcu_wait_node name = URCU_WAIT_NODE_INIT(name, state)

#define DECLARE_URCU_WAIT_NODE(name)			\
	struct urcu_wait_node name

struct urcu_wait_queue {
	struct __cds_lfs_stack stack;
};

#define URCU_WAIT_QUEUE_HEAD_INIT(name)			\
	{ .stack.head = CDS_LFS_END, }

#define DECLARE_URCU_WAIT_QUEUE(name)			\
	struct urcu_wait_queue name

#define DEFINE_URCU_WAIT_QUEUE(name)			\
	struct urcu_wait_queue name = URCU_WAIT_QUEUE_HEAD_INIT(name)

static inline
void urcu_wait_queue_init(struct urcu_wait_queue *queue)
{
	__cds_lfs_init(&queue->stack);
}

struct urcu_waiters {
	struct cds_lfs_head *head;
};

/*
 * Add ourselves atomically to a wait queue. Return 0 if the queue was
 * previously empty, else return 1.
 * A full memory barrier is issued before being added to the wait queue.
 */
static inline
bool urcu_wait_add(struct urcu_wait_queue *queue,
		struct urcu_wait_node *node)
{
	cds_lfs_node_init(&node->node);
	CMM_STORE_SHARED(node->in_waitqueue, true);
	return cds_lfs_push(&queue->stack, &node->node);
}

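/*
 * Example (illustrative sketch, not part of this API; "queue" is
 * assumed to be an initialized struct urcu_wait_queue): a waiter
 * enqueues itself, then blocks adaptatively with
 * urcu_adaptative_busy_wait(), defined below:
 *
 *	DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
 *
 *	urcu_wait_add(&queue, &wait);
 *	urcu_adaptative_busy_wait(&wait);
 */
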
/*
 * Atomically move all waiters from wait queue into our local struct
 * urcu_waiters.
 */
static inline
void urcu_move_waiters(struct urcu_waiters *waiters,
		struct urcu_wait_queue *queue)
{
	waiters->head = __cds_lfs_pop_all(&queue->stack);
}

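/*
 * Example (illustrative sketch, not part of this API; "queue" and
 * do_work() are assumptions): a waker snapshots all current waiters,
 * completes its work (e.g. a grace period), then wakes the whole batch
 * with urcu_wake_all_waiters(), defined below:
 *
 *	struct urcu_waiters waiters;
 *
 *	urcu_move_waiters(&waiters, &queue);
 *	do_work();
 *	(void) urcu_wake_all_waiters(&waiters);
 */
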
static inline
void urcu_wait_set_state(struct urcu_wait_node *node,
		enum urcu_wait_state state)
{
	node->state = state;
}

static inline
void urcu_wait_or_state(struct urcu_wait_node *node,
		enum urcu_wait_state state)
{
	uatomic_or(&node->state, state);
}

static inline
void urcu_wait_node_init(struct urcu_wait_node *node,
		enum urcu_wait_state state)
{
	urcu_wait_set_state(node, state);
	cds_lfs_node_init(&node->node);
	node->in_waitqueue = false;
}

static inline
bool urcu_in_waitqueue(struct urcu_wait_node *node)
{
	return CMM_LOAD_SHARED(node->in_waitqueue);
}

/*
 * Note: urcu_adaptative_wake_up needs "value" to stay allocated
 * throughout its execution. In this scheme, the waiter owns the node
 * memory, and we only allow it to free this memory when it receives the
 * URCU_WAIT_TEARDOWN flag.
 * Return true if wakeup is performed, false if thread was already
 * running.
 */
static inline
bool urcu_adaptative_wake_up(struct urcu_wait_node *wait)
{
	bool wakeup_performed = false;

	cmm_smp_mb();
	/*
	 * "or" of WAKEUP flag rather than "set" is useful for multiple
	 * concurrent wakeup sources. Note that "WAIT_TEARDOWN" becomes
	 * useless when we use multiple wakeup sources: lifetime of the
	 * "value" should then be handled by the caller.
	 */
	uatomic_or(&wait->state, URCU_WAIT_WAKEUP);
	if (!(uatomic_read(&wait->state) & URCU_WAIT_RUNNING)) {
		futex_noasync(&wait->state, FUTEX_WAKE, 1, NULL, NULL, 0);
		wakeup_performed = true;
	}
	/* Allow teardown of struct urcu_wait memory. */
	uatomic_or(&wait->state, URCU_WAIT_TEARDOWN);
	return wakeup_performed;
}

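/*
 * Example (illustrative sketch, not part of this API; exclusive wakeup
 * access to "queue" is assumed to be provided by the caller): pop one
 * waiter and wake it. After urcu_adaptative_wake_up() returns,
 * URCU_WAIT_TEARDOWN is set and the waiter may free the node, so the
 * waker must not touch it again:
 *
 *	struct cds_lfs_node *node = __cds_lfs_pop(&queue.stack);
 *
 *	if (node) {
 *		struct urcu_wait_node *wait_node =
 *			caa_container_of(node, struct urcu_wait_node, node);
 *
 *		CMM_STORE_SHARED(wait_node->in_waitqueue, false);
 *		(void) urcu_adaptative_wake_up(wait_node);
 *	}
 */
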
/*
 * Caller must initialize "value" to URCU_WAIT_WAITING before passing its
 * memory to the waker thread.
 */
static inline
void urcu_adaptative_busy_wait(struct urcu_wait_node *wait)
{
	unsigned int i;

	/* Load and test condition before reading state */
	cmm_smp_rmb();
	for (i = 0; i < URCU_WAIT_ATTEMPTS; i++) {
		if (uatomic_read(&wait->state) != URCU_WAIT_WAITING)
			goto skip_futex_wait;
		caa_cpu_relax();
	}
	futex_noasync(&wait->state, FUTEX_WAIT,
			URCU_WAIT_WAITING, NULL, NULL, 0);
skip_futex_wait:
	/* Tell the waker thread that we are running. */
	uatomic_or(&wait->state, URCU_WAIT_RUNNING);

	/*
	 * Wait until the waker thread lets us know it's ok to tear down
	 * memory allocated for struct urcu_wait.
	 */
	for (i = 0; i < URCU_WAIT_ATTEMPTS; i++) {
		if (uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN)
			break;
		caa_cpu_relax();
	}
	while (!(uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN))
		poll(NULL, 0, 10);
	assert(uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN);
}

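/*
 * Example (illustrative sketch, not part of this API): because
 * urcu_adaptative_busy_wait() only returns once the waker has set
 * URCU_WAIT_TEARDOWN, the wait node can safely live on the waiter's
 * stack:
 *
 *	static void wait_for_wakeup(struct urcu_wait_queue *queue)
 *	{
 *		struct urcu_wait_node wait;
 *
 *		urcu_wait_node_init(&wait, URCU_WAIT_WAITING);
 *		urcu_wait_add(queue, &wait);
 *		urcu_adaptative_busy_wait(&wait);
 *	}
 *
 * "wait" may go out of scope after urcu_adaptative_busy_wait() returns:
 * the waker is guaranteed to be done with the node memory.
 */
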
/*
 * Need mutual exclusion against other wakeup and move waiters
 * operations. It is provided by the caller.
 */
static inline
int urcu_dequeue_wake_single(struct urcu_wait_queue *queue)
{
	struct cds_lfs_node *node;
	struct urcu_wait_node *wait_node;
	int ret = 0;

	node = __cds_lfs_pop(&queue->stack);
	if (!node)
		return -ENOENT;
	wait_node = caa_container_of(node, struct urcu_wait_node, node);
	CMM_STORE_SHARED(wait_node->in_waitqueue, false);
	/* Don't wake already running threads */
	if (!(wait_node->state & URCU_WAIT_RUNNING))
		ret = urcu_adaptative_wake_up(wait_node);
	return ret;
}

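/*
 * Example (illustrative sketch; "wakeup_mutex" is an assumption, not
 * part of this API): the caller-provided mutual exclusion can be a
 * plain pthread mutex serializing wakeup and move-waiters operations:
 *
 *	static pthread_mutex_t wakeup_mutex = PTHREAD_MUTEX_INITIALIZER;
 *
 *	pthread_mutex_lock(&wakeup_mutex);
 *	ret = urcu_dequeue_wake_single(&queue);
 *	pthread_mutex_unlock(&wakeup_mutex);
 */
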
/*
 * Need mutual exclusion against other wakeup and move waiters
 * operations. It is provided by the caller.
 */
static inline
int urcu_dequeue_wake_n(struct urcu_wait_queue *queue, int n)
{
	int nr_wakeup = 0, i;

	for (i = 0; i < n; i++) {
		int ret;

		ret = urcu_dequeue_wake_single(queue);
		if (ret < 0)
			break;	/* Wait queue is empty. */
		nr_wakeup += ret;
	}
	return nr_wakeup;
}

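/*
 * Example (illustrative sketch, not part of this API; locking follows
 * the sketch above): wake up to 4 waiters. The return value counts
 * wakeups actually performed, so already-running waiters do not
 * contribute:
 *
 *	pthread_mutex_lock(&wakeup_mutex);
 *	nr_woken = urcu_dequeue_wake_n(&queue, 4);
 *	pthread_mutex_unlock(&wakeup_mutex);
 */
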
static inline
int urcu_wake_all_waiters(struct urcu_waiters *waiters)
{
	struct cds_lfs_node *iter, *iter_n;
	int nr_wakeup = 0;

	/* Wake all waiters in our stack head */
	cds_lfs_for_each_safe(waiters->head, iter, iter_n) {
		struct urcu_wait_node *wait_node =
			caa_container_of(iter, struct urcu_wait_node, node);

		CMM_STORE_SHARED(wait_node->in_waitqueue, false);
		/* Don't wake already running threads */
		if (wait_node->state & URCU_WAIT_RUNNING)
			continue;
		if (urcu_adaptative_wake_up(wait_node))
			nr_wakeup++;
	}
	return nr_wakeup;
}

#endif /* _URCU_WAITQUEUE_LIFO_H */