struct urcu_wait_node {
struct cds_lfs_node node;
int32_t state; /* enum urcu_wait_state */
+ int in_waitqueue; /* nonzero while queued: set by urcu_wait_add(), cleared by the waker before wake-up */
};
#define URCU_WAIT_NODE_INIT(name, _state) \
bool urcu_wait_add(struct urcu_wait_queue *queue,
struct urcu_wait_node *node)
{
+ cds_lfs_node_init(&node->node); /* re-init on every add — NOTE(review): suggests nodes are reused across wait cycles; confirm */
+ CMM_STORE_SHARED(node->in_waitqueue, true); /* mark queued before publishing the node via push */
return cds_lfs_push(&queue->stack, &node->node);
}
{
urcu_wait_set_state(node, state);
cds_lfs_node_init(&node->node);
+ node->in_waitqueue = false; /* not queued yet; urcu_wait_add() sets it to true */
+}
+
+/*
+ * Return whether @node is still on a wait queue: true between
+ * urcu_wait_add() and the waker clearing the flag. Read with
+ * CMM_LOAD_SHARED since the waker stores concurrently.
+ * NOTE(review): presumably polled by waiters to detect dequeue —
+ * confirm against callers (not visible in this patch).
+ */
+static inline
+bool urcu_in_waitqueue(struct urcu_wait_node *node)
+{
+ return CMM_LOAD_SHARED(node->in_waitqueue);
+}
/*
if (!node)
return -ENOENT;
wait_node = caa_container_of(node, struct urcu_wait_node, node);
- CMM_STORE_SHARED(wait_node->node.next, NULL);
+ /* Signal dequeue via the dedicated flag instead of clearing ->next. */
+ CMM_STORE_SHARED(wait_node->in_waitqueue, false);
/* Don't wake already running threads */
if (!(wait_node->state & URCU_WAIT_RUNNING))
ret = urcu_adaptative_wake_up(wait_node);
struct urcu_wait_node *wait_node =
caa_container_of(iter, struct urcu_wait_node, node);
- CMM_STORE_SHARED(wait_node->node.next, NULL);
+ /* Signal dequeue via the dedicated flag instead of clearing ->next. */
+ CMM_STORE_SHARED(wait_node->in_waitqueue, false);
/* Don't wake already running threads */
if (wait_node->state & URCU_WAIT_RUNNING)
continue;