waitqueue: add in_waitqueue field
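
The wait queue now uses lfstack rather than wfstack, and each wait
node carries an explicit in_waitqueue flag. The flag is set when the
node is pushed onto the queue and cleared when the node is dequeued
(urcu_dequeue_wake_single, urcu_wake_all_waiters), replacing the
previous scheme of clearing node.next after pop. The new
urcu_in_waitqueue() helper lets a waiter test whether its node is
still queued, since the flag is cleared before any wakeup is issued.
urcu_adaptative_wake_up() now reports whether a futex wakeup was
actually performed, so the dequeue-and-wake helpers return accurate
wakeup counts rather than counting every dequeued node. Moving to
lfstack presumably also avoids the blocking behavior of
__cds_wfs_pop_blocking(): __cds_lfs_pop() is lock-free.

Minimal usage sketch (illustrative only, not part of this patch;
assumes access to this internal header, and uses
urcu_adaptative_busy_wait(), the waiter-side primitive defined
elsewhere in this file):

	#include <urcu/waitqueue-lifo.h>

	static struct urcu_wait_queue queue =
		URCU_WAIT_QUEUE_HEAD_INIT(queue);

	static void waiter(void)
	{
		struct urcu_wait_node wait;

		urcu_wait_node_init(&wait, URCU_WAIT_WAITING);
		/* Push onto the queue; sets in_waitqueue. */
		(void) urcu_wait_add(&queue, &wait);
		/* Adaptative busy-wait, then futex wait, until woken. */
		urcu_adaptative_busy_wait(&wait);
	}

	static void waker(void)
	{
		int ret = urcu_dequeue_wake_single(&queue);

		/*
		 * ret == -ENOENT: queue was empty.
		 * ret == 0: dequeued waiter was already running.
		 * ret == 1: a futex wakeup was performed.
		 */
		(void) ret;
	}
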
diff --git a/urcu/waitqueue-lifo.h b/urcu/waitqueue-lifo.h
index 4d800f027c3a093dd8ed83434c85dbaf002b89ff..96b211c68355bbd9611c4895ebaab5a295e961fb 100644
--- a/urcu/waitqueue-lifo.h
+++ b/urcu/waitqueue-lifo.h
@@ -24,7 +24,7 @@
  */
 
 #include <urcu/uatomic.h>
-#include <urcu/wfstack.h>
+#include <urcu/lfstack.h>
 #include <urcu/futex.h>
 
 /*
@@ -43,8 +43,9 @@ enum urcu_wait_state {
 };
 
 struct urcu_wait_node {
-       struct cds_wfs_node node;
+       struct cds_lfs_node node;
        int32_t state;  /* enum urcu_wait_state */
+       int in_waitqueue;
 };
 
 #define URCU_WAIT_NODE_INIT(name, _state)              \
@@ -57,11 +58,11 @@ struct urcu_wait_node {
        struct urcu_wait_node name
 
 struct urcu_wait_queue {
-       struct __cds_wfs_stack stack;
+       struct __cds_lfs_stack stack;
 };
 
 #define URCU_WAIT_QUEUE_HEAD_INIT(name)                        \
-       { .stack.head = CDS_WFS_END, }
+       { .stack.head = CDS_LFS_END, }
 
 #define DECLARE_URCU_WAIT_QUEUE(name)                  \
        struct urcu_wait_queue name
@@ -72,11 +73,11 @@ struct urcu_wait_queue {
 static inline
 void urcu_wait_queue_init(struct urcu_wait_queue *queue)
 {
-       __cds_wfs_init(&queue->stack);
+       __cds_lfs_init(&queue->stack);
 }
 
 struct urcu_waiters {
-       struct cds_wfs_head *head;
+       struct cds_lfs_head *head;
 };
 
 /*
@@ -88,7 +89,9 @@ static inline
 bool urcu_wait_add(struct urcu_wait_queue *queue,
                struct urcu_wait_node *node)
 {
-       return cds_wfs_push(&queue->stack, &node->node);
+       cds_lfs_node_init(&node->node);
+       CMM_STORE_SHARED(node->in_waitqueue, true);
+       return cds_lfs_push(&queue->stack, &node->node);
 }
 
 /*
@@ -99,7 +102,7 @@ static inline
 void urcu_move_waiters(struct urcu_waiters *waiters,
                struct urcu_wait_queue *queue)
 {
-       waiters->head = __cds_wfs_pop_all(&queue->stack);
+       waiters->head = __cds_lfs_pop_all(&queue->stack);
 }
 
 static inline
@@ -121,7 +124,14 @@ void urcu_wait_node_init(struct urcu_wait_node *node,
                enum urcu_wait_state state)
 {
        urcu_wait_set_state(node, state);
-       cds_wfs_node_init(&node->node);
+       cds_lfs_node_init(&node->node);
+       node->in_waitqueue = false;
+}
+
+static inline
+bool urcu_in_waitqueue(struct urcu_wait_node *node)
+{
+       return CMM_LOAD_SHARED(node->in_waitqueue);
 }
 
 /*
@@ -129,10 +139,14 @@ void urcu_wait_node_init(struct urcu_wait_node *node,
  * throughout its execution. In this scheme, the waiter owns the node
  * memory, and we only allow it to free this memory when it receives the
  * URCU_WAIT_TEARDOWN flag.
+ * Return true if a wakeup was performed, false if the thread was
+ * already running.
  */
 static inline
-void urcu_adaptative_wake_up(struct urcu_wait_node *wait)
+bool urcu_adaptative_wake_up(struct urcu_wait_node *wait)
 {
+       bool wakeup_performed = false;
+
        cmm_smp_mb();
        /*
         * "or" of WAKEUP flag rather than "set" is useful for multiple
@@ -141,10 +155,13 @@ void urcu_adaptative_wake_up(struct urcu_wait_node *wait)
         * "value" should then be handled by the caller.
         */
        uatomic_or(&wait->state, URCU_WAIT_WAKEUP);
-       if (!(uatomic_read(&wait->state) & URCU_WAIT_RUNNING))
+       if (!(uatomic_read(&wait->state) & URCU_WAIT_RUNNING)) {
                futex_noasync(&wait->state, FUTEX_WAKE, 1, NULL, NULL, 0);
+               wakeup_performed = true;
+       }
        /* Allow teardown of struct urcu_wait memory. */
        uatomic_or(&wait->state, URCU_WAIT_TEARDOWN);
+       return wakeup_performed;
 }
 
 /*
@@ -191,21 +208,19 @@ skip_futex_wait:
 static inline
 int urcu_dequeue_wake_single(struct urcu_wait_queue *queue)
 {
-       struct cds_wfs_node *node;
+       struct cds_lfs_node *node;
        struct urcu_wait_node *wait_node;
-       int wakeup_done = 0;
+       int ret = 0;
 
-       node = __cds_wfs_pop_blocking(&queue->stack);
+       node = __cds_lfs_pop(&queue->stack);
        if (!node)
                return -ENOENT;
        wait_node = caa_container_of(node, struct urcu_wait_node, node);
-       CMM_STORE_SHARED(wait_node->node.next, NULL);
+       CMM_STORE_SHARED(wait_node->in_waitqueue, false);
        /* Don't wake already running threads */
-       if (!(wait_node->state & URCU_WAIT_RUNNING)) {
-               urcu_adaptative_wake_up(wait_node);
-               wakeup_done = 1;
-       }
-       return wakeup_done;
+       if (!(wait_node->state & URCU_WAIT_RUNNING))
+               ret = urcu_adaptative_wake_up(wait_node);
+       return ret;
 }
 
 /*
@@ -234,20 +249,20 @@ int urcu_dequeue_wake_n(struct urcu_wait_queue *queue, int n)
 static inline
 int urcu_wake_all_waiters(struct urcu_waiters *waiters)
 {
-       struct cds_wfs_node *iter, *iter_n;
+       struct cds_lfs_node *iter, *iter_n;
        int nr_wakeup = 0;
 
        /* Wake all waiters in our stack head */
-       cds_wfs_for_each_blocking_safe(waiters->head, iter, iter_n) {
+       cds_lfs_for_each_safe(waiters->head, iter, iter_n) {
                struct urcu_wait_node *wait_node =
                        caa_container_of(iter, struct urcu_wait_node, node);
 
-               CMM_STORE_SHARED(wait_node->node.next, NULL);
+               CMM_STORE_SHARED(wait_node->in_waitqueue, false);
                /* Don't wake already running threads */
                if (wait_node->state & URCU_WAIT_RUNNING)
                        continue;
-               urcu_adaptative_wake_up(wait_node);
-               nr_wakeup++;
+               if (urcu_adaptative_wake_up(wait_node))
+                       nr_wakeup++;
        }
        return nr_wakeup;
 }