fix: handle EINTR correctly in get_cpu_mask_from_sysfs
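Handling EINTR correctly typically means retrying the interrupted syscall rather than treating the interruption as a hard error. A minimal sketch of that pattern for reading a sysfs file, assuming a hypothetical helper read_sysfs_file() (illustrative only, not the actual liburcu implementation):

    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>

    static ssize_t read_sysfs_file(const char *path, char *buf, size_t len)
    {
            ssize_t total = 0, ret;
            int fd;

            do {
                    fd = open(path, O_RDONLY);
            } while (fd < 0 && errno == EINTR);     /* retry interrupted open() */
            if (fd < 0)
                    return -1;

            do {
                    ret = read(fd, buf + total, len - total);
                    if (ret > 0)
                            total += ret;
            } while ((ret < 0 && errno == EINTR) || /* retry interrupted read() */
                            (ret > 0 && (size_t) total < len));
            if (ret < 0)
                    total = -1;

            close(fd);
            return total;
    }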
diff --git a/src/urcu-wait.h b/src/urcu-wait.h
index 93e8e516d868e7bba5b481416d3095b3154fc3fb..d77282bd55defc12e9dc78a797ab80c54777829b 100644
--- a/src/urcu-wait.h
+++ b/src/urcu-wait.h
@@ -1,26 +1,12 @@
+// SPDX-FileCopyrightText: 2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+//
+// SPDX-License-Identifier: LGPL-2.1-or-later
+
 #ifndef _URCU_WAIT_H
 #define _URCU_WAIT_H
 
 /*
- * urcu-wait.h
- *
  * Userspace RCU library wait/wakeup management
- *
- * Copyright (c) 2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
 #include <urcu/assert.h>
@@ -50,7 +36,7 @@ struct urcu_wait_node {
 };
 
 #define URCU_WAIT_NODE_INIT(name, _state)              \
-       { .state = _state }
+       { .node = { .next = NULL }, .state = _state }
 
 #define DEFINE_URCU_WAIT_NODE(name, state)             \
        struct urcu_wait_node name = URCU_WAIT_NODE_INIT(name, state)
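The initializer now names both members explicitly, so the embedded wait-queue node is zeroed by the macro itself rather than left to implicit initialization. As a sketch, DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING) now expands to roughly the following (member names taken from the diff; the rest of the struct layout is assumed):

    struct urcu_wait_node wait = {
            .node = { .next = NULL },       /* embedded wait-queue node */
            .state = URCU_WAIT_WAITING,     /* thread is about to wait */
    };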
@@ -127,16 +113,15 @@ void urcu_wait_node_init(struct urcu_wait_node *node,
 static inline
 void urcu_adaptative_wake_up(struct urcu_wait_node *wait)
 {
-       cmm_smp_mb();
        urcu_posix_assert(uatomic_read(&wait->state) == URCU_WAIT_WAITING);
-       uatomic_set(&wait->state, URCU_WAIT_WAKEUP);
+       uatomic_store(&wait->state, URCU_WAIT_WAKEUP, CMM_RELEASE);
        if (!(uatomic_read(&wait->state) & URCU_WAIT_RUNNING)) {
                if (futex_noasync(&wait->state, FUTEX_WAKE, 1,
                                NULL, NULL, 0) < 0)
                        urcu_die(errno);
        }
        /* Allow teardown of struct urcu_wait memory. */
-       uatomic_or(&wait->state, URCU_WAIT_TEARDOWN);
+       uatomic_or_mo(&wait->state, URCU_WAIT_TEARDOWN, CMM_RELEASE);
 }
 
 /*
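On the wake-up side, the explicit cmm_smp_mb() followed by a plain uatomic_set() is replaced by a single store with CMM_RELEASE ordering, and the teardown bit is set with a release uatomic_or_mo(). A rough C11 analogue of the release/acquire pairing (illustrative only; liburcu uses its own uatomic/cmm API rather than <stdatomic.h>):

    #include <stdatomic.h>
    #include <stdint.h>

    enum { WAITING = 0, WAKEUP = 1 };       /* stand-ins for the URCU_WAIT_* states */

    static _Atomic int32_t state = WAITING;

    /* Waker: the release store publishes every write made before it. */
    static void wake(void)
    {
            atomic_store_explicit(&state, WAKEUP, memory_order_release);
    }

    /* Waiter: the acquire load pairs with the release store above, so once
     * it observes WAKEUP it also observes the waker's earlier writes. */
    static int woken(void)
    {
            return atomic_load_explicit(&state, memory_order_acquire) == WAKEUP;
    }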
@@ -151,11 +136,11 @@ void urcu_adaptative_busy_wait(struct urcu_wait_node *wait)
        /* Load and test condition before read state */
        cmm_smp_rmb();
        for (i = 0; i < URCU_WAIT_ATTEMPTS; i++) {
-               if (uatomic_read(&wait->state) != URCU_WAIT_WAITING)
+               if (uatomic_load(&wait->state, CMM_ACQUIRE) != URCU_WAIT_WAITING)
                        goto skip_futex_wait;
                caa_cpu_relax();
        }
-       while (uatomic_read(&wait->state) == URCU_WAIT_WAITING) {
+       while (uatomic_load(&wait->state, CMM_ACQUIRE) == URCU_WAIT_WAITING) {
                if (!futex_noasync(&wait->state, FUTEX_WAIT, URCU_WAIT_WAITING, NULL, NULL, 0)) {
                        /*
                         * Prior queued wakeups queued by unrelated code
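The waiter keeps the same adaptive shape while switching its reads to acquire loads: spin a bounded number of times on the state word, then fall back to sleeping in the kernel on the futex. A self-contained sketch of that pattern using the raw futex syscall (illustrative only; liburcu goes through its futex_noasync() wrapper and caa_cpu_relax()):

    #include <errno.h>
    #include <linux/futex.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #define SPIN_ATTEMPTS   1000    /* stand-in for URCU_WAIT_ATTEMPTS */

    static void adaptive_wait(_Atomic int32_t *state, int32_t waiting_val)
    {
            int i;

            /* Busy-wait first: cheap if the wake-up arrives quickly. */
            for (i = 0; i < SPIN_ATTEMPTS; i++) {
                    if (atomic_load_explicit(state, memory_order_acquire) != waiting_val)
                            return;
            }
            /* Still waiting: block until the futex word changes. */
            while (atomic_load_explicit(state, memory_order_acquire) == waiting_val) {
                    if (syscall(SYS_futex, state, FUTEX_WAIT, waiting_val,
                                    NULL, NULL, 0) < 0 &&
                                    errno != EAGAIN && errno != EINTR)
                            break;  /* unexpected error; the real code calls urcu_die() */
            }
    }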
@@ -190,11 +175,11 @@ skip_futex_wait:
         * memory allocated for struct urcu_wait.
         */
        for (i = 0; i < URCU_WAIT_ATTEMPTS; i++) {
-               if (uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN)
+               if (uatomic_load(&wait->state, CMM_RELAXED) & URCU_WAIT_TEARDOWN)
                        break;
                caa_cpu_relax();
        }
-       while (!(uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN))
+       while (!(uatomic_load(&wait->state, CMM_ACQUIRE) & URCU_WAIT_TEARDOWN))
                poll(NULL, 0, 10);
        urcu_posix_assert(uatomic_read(&wait->state) & URCU_WAIT_TEARDOWN);
 }
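The final loop is a lifetime handshake, not a wake-up: the waker sets URCU_WAIT_TEARDOWN with release ordering only once it has finished touching the wait node, and the waiter must observe that bit with acquire ordering before it lets the node (often stack-allocated) go out of scope. A minimal sketch of the pairing (flag value and names are stand-ins):

    #include <stdatomic.h>
    #include <stdint.h>

    enum { TEARDOWN = 4 };  /* stand-in bit, not the actual URCU_WAIT_TEARDOWN value */

    /* Waker: last access to the wait node, then allow its teardown. */
    static void waker_done(_Atomic int32_t *state)
    {
            atomic_fetch_or_explicit(state, TEARDOWN, memory_order_release);
    }

    /* Waiter: do not free or leave the frame owning the node before this. */
    static void wait_for_teardown(_Atomic int32_t *state)
    {
            while (!(atomic_load_explicit(state, memory_order_acquire) & TEARDOWN))
                    ;       /* urcu-wait.h spins briefly, then poll()s every 10 ms */
    }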
@@ -210,7 +195,7 @@ void urcu_wake_all_waiters(struct urcu_waiters *waiters)
                        caa_container_of(iter, struct urcu_wait_node, node);
 
                /* Don't wake already running threads */
-               if (wait_node->state & URCU_WAIT_RUNNING)
+               if (uatomic_load(&wait_node->state, CMM_RELAXED) & URCU_WAIT_RUNNING)
                        continue;
                urcu_adaptative_wake_up(wait_node);
        }
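In the broadcast path the RUNNING check only filters out waiters that already stopped waiting on their own, so a relaxed load is used; the synchronizing accesses happen inside urcu_adaptative_wake_up() itself. A sketch of that shape (names are stand-ins):

    #include <stdatomic.h>
    #include <stdint.h>

    enum { RUNNING = 2 };   /* stand-in bit for URCU_WAIT_RUNNING */

    static void wake_all(_Atomic int32_t **states, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    /* Relaxed is enough here: this read only decides whether a
                     * wake-up is needed; ordering is provided by the
                     * release/acquire accesses in the wake-up path itself. */
                    if (atomic_load_explicit(states[i], memory_order_relaxed) & RUNNING)
                            continue;
                    /* ... release-store WAKEUP and futex-wake, as sketched above ... */
            }
    }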