+// SPDX-FileCopyrightText: 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+//
+// SPDX-License-Identifier: LGPL-2.1-or-later
+
#ifndef _URCU_STATIC_WFSTACK_H
#define _URCU_STATIC_WFSTACK_H
/*
- * urcu/static/wfstack.h
- *
* Userspace RCU library - Stack with with wait-free push, blocking traversal.
*
* TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/wfstack.h for
* linking dynamically with the userspace rcu library.
- *
- * Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <pthread.h>
{
struct __cds_wfs_stack *s = u_stack._s;
- return ___cds_wfs_end(CMM_LOAD_SHARED(s->head));
+ return ___cds_wfs_end(uatomic_load(&s->head, CMM_RELAXED));
}
/*
* Issues a full memory barrier before push. No mutual exclusion is
* required.
*
+ * Operations before push are consistent when observed after associated pop.
+ *
* Returns 0 if the stack was empty prior to adding the node.
* Returns non-zero otherwise.
*/
* uatomic_xchg() implicit memory barrier orders earlier stores
* to node (setting it to NULL) before publication.
*/
- old_head = uatomic_xchg(&s->head, new_head);
+ cmm_emit_legacy_smp_mb();
+ old_head = uatomic_xchg_mo(&s->head, new_head, CMM_SEQ_CST);
/*
* At this point, dequeuers see a NULL node->next, they should
* busy-wait until node->next is set to old_head.
*/
- CMM_STORE_SHARED(node->next, &old_head->node);
+ uatomic_store(&node->next, &old_head->node, CMM_RELEASE);
return !___cds_wfs_end(old_head);
}
/*
* Adaptative busy-looping waiting for push to complete.
*/
- while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
+ while ((next = uatomic_load(&node->next, CMM_CONSUME)) == NULL) {
if (!blocking)
return CDS_WFS_WOULDBLOCK;
if (++attempt >= CDS_WFS_ADAPT_ATTEMPTS) {
if (state)
*state = 0;
for (;;) {
- head = CMM_LOAD_SHARED(s->head);
+ head = uatomic_load(&s->head, CMM_CONSUME);
if (___cds_wfs_end(head)) {
return NULL;
}
return CDS_WFS_WOULDBLOCK;
}
new_head = caa_container_of(next, struct cds_wfs_head, node);
- if (uatomic_cmpxchg(&s->head, head, new_head) == head) {
+ if (uatomic_cmpxchg_mo(&s->head, head, new_head,
+ CMM_SEQ_CST, CMM_SEQ_CST) == head) {
if (state && ___cds_wfs_end(new_head))
*state |= CDS_WFS_STATE_LAST;
+ cmm_emit_legacy_smp_mb();
return &head->node;
}
if (!blocking) {
*
* Returns NULL if stack is empty.
*
+ * Operations after pop are consistent when observed before associated push.
+ *
* __cds_wfs_pop_blocking needs to be synchronized using one of the
* following techniques:
*
/*
* __cds_wfs_pop_all: pop all nodes from a stack.
*
+ * Operations after pop are consistent when observed before associated push.
+ *
* __cds_wfs_pop_all does not require any synchronization with other
* push, nor with other __cds_wfs_pop_all, but requires synchronization
* matching the technique used to synchronize __cds_wfs_pop_blocking:
* taking care to order writes to each node prior to the full
* memory barrier after this uatomic_xchg().
*/
- head = uatomic_xchg(&s->head, CDS_WFS_END);
+ head = uatomic_xchg_mo(&s->head, CDS_WFS_END, CMM_SEQ_CST);
+ cmm_emit_legacy_smp_mb();
if (___cds_wfs_end(head))
return NULL;
return head;