X-Git-Url: https://git.liburcu.org/?a=blobdiff_plain;f=include%2Furcu%2Fstatic%2Flfstack.h;h=d7e70d4966d6bab1f5f040c634a025c84f7baa99;hb=HEAD;hp=a05acb4904d56308caadf3b7cc030ee81ea3ce94;hpb=2875743718fb23293190b5edd0f2b530977270b1;p=urcu.git

diff --git a/include/urcu/static/lfstack.h b/include/urcu/static/lfstack.h
index a05acb4..d7e70d4 100644
--- a/include/urcu/static/lfstack.h
+++ b/include/urcu/static/lfstack.h
@@ -1,29 +1,15 @@
+// SPDX-FileCopyrightText: 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+//
+// SPDX-License-Identifier: LGPL-2.1-or-later
+
 #ifndef _URCU_STATIC_LFSTACK_H
 #define _URCU_STATIC_LFSTACK_H
 
 /*
- * urcu/static/lfstack.h
- *
  * Userspace RCU library - Lock-Free Stack
  *
- * Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
  * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/lfstack.h for
  * linking dynamically with the userspace rcu library.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
 #include <stdbool.h>
@@ -114,7 +100,7 @@ bool ___cds_lfs_empty_head(struct cds_lfs_head *head)
 static inline
 bool _cds_lfs_empty(cds_lfs_stack_ptr_t s)
 {
-	return ___cds_lfs_empty_head(CMM_LOAD_SHARED(s._s->head));
+	return ___cds_lfs_empty_head(uatomic_load(&s._s->head, CMM_RELAXED));
 }
 
 /*
@@ -122,6 +108,8 @@ bool _cds_lfs_empty(cds_lfs_stack_ptr_t s)
  *
  * Does not require any synchronization with other push nor pop.
  *
+ * Operations before push are consistent when observed after associated pop.
+ *
  * Lock-free stack push is not subject to ABA problem, so no need to
  * take the RCU read-side lock. Even if "head" changes between two
  * uatomic_cmpxchg() invocations here (being popped, and then pushed
@@ -167,7 +155,9 @@ bool _cds_lfs_push(cds_lfs_stack_ptr_t u_s,
 		 * uatomic_cmpxchg() implicit memory barrier orders earlier
 		 * stores to node before publication.
 		 */
-		head = uatomic_cmpxchg(&s->head, old_head, new_head);
+		cmm_emit_legacy_smp_mb();
+		head = uatomic_cmpxchg_mo(&s->head, old_head, new_head,
+					CMM_SEQ_CST, CMM_SEQ_CST);
 		if (old_head == head)
 			break;
 	}
@@ -179,6 +169,8 @@ bool _cds_lfs_push(cds_lfs_stack_ptr_t u_s,
  *
  * Returns NULL if stack is empty.
  *
+ * Operations after pop are consistent when observed before associated push.
+ *
  * __cds_lfs_pop needs to be synchronized using one of the following
  * techniques:
  *
@@ -203,7 +195,7 @@ struct cds_lfs_node *___cds_lfs_pop(cds_lfs_stack_ptr_t u_s)
 		struct cds_lfs_head *head, *next_head;
 		struct cds_lfs_node *next;
 
-		head = _CMM_LOAD_SHARED(s->head);
+		head = uatomic_load(&s->head, CMM_CONSUME);
 		if (___cds_lfs_empty_head(head))
 			return NULL;	/* Empty stack */
 
@@ -212,12 +204,14 @@ struct cds_lfs_node *___cds_lfs_pop(cds_lfs_stack_ptr_t u_s)
 		 * memory barrier before uatomic_cmpxchg() in
 		 * cds_lfs_push.
 		 */
-		cmm_smp_read_barrier_depends();
-		next = _CMM_LOAD_SHARED(head->node.next);
+		next = uatomic_load(&head->node.next, CMM_RELAXED);
 		next_head = caa_container_of(next,
 						struct cds_lfs_head, node);
-		if (uatomic_cmpxchg(&s->head, head, next_head) == head)
+		if (uatomic_cmpxchg_mo(&s->head, head, next_head,
+					CMM_SEQ_CST, CMM_SEQ_CST) == head) {
+			cmm_emit_legacy_smp_mb();
 			return &head->node;
+		}
 		/* busy-loop if head changed under us */
 	}
 }
@@ -245,6 +239,7 @@ static inline
 struct cds_lfs_head *___cds_lfs_pop_all(cds_lfs_stack_ptr_t u_s)
 {
 	struct __cds_lfs_stack *s = u_s._s;
+	struct cds_lfs_head *head;
 
 	/*
 	 * Implicit memory barrier after uatomic_xchg() matches implicit
@@ -256,7 +251,9 @@ struct cds_lfs_head *___cds_lfs_pop_all(cds_lfs_stack_ptr_t u_s)
 	 * taking care to order writes to each node prior to the full
 	 * memory barrier after this uatomic_xchg().
 	 */
-	return uatomic_xchg(&s->head, NULL);
+	head = uatomic_xchg_mo(&s->head, NULL, CMM_SEQ_CST);
+	cmm_emit_legacy_smp_mb();
+	return head;
 }
 
 /*
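
Usage note: the hunks above only change how _cds_lfs_push(), ___cds_lfs_pop() and ___cds_lfs_pop_all() express their memory ordering (explicit uatomic_*_mo() memory orders plus cmm_emit_legacy_smp_mb() for the legacy barrier contract); the lfstack semantics are unchanged. A minimal consumer sketch follows, assuming the dynamic-linking wrappers declared in urcu/lfstack.h (cds_lfs_init(), cds_lfs_node_init(), cds_lfs_push(), cds_lfs_pop_blocking()) rather than the LGPL-only static symbols patched here; cds_lfs_pop_blocking() takes the stack-internal mutex, which satisfies the pop-side synchronization requirement quoted above. The struct myitem type is illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <urcu/compiler.h>	/* caa_container_of() */
#include <urcu/lfstack.h>

struct myitem {
	int value;
	struct cds_lfs_node node;	/* embedded lock-free stack node */
};

int main(void)
{
	struct cds_lfs_stack stack;
	struct cds_lfs_node *snode;
	int i;

	cds_lfs_init(&stack);

	/* Push: stores to item->value happen before the node is published. */
	for (i = 0; i < 3; i++) {
		struct myitem *item = malloc(sizeof(*item));

		if (!item)
			abort();
		item->value = i;
		cds_lfs_node_init(&item->node);
		cds_lfs_push(&stack, &item->node);
	}

	/* Pop (LIFO order), using the mutex-protected blocking variant. */
	while ((snode = cds_lfs_pop_blocking(&stack)) != NULL) {
		struct myitem *item =
			caa_container_of(snode, struct myitem, node);

		printf("popped %d\n", item->value);
		free(item);
	}
	return 0;
}

Such a program is typically linked against the urcu common library (e.g. -lurcu-common), which provides the cds_lfs_* wrapper symbols.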