X-Git-Url: https://git.liburcu.org/?a=blobdiff_plain;f=include%2Furcu%2Fstatic%2Flfstack.h;h=d7e70d4966d6bab1f5f040c634a025c84f7baa99;hb=HEAD;hp=b8b544feeed551121aeaf1ade5970e5eac9d0f8b;hpb=70469b43316ecc8d6053550504858ad8a8ef9b16;p=urcu.git

diff --git a/include/urcu/static/lfstack.h b/include/urcu/static/lfstack.h
index b8b544f..22233d8 100644
--- a/include/urcu/static/lfstack.h
+++ b/include/urcu/static/lfstack.h
@@ -1,34 +1,20 @@
+// SPDX-FileCopyrightText: 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+//
+// SPDX-License-Identifier: LGPL-2.1-or-later
+
 #ifndef _URCU_STATIC_LFSTACK_H
 #define _URCU_STATIC_LFSTACK_H
 
 /*
- * urcu/static/lfstack.h
- *
  * Userspace RCU library - Lock-Free Stack
  *
- * Copyright 2010-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
  * TO BE INCLUDED ONLY IN LGPL-COMPATIBLE CODE. See urcu/lfstack.h for
  * linking dynamically with the userspace rcu library.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
 #include <stdbool.h>
 #include <pthread.h>
-#include <assert.h>
+#include <urcu/assert.h>
 
 #include <urcu/uatomic.h>
 #include <urcu-pointer.h>
 
@@ -76,7 +62,7 @@ void _cds_lfs_init(struct cds_lfs_stack *s)
 
 	s->head = NULL;
 	ret = pthread_mutex_init(&s->lock, NULL);
-	assert(!ret);
+	urcu_posix_assert(!ret);
 }
 
 /*
@@ -87,7 +73,7 @@ static inline
 void _cds_lfs_destroy(struct cds_lfs_stack *s)
 {
 	int ret = pthread_mutex_destroy(&s->lock);
-	assert(!ret);
+	urcu_posix_assert(!ret);
 }
 
 /*
@@ -101,7 +87,7 @@ void ___cds_lfs_init(struct __cds_lfs_stack *s)
 }
 
 static inline
-bool ___cds_lfs_empty_head(struct cds_lfs_head *head)
+bool ___cds_lfs_empty_head(const struct cds_lfs_head *head)
 {
 	return head == NULL;
 }
@@ -112,9 +98,9 @@ bool ___cds_lfs_empty_head(struct cds_lfs_head *head)
 * No memory barrier is issued. No mutual exclusion is required.
 */
 static inline
-bool _cds_lfs_empty(cds_lfs_stack_ptr_t s)
+bool _cds_lfs_empty(cds_lfs_stack_const_ptr_t s)
 {
-	return ___cds_lfs_empty_head(CMM_LOAD_SHARED(s._s->head));
+	return ___cds_lfs_empty_head(uatomic_load(&s._s->head, CMM_RELAXED));
 }
 
 /*
@@ -122,6 +108,8 @@ bool _cds_lfs_empty(cds_lfs_stack_ptr_t s)
 *
 * Does not require any synchronization with other push nor pop.
 *
+ * Operations before push are consistent when observed after associated pop.
+ *
 * Lock-free stack push is not subject to ABA problem, so no need to
 * take the RCU read-side lock. Even if "head" changes between two
 * uatomic_cmpxchg() invocations here (being popped, and then pushed
@@ -167,7 +155,9 @@ bool _cds_lfs_push(cds_lfs_stack_ptr_t u_s,
 * uatomic_cmpxchg() implicit memory barrier orders earlier
 * stores to node before publication.
*/ - head = uatomic_cmpxchg(&s->head, old_head, new_head); + cmm_emit_legacy_smp_mb(); + head = uatomic_cmpxchg_mo(&s->head, old_head, new_head, + CMM_SEQ_CST, CMM_SEQ_CST); if (old_head == head) break; } @@ -179,6 +169,8 @@ bool _cds_lfs_push(cds_lfs_stack_ptr_t u_s, * * Returns NULL if stack is empty. * + * Operations after pop are consistent when observed before associated push. + * * __cds_lfs_pop needs to be synchronized using one of the following * techniques: * @@ -203,7 +195,7 @@ struct cds_lfs_node *___cds_lfs_pop(cds_lfs_stack_ptr_t u_s) struct cds_lfs_head *head, *next_head; struct cds_lfs_node *next; - head = _CMM_LOAD_SHARED(s->head); + head = uatomic_load(&s->head, CMM_CONSUME); if (___cds_lfs_empty_head(head)) return NULL; /* Empty stack */ @@ -212,12 +204,14 @@ struct cds_lfs_node *___cds_lfs_pop(cds_lfs_stack_ptr_t u_s) * memory barrier before uatomic_cmpxchg() in * cds_lfs_push. */ - cmm_smp_read_barrier_depends(); - next = _CMM_LOAD_SHARED(head->node.next); + next = uatomic_load(&head->node.next, CMM_RELAXED); next_head = caa_container_of(next, struct cds_lfs_head, node); - if (uatomic_cmpxchg(&s->head, head, next_head) == head) + if (uatomic_cmpxchg_mo(&s->head, head, next_head, + CMM_SEQ_CST, CMM_SEQ_CST) == head){ + cmm_emit_legacy_smp_mb(); return &head->node; + } /* busy-loop if head changed under us */ } } @@ -245,6 +239,7 @@ static inline struct cds_lfs_head *___cds_lfs_pop_all(cds_lfs_stack_ptr_t u_s) { struct __cds_lfs_stack *s = u_s._s; + struct cds_lfs_head *head; /* * Implicit memory barrier after uatomic_xchg() matches implicit @@ -256,7 +251,9 @@ struct cds_lfs_head *___cds_lfs_pop_all(cds_lfs_stack_ptr_t u_s) * taking care to order writes to each node prior to the full * memory barrier after this uatomic_xchg(). 
*/ - return uatomic_xchg(&s->head, NULL); + head = uatomic_xchg_mo(&s->head, NULL, CMM_SEQ_CST); + cmm_emit_legacy_smp_mb(); + return head; } /* @@ -267,7 +264,7 @@ static inline void _cds_lfs_pop_lock(struct cds_lfs_stack *s) int ret; ret = pthread_mutex_lock(&s->lock); - assert(!ret); + urcu_posix_assert(!ret); } /* @@ -278,7 +275,7 @@ static inline void _cds_lfs_pop_unlock(struct cds_lfs_stack *s) int ret; ret = pthread_mutex_unlock(&s->lock); - assert(!ret); + urcu_posix_assert(!ret); } /* @@ -289,9 +286,11 @@ struct cds_lfs_node * _cds_lfs_pop_blocking(struct cds_lfs_stack *s) { struct cds_lfs_node *retnode; + cds_lfs_stack_ptr_t stack; _cds_lfs_pop_lock(s); - retnode = ___cds_lfs_pop(s); + stack.s = s; + retnode = ___cds_lfs_pop(stack); _cds_lfs_pop_unlock(s); return retnode; } @@ -304,9 +303,11 @@ struct cds_lfs_head * _cds_lfs_pop_all_blocking(struct cds_lfs_stack *s) { struct cds_lfs_head *rethead; + cds_lfs_stack_ptr_t stack; _cds_lfs_pop_lock(s); - rethead = ___cds_lfs_pop_all(s); + stack.s = s; + rethead = ___cds_lfs_pop_all(stack); _cds_lfs_pop_unlock(s); return rethead; }