Add cmm_emit_legacy_smp_mb()
diff --git a/include/urcu/static/lfstack.h b/include/urcu/static/lfstack.h
index 75db75ea3652adc37dea3835a89c1a03d1cfdb6b..d7e70d4966d6bab1f5f040c634a025c84f7baa99 100644
--- a/include/urcu/static/lfstack.h
+++ b/include/urcu/static/lfstack.h
@@ -100,7 +100,7 @@ bool ___cds_lfs_empty_head(struct cds_lfs_head *head)
 static inline
 bool _cds_lfs_empty(cds_lfs_stack_ptr_t s)
 {
-	return ___cds_lfs_empty_head(CMM_LOAD_SHARED(s._s->head));
+	return ___cds_lfs_empty_head(uatomic_load(&s._s->head, CMM_RELAXED));
 }
 
 /*
@@ -108,6 +108,8 @@ bool _cds_lfs_empty(cds_lfs_stack_ptr_t s)
  *
  * Does not require any synchronization with other push nor pop.
  *
+ * Operations before push are consistent when observed after associated pop.
+ *
  * Lock-free stack push is not subject to ABA problem, so no need to
  * take the RCU read-side lock. Even if "head" changes between two
  * uatomic_cmpxchg() invocations here (being popped, and then pushed
@@ -153,7 +155,9 @@ bool _cds_lfs_push(cds_lfs_stack_ptr_t u_s,
 		 * uatomic_cmpxchg() implicit memory barrier orders earlier
 		 * stores to node before publication.
 		 */
-		head = uatomic_cmpxchg(&s->head, old_head, new_head);
+		cmm_emit_legacy_smp_mb();
+		head = uatomic_cmpxchg_mo(&s->head, old_head, new_head,
+					CMM_SEQ_CST, CMM_SEQ_CST);
 		if (old_head == head)
 			break;
 	}
@@ -165,6 +169,8 @@ bool _cds_lfs_push(cds_lfs_stack_ptr_t u_s,
  *
  * Returns NULL if stack is empty.
  *
+ * Operations after pop are consistent when observed before associated push.
+ *
  * __cds_lfs_pop needs to be synchronized using one of the following
  * techniques:
  *
@@ -189,7 +195,7 @@ struct cds_lfs_node *___cds_lfs_pop(cds_lfs_stack_ptr_t u_s)
 		struct cds_lfs_head *head, *next_head;
 		struct cds_lfs_node *next;
 
-		head = _CMM_LOAD_SHARED(s->head);
+		head = uatomic_load(&s->head, CMM_CONSUME);
 		if (___cds_lfs_empty_head(head))
 			return NULL;	/* Empty stack */
 
@@ -198,12 +204,14 @@ struct cds_lfs_node *___cds_lfs_pop(cds_lfs_stack_ptr_t u_s)
 		 * memory barrier before uatomic_cmpxchg() in
 		 * cds_lfs_push.
 		 */
-		cmm_smp_read_barrier_depends();
-		next = _CMM_LOAD_SHARED(head->node.next);
+		next = uatomic_load(&head->node.next, CMM_RELAXED);
 		next_head = caa_container_of(next,
 				struct cds_lfs_head, node);
-		if (uatomic_cmpxchg(&s->head, head, next_head) == head)
+		if (uatomic_cmpxchg_mo(&s->head, head, next_head,
+					CMM_SEQ_CST, CMM_SEQ_CST) == head) {
+			cmm_emit_legacy_smp_mb();
 			return &head->node;
+		}
 		/* busy-loop if head changed under us */
 	}
 }
@@ -231,6 +239,7 @@ static inline
 struct cds_lfs_head *___cds_lfs_pop_all(cds_lfs_stack_ptr_t u_s)
 {
 	struct __cds_lfs_stack *s = u_s._s;
+	struct cds_lfs_head *head;
 
 	/*
 	 * Implicit memory barrier after uatomic_xchg() matches implicit
@@ -242,7 +251,9 @@ struct cds_lfs_head *___cds_lfs_pop_all(cds_lfs_stack_ptr_t u_s)
 	 * taking care to order writes to each node prior to the full
 	 * memory barrier after this uatomic_xchg().
 	 */
-	return uatomic_xchg(&s->head, NULL);
+	head = uatomic_xchg_mo(&s->head, NULL, CMM_SEQ_CST);
+	cmm_emit_legacy_smp_mb();
+	return head;
 }
 
 /*
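
For context, a minimal usage sketch (not part of this commit) of the guarantee the
new comments describe: stores made before cds_lfs_push() are observable by the thread
whose pop returns that node. It uses only the public liburcu-cds lfstack API; the
"struct record" type, its fields, and the producer/consumer helpers are hypothetical.

#include <stdio.h>
#include <stdlib.h>
#include <urcu/compiler.h>	/* caa_container_of() */
#include <urcu/lfstack.h>

struct record {
	int value;			/* payload written before push */
	struct cds_lfs_node node;	/* embedded lock-free stack node */
};

static struct cds_lfs_stack stack;

static void producer(int v)
{
	struct record *rec = malloc(sizeof(*rec));

	rec->value = v;			/* store ordered before publication... */
	cds_lfs_node_init(&rec->node);
	(void) cds_lfs_push(&stack, &rec->node); /* false if stack was empty */
}

static struct record *consumer(void)
{
	/* cds_lfs_pop_blocking() serializes concurrent pops internally. */
	struct cds_lfs_node *n = cds_lfs_pop_blocking(&stack);

	/* ...so rec->value is guaranteed visible to whoever pops the node. */
	return n ? caa_container_of(n, struct record, node) : NULL;
}

int main(void)
{
	struct record *rec;

	cds_lfs_init(&stack);
	producer(42);
	rec = consumer();
	if (rec) {
		printf("%d\n", rec->value);
		free(rec);
	}
	return 0;
}

Build against liburcu, e.g. "cc example.c -lurcu-cds". Callers of the __cds_lfs_pop()
variants patched above must instead provide their own pop-side synchronization, as the
techniques listed in the header comment require.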