diff --git a/urcu/static/wfstack.h b/urcu/static/wfstack.h
index 018a121..e96c887 100644
--- a/urcu/static/wfstack.h
+++ b/urcu/static/wfstack.h
@@ -77,7 +77,17 @@ void _cds_wfs_node_init(struct cds_wfs_node *node)
 }
 
 /*
- * cds_wfs_init: initialize wait-free stack.
+ * __cds_wfs_init: initialize wait-free stack. Don't pair with
+ * any destroy function.
+ */
+static inline void ___cds_wfs_init(struct __cds_wfs_stack *s)
+{
+	s->head = CDS_WFS_END;
+}
+
+/*
+ * cds_wfs_init: initialize wait-free stack. Pair with
+ * cds_wfs_destroy().
  */
 static inline
 void _cds_wfs_init(struct cds_wfs_stack *s)
@@ -89,6 +99,17 @@ void _cds_wfs_init(struct cds_wfs_stack *s)
 	assert(!ret);
 }
 
+/*
+ * cds_wfs_destroy: destroy wait-free stack. Pair with
+ * cds_wfs_init().
+ */
+static inline
+void _cds_wfs_destroy(struct cds_wfs_stack *s)
+{
+	int ret = pthread_mutex_destroy(&s->lock);
+	assert(!ret);
+}
+
 static inline bool ___cds_wfs_end(void *node)
 {
 	return node == CDS_WFS_END;
@@ -99,8 +120,10 @@ static inline bool ___cds_wfs_end(void *node)
  *
  * No memory barrier is issued. No mutual exclusion is required.
  */
-static inline bool _cds_wfs_empty(struct cds_wfs_stack *s)
+static inline bool _cds_wfs_empty(cds_wfs_stack_ptr_t u_stack)
 {
+	struct __cds_wfs_stack *s = u_stack._s;
+
 	return ___cds_wfs_end(CMM_LOAD_SHARED(s->head));
 }
 
@@ -114,8 +137,9 @@ static inline bool _cds_wfs_empty(struct cds_wfs_stack *s)
  * Returns non-zero otherwise.
  */
 static inline
-int _cds_wfs_push(struct cds_wfs_stack *s, struct cds_wfs_node *node)
+int _cds_wfs_push(cds_wfs_stack_ptr_t u_stack, struct cds_wfs_node *node)
 {
+	struct __cds_wfs_stack *s = u_stack._s;
 	struct cds_wfs_head *old_head, *new_head;
 
 	assert(node->next == NULL);
@@ -137,7 +161,7 @@ int _cds_wfs_push(struct cds_wfs_stack *s, struct cds_wfs_node *node)
  * Wait for the push to complete the enqueue, then return the next node.
  */
 static inline struct cds_wfs_node *
-___cds_wfs_node_sync_next(struct cds_wfs_node *node)
+___cds_wfs_node_sync_next(struct cds_wfs_node *node, int blocking)
 {
 	struct cds_wfs_node *next;
 	int attempt = 0;
@@ -146,8 +170,10 @@ ___cds_wfs_node_sync_next(struct cds_wfs_node *node)
 	 * Adaptive busy-looping waiting for push to complete.
 	 */
 	while ((next = CMM_LOAD_SHARED(node->next)) == NULL) {
+		if (!blocking)
+			return CDS_WFS_WOULDBLOCK;
 		if (++attempt >= CDS_WFS_ADAPT_ATTEMPTS) {
-			poll(NULL, 0, CDS_WFS_WAIT);	/* Wait for 10ms */
+			(void) poll(NULL, 0, CDS_WFS_WAIT);	/* Wait for 10ms */
 			attempt = 0;
 		} else {
 			caa_cpu_relax();
@@ -157,8 +183,40 @@ ___cds_wfs_node_sync_next(struct cds_wfs_node *node)
 	return next;
 }
 
+static inline
+struct cds_wfs_node *
+___cds_wfs_pop(cds_wfs_stack_ptr_t u_stack, int *state, int blocking)
+{
+	struct cds_wfs_head *head, *new_head;
+	struct cds_wfs_node *next;
+	struct __cds_wfs_stack *s = u_stack._s;
+
+	if (state)
+		*state = 0;
+	for (;;) {
+		head = CMM_LOAD_SHARED(s->head);
+		if (___cds_wfs_end(head)) {
+			return NULL;
+		}
+		next = ___cds_wfs_node_sync_next(&head->node, blocking);
+		if (!blocking && next == CDS_WFS_WOULDBLOCK) {
+			return CDS_WFS_WOULDBLOCK;
+		}
+		new_head = caa_container_of(next, struct cds_wfs_head, node);
+		if (uatomic_cmpxchg(&s->head, head, new_head) == head) {
+			if (state && ___cds_wfs_end(new_head))
+				*state |= CDS_WFS_STATE_LAST;
+			return &head->node;
+		}
+		if (!blocking) {
+			return CDS_WFS_WOULDBLOCK;
+		}
+		/* busy-loop if head changed under us */
+	}
+}
+
 /*
- * __cds_wfs_pop_blocking: pop a node from the stack.
+ * __cds_wfs_pop_with_state_blocking: pop a node from the stack, with state.
  *
  * Returns NULL if stack is empty.
  *
@@ -172,24 +230,49 @@ ___cds_wfs_node_sync_next(struct cds_wfs_node *node)
  * __cds_wfs_pop_blocking and __cds_wfs_pop_all callers.
  * 3) Ensuring that only ONE thread can call __cds_wfs_pop_blocking()
  *    and __cds_wfs_pop_all(). (multi-provider/single-consumer scheme).
+ *
+ * "state" saves state flags atomically sampled with pop operation.
  */
 static inline
 struct cds_wfs_node *
-___cds_wfs_pop_blocking(struct cds_wfs_stack *s)
+___cds_wfs_pop_with_state_blocking(cds_wfs_stack_ptr_t u_stack, int *state)
 {
-	struct cds_wfs_head *head, *new_head;
-	struct cds_wfs_node *next;
+	return ___cds_wfs_pop(u_stack, state, 1);
+}
 
-	for (;;) {
-		head = CMM_LOAD_SHARED(s->head);
-		if (___cds_wfs_end(head))
-			return NULL;
-		next = ___cds_wfs_node_sync_next(&head->node);
-		new_head = caa_container_of(next, struct cds_wfs_head, node);
-		if (uatomic_cmpxchg(&s->head, head, new_head) == head)
-			return &head->node;
-		/* busy-loop if head changed under us */
-	}
+static inline
+struct cds_wfs_node *
+___cds_wfs_pop_blocking(cds_wfs_stack_ptr_t u_stack)
+{
+	return ___cds_wfs_pop_with_state_blocking(u_stack, NULL);
+}
+
+/*
+ * __cds_wfs_pop_with_state_nonblocking: pop a node from the stack.
+ *
+ * Same as __cds_wfs_pop_with_state_blocking, but returns
+ * CDS_WFS_WOULDBLOCK if it needs to block.
+ *
+ * "state" saves state flags atomically sampled with pop operation.
+ */
+static inline
+struct cds_wfs_node *
+___cds_wfs_pop_with_state_nonblocking(cds_wfs_stack_ptr_t u_stack, int *state)
+{
+	return ___cds_wfs_pop(u_stack, state, 0);
+}
+
+/*
+ * __cds_wfs_pop_nonblocking: pop a node from the stack.
+ *
+ * Same as __cds_wfs_pop_blocking, but returns CDS_WFS_WOULDBLOCK if
+ * it needs to block.
+ */
+static inline
+struct cds_wfs_node *
+___cds_wfs_pop_nonblocking(cds_wfs_stack_ptr_t u_stack)
+{
+	return ___cds_wfs_pop_with_state_nonblocking(u_stack, NULL);
 }
 
 /*
@@ -211,8 +294,9 @@ ___cds_wfs_pop_blocking(struct cds_wfs_stack *s)
  */
 static inline struct cds_wfs_head *
-___cds_wfs_pop_all(struct cds_wfs_stack *s)
+___cds_wfs_pop_all(cds_wfs_stack_ptr_t u_stack)
 {
+	struct __cds_wfs_stack *s = u_stack._s;
 	struct cds_wfs_head *head;
 
 	/*
@@ -254,20 +338,30 @@ static inline void _cds_wfs_pop_unlock(struct cds_wfs_stack *s)
 }
 
 /*
- * Call __cds_wfs_pop_blocking with an internal pop mutex held.
+ * Call __cds_wfs_pop_with_state_blocking with an internal pop mutex held.
  */
 static inline
 struct cds_wfs_node *
-_cds_wfs_pop_blocking(struct cds_wfs_stack *s)
+_cds_wfs_pop_with_state_blocking(struct cds_wfs_stack *s, int *state)
 {
 	struct cds_wfs_node *retnode;
 
 	_cds_wfs_pop_lock(s);
-	retnode = ___cds_wfs_pop_blocking(s);
+	retnode = ___cds_wfs_pop_with_state_blocking(s, state);
 	_cds_wfs_pop_unlock(s);
 	return retnode;
 }
 
+/*
+ * Call _cds_wfs_pop_with_state_blocking without saving any state.
+ */
+static inline
+struct cds_wfs_node *
+_cds_wfs_pop_blocking(struct cds_wfs_stack *s)
+{
+	return _cds_wfs_pop_with_state_blocking(s, NULL);
+}
+
 /*
  * Call __cds_wfs_pop_all with an internal pop mutex held.
  */
@@ -303,6 +397,22 @@ _cds_wfs_first(struct cds_wfs_head *head)
 	return &head->node;
 }
 
+static inline struct cds_wfs_node *
+___cds_wfs_next(struct cds_wfs_node *node, int blocking)
+{
+	struct cds_wfs_node *next;
+
+	next = ___cds_wfs_node_sync_next(node, blocking);
+	/*
+	 * CDS_WFS_WOULDBLOCK != CDS_WFS_END, so we can check for end
+	 * even if ___cds_wfs_node_sync_next returns CDS_WFS_WOULDBLOCK,
+	 * and still return CDS_WFS_WOULDBLOCK.
+	 */
+	if (___cds_wfs_end(next))
+		return NULL;
+	return next;
+}
+
 /*
  * cds_wfs_next_blocking: get next node of a popped stack.
  *
@@ -319,12 +429,20 @@ _cds_wfs_first(struct cds_wfs_head *head)
 static inline struct cds_wfs_node *
 _cds_wfs_next_blocking(struct cds_wfs_node *node)
 {
-	struct cds_wfs_node *next;
+	return ___cds_wfs_next(node, 1);
+}
 
-	next = ___cds_wfs_node_sync_next(node);
-	if (___cds_wfs_end(next))
-		return NULL;
-	return next;
+
+/*
+ * cds_wfs_next_nonblocking: get next node of a popped stack.
+ *
+ * Same as cds_wfs_next_blocking, but returns CDS_WFS_WOULDBLOCK if it
+ * needs to block.
+ */
+static inline struct cds_wfs_node *
+_cds_wfs_next_nonblocking(struct cds_wfs_node *node)
+{
+	return ___cds_wfs_next(node, 0);
 }
 
 #ifdef __cplusplus
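
Usage sketches for the API touched by this patch follow. They go through the public cds_wfs_*() wrappers that urcu/wfstack.h layers over the _cds_wfs_*() static inlines above (or the inlines themselves under _LGPL_SOURCE); the struct job type, its id field, and the helper names are illustrative assumptions, not part of the patch.

First, the new init/destroy pairing together with the wait-free push, as a minimal sketch:

#include <stdlib.h>
#include <urcu/compiler.h>	/* caa_container_of() */
#include <urcu/wfstack.h>

struct job {
	struct cds_wfs_node node;	/* embedded stack linkage */
	int id;				/* illustrative payload */
};

int main(void)
{
	struct cds_wfs_stack stack;
	struct cds_wfs_node *n;
	struct job *j = malloc(sizeof(*j));

	if (!j)
		return 1;
	cds_wfs_init(&stack);		/* pairs with cds_wfs_destroy() */

	cds_wfs_node_init(&j->node);
	j->id = 42;
	/* Wait-free push; returns 0 iff the stack was empty beforehand. */
	if (cds_wfs_push(&stack, &j->node) == 0) {
		/* First element pushed: a consumer could be woken up here. */
	}

	n = cds_wfs_pop_blocking(&stack);
	if (n)
		free(caa_container_of(n, struct job, node));

	cds_wfs_destroy(&stack);	/* pairs with cds_wfs_init() */
	return 0;
}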
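
Next, the nonblocking pop. Instead of busy-waiting when a concurrent push has not yet linked node->next, it returns the distinct sentinel CDS_WFS_WOULDBLOCK, and the optional "state" output has CDS_WFS_STATE_LAST set when the returned node emptied the stack. A sketch of a consumer that drains whatever is immediately available, assuming a single popping thread so that requirement 3) in the comment above is met:

#include <stdio.h>
#include <stdlib.h>
#include <urcu/compiler.h>
#include <urcu/wfstack.h>

struct job {
	struct cds_wfs_node node;
	int id;
};

/* Pop until the stack is empty or a push is still in flight. */
static void drain_ready(struct cds_wfs_stack *s)
{
	for (;;) {
		struct cds_wfs_node *n;
		int state;

		n = __cds_wfs_pop_with_state_nonblocking(s, &state);
		if (n == NULL)
			break;	/* stack empty */
		if (n == CDS_WFS_WOULDBLOCK)
			break;	/* concurrent push in progress; retry later */
		if (state & CDS_WFS_STATE_LAST)
			printf("popped the last node\n");
		free(caa_container_of(n, struct job, node));
	}
}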
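
Finally, __cds_wfs_pop_all() detaches the entire stack with a single atomic exchange; the detached batch is then walked with cds_wfs_first() and cds_wfs_next_blocking(), or with the cds_wfs_next_nonblocking() added here, which may return CDS_WFS_WOULDBLOCK instead of waiting. A sketch of a batch drain (helper name drain_all is made up):

#include <stdlib.h>
#include <urcu/compiler.h>
#include <urcu/wfstack.h>

struct job {
	struct cds_wfs_node node;
	int id;
};

/* Detach everything at once, then walk the snapshot at leisure. */
static void drain_all(struct cds_wfs_stack *s)
{
	struct cds_wfs_head *head;
	struct cds_wfs_node *n, *next;

	head = cds_wfs_pop_all_blocking(s);
	for (n = cds_wfs_first(head); n != NULL; n = next) {
		/* Fetch the successor before freeing the current node. */
		next = cds_wfs_next_blocking(n);
		free(caa_container_of(n, struct job, node));
	}
}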