X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu%2Fstatic%2Frculfqueue.h;h=99335c41ec41d33078279e2f843431111cf307e8;hp=33e71f5c4ec9e1d9c78b848ba52537fa1fee3b6f;hb=7618919ae496bda84a2efa4f2ad0abe569892a9e;hpb=0cca1a2d1b9861591795df4e48df73ac9919d772

diff --git a/urcu/static/rculfqueue.h b/urcu/static/rculfqueue.h
index 33e71f5..99335c4 100644
--- a/urcu/static/rculfqueue.h
+++ b/urcu/static/rculfqueue.h
@@ -59,12 +59,6 @@ struct cds_lfq_node_rcu_dummy {
  * grace period.
  */
 
-static inline
-int is_dummy(struct cds_lfq_node_rcu *node)
-{
-	return ((unsigned long) node) & 0x1UL;
-}
-
 static inline
 struct cds_lfq_node_rcu *make_dummy(struct cds_lfq_queue_rcu *q,
 		struct cds_lfq_node_rcu *next)
@@ -74,18 +68,13 @@ struct cds_lfq_node_rcu *make_dummy(struct cds_lfq_queue_rcu *q,
 	dummy = malloc(sizeof(struct cds_lfq_node_rcu_dummy));
 	assert(dummy);
 	dummy->parent.next = next;
+	dummy->parent.dummy = 1;
 	dummy->q = q;
-	return (struct cds_lfq_node_rcu *) (((unsigned long) &dummy->parent) | 0x1UL);
+	return &dummy->parent;
 }
 
 static inline
-struct cds_lfq_node_rcu *get_node(struct cds_lfq_node_rcu *node)
-{
-	return (struct cds_lfq_node_rcu *) (((unsigned long )node) & ~0x1UL);
-}
-
-static inline
-void free_dummy(struct rcu_head *head)
+void free_dummy_cb(struct rcu_head *head)
 {
 	struct cds_lfq_node_rcu_dummy *dummy =
 		caa_container_of(head, struct cds_lfq_node_rcu_dummy, head);
@@ -97,25 +86,33 @@ void rcu_free_dummy(struct cds_lfq_node_rcu *node)
 {
 	struct cds_lfq_node_rcu_dummy *dummy;
 
-	dummy = caa_container_of(get_node(node), struct cds_lfq_node_rcu_dummy,
-				 parent);
-	dummy->q->queue_call_rcu(&dummy->head, free_dummy);
+	assert(node->dummy);
+	dummy = caa_container_of(node, struct cds_lfq_node_rcu_dummy, parent);
+	call_rcu(&dummy->head, free_dummy_cb);
+}
+
+static inline
+void free_dummy(struct cds_lfq_node_rcu *node)
+{
+	struct cds_lfq_node_rcu_dummy *dummy;
+
+	assert(node->dummy);
+	dummy = caa_container_of(node, struct cds_lfq_node_rcu_dummy, parent);
+	free(dummy);
 }
 
 static inline
 void _cds_lfq_node_init_rcu(struct cds_lfq_node_rcu *node)
 {
 	node->next = NULL;
+	node->dummy = 0;
 }
 
 static inline
-void _cds_lfq_init_rcu(struct cds_lfq_queue_rcu *q,
-		       void queue_call_rcu(struct rcu_head *head,
-				void (*func)(struct rcu_head *head)))
+void _cds_lfq_init_rcu(struct cds_lfq_queue_rcu *q)
 {
 	q->tail = make_dummy(q, NULL);
 	q->head = q->tail;
-	q->queue_call_rcu = queue_call_rcu;
 }
 
 /*
@@ -129,14 +126,14 @@ int _cds_lfq_destroy_rcu(struct cds_lfq_queue_rcu *q)
 	struct cds_lfq_node_rcu *head;
 
 	head = rcu_dereference(q->head);
-	if (!(is_dummy(head) && get_node(head)->next == NULL))
+	if (!(head->dummy && head->next == NULL))
 		return -EPERM;	/* not empty */
-	rcu_free_dummy(head);
+	free_dummy(head);
 	return 0;
 }
 
 /*
- * Should be called under rcu read lock critical section.
+ * Acts as a RCU reader.
  */
 static inline
 void _cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q,
@@ -150,8 +147,9 @@ void _cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q,
 	for (;;) {
 		struct cds_lfq_node_rcu *tail, *next;
 
+		rcu_read_lock();
 		tail = rcu_dereference(q->tail);
-		next = uatomic_cmpxchg(&get_node(tail)->next, NULL, node);
+		next = uatomic_cmpxchg(&tail->next, NULL, node);
 		if (next == NULL) {
 			/*
 			 * Tail was at the end of queue, we successfully
@@ -159,6 +157,7 @@ void _cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q,
 			 * enqueue might beat us to it, that's fine).
 			 */
 			(void) uatomic_cmpxchg(&q->tail, tail, node);
+			rcu_read_unlock();
 			return;
 		} else {
 			/*
@@ -166,13 +165,24 @@ void _cds_lfq_enqueue_rcu(struct cds_lfq_queue_rcu *q,
 			 * Help moving tail further and retry.
 			 */
 			(void) uatomic_cmpxchg(&q->tail, tail, next);
+			rcu_read_unlock();
 			continue;
 		}
 	}
 }
 
+static inline
+void enqueue_dummy(struct cds_lfq_queue_rcu *q)
+{
+	struct cds_lfq_node_rcu *node;
+
+	/* We need to reallocate to protect from ABA. */
+	node = make_dummy(q, NULL);
+	_cds_lfq_enqueue_rcu(q, node);
+}
+
 /*
- * Should be called under rcu read lock critical section.
+ * Acts as a RCU reader.
  *
  * The caller must wait for a grace period to pass before freeing the returned
  * node or modifying the cds_lfq_node_rcu structure.
@@ -184,40 +194,36 @@ struct cds_lfq_node_rcu *_cds_lfq_dequeue_rcu(struct cds_lfq_queue_rcu *q)
 	for (;;) {
 		struct cds_lfq_node_rcu *head, *next;
 
+		rcu_read_lock();
 		head = rcu_dereference(q->head);
-		next = rcu_dereference(get_node(head)->next);
-		if (is_dummy(head) && next == NULL)
+		next = rcu_dereference(head->next);
+		if (head->dummy && next == NULL) {
+			rcu_read_unlock();
 			return NULL;	/* empty */
+		}
 		/*
 		 * We never, ever allow dequeue to get to a state where
 		 * the queue is empty (we need at least one node in the
 		 * queue). This is ensured by checking if the head next
-		 * is NULL and retry in that case (this means a
-		 * concurrent dummy node re-enqueue is in progress).
+		 * is NULL, which means we need to enqueue a dummy node
+		 * before we can hope dequeuing anything.
 		 */
-		if (next) {
-			if (uatomic_cmpxchg(&q->head, head, next) == head) {
-				if (is_dummy(head)) {
-					struct cds_lfq_node_rcu *node;
-					/*
-					 * Requeue dummy. We need to
-					 * reallocate to protect from
-					 * ABA.
-					 */
-					rcu_free_dummy(head);
-					node = make_dummy(q, NULL);
-					_cds_lfq_enqueue_rcu(q, node);
-					continue;	/* try again */
-				}
-				return head;
-			} else {
-				/* Concurrently pushed, retry */
-				continue;
-			}
-		} else {
-			/* Dummy node re-enqueue is in progress, retry. */
-			continue;
+		if (!next) {
+			enqueue_dummy(q);
+			next = rcu_dereference(head->next);
+		}
+		if (uatomic_cmpxchg(&q->head, head, next) != head) {
+			rcu_read_unlock();
+			continue;	/* Concurrently pushed. */
+		}
+		if (head->dummy) {
+			/* Free dummy after grace period. */
+			rcu_free_dummy(head);
+			rcu_read_unlock();
+			continue;	/* try again */
 		}
+		rcu_read_unlock();
+		return head;
 	}
 }
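
Usage note (not part of the patch): the sketch below illustrates how the queue is used after this change, where _cds_lfq_init_rcu() no longer takes a queue_call_rcu argument and enqueue/dequeue take the RCU read lock themselves. It assumes the public cds_lfq_* wrappers exposed by <urcu/rculfqueue.h> mirror the static signatures shown above at this commit; struct myitem and its value field are made up for illustration.

#include <assert.h>
#include <stdlib.h>
#include <urcu.h>		/* rcu_register_thread(), synchronize_rcu() */
#include <urcu/compiler.h>	/* caa_container_of() */
#include <urcu/rculfqueue.h>

struct myitem {
	int value;
	struct cds_lfq_node_rcu node;	/* embedded queue node */
};

int main(void)
{
	struct cds_lfq_queue_rcu q;
	struct cds_lfq_node_rcu *qnode;
	struct myitem *item;

	rcu_register_thread();
	cds_lfq_init_rcu(&q);			/* assumed single-argument init, as in this patch */

	item = malloc(sizeof(*item));
	assert(item);
	item->value = 42;
	cds_lfq_node_init_rcu(&item->node);
	/* Enqueue/dequeue act as RCU readers; no explicit rcu_read_lock() around them. */
	cds_lfq_enqueue_rcu(&q, &item->node);

	qnode = cds_lfq_dequeue_rcu(&q);
	if (qnode) {
		item = caa_container_of(qnode, struct myitem, node);
		synchronize_rcu();		/* wait for a grace period before freeing the node */
		free(item);
	}

	assert(!cds_lfq_destroy_rcu(&q));	/* returns -EPERM if the queue is not empty */
	rcu_unregister_thread();
	return 0;
}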