if (!node)
goto fail;
cds_lfq_node_init_rcu(node);
+ rcu_read_lock();
cds_lfq_enqueue_rcu(&q, node);
+ rcu_read_unlock();
nr_successful_enqueues++;
if (unlikely(wdelay))
}
-static void rcu_release_node(struct urcu_ref *ref)
+/*
+ * RCU callback: runs after a grace period has elapsed, i.e. once no
+ * pre-existing RCU read-side critical section can still hold a pointer
+ * to this node, so its memory may be returned to the allocator.
+ */
+static void rcu_free_node(struct rcu_head *head)
{
- struct cds_lfq_node_rcu *node = caa_container_of(ref, struct cds_lfq_node_rcu, ref);
- defer_rcu(free, node);
- //synchronize_rcu();
- //free(node);
+ struct cds_lfq_node_rcu *node =
+ caa_container_of(head, struct cds_lfq_node_rcu, rcu_head);
+ free(node);
+}
+
+/*
+ * Reference-count release: invoked when the last urcu_ref on the node
+ * is dropped. The free is deferred through call_rcu() (rather than a
+ * blocking synchronize_rcu() or an immediate free) because concurrent
+ * RCU readers may still be traversing the node.
+ */
+static void ref_release_node(struct urcu_ref *ref)
+{
+ struct cds_lfq_node_rcu *node =
+ caa_container_of(ref, struct cds_lfq_node_rcu, ref);
+ call_rcu(&node->rcu_head, rcu_free_node);
}
void *thr_dequeuer(void *_count)
cmm_smp_mb();
for (;;) {
- struct cds_lfq_node_rcu *node = cds_lfq_dequeue_rcu(&q,
- rcu_release_node);
+ struct cds_lfq_node_rcu *node;
+
+ rcu_read_lock();
+ node = cds_lfq_dequeue_rcu(&q);
+ rcu_read_unlock();
if (node) {
- urcu_ref_put(&node->ref, rcu_release_node);
+ urcu_ref_put(&node->ref, ref_release_node);
nr_successful_dequeues++;
}
struct cds_lfq_node_rcu *node;
do {
- node = cds_lfq_dequeue_rcu(q, release_node);
+ rcu_read_lock();
+ node = cds_lfq_dequeue_rcu(q);
+ rcu_read_unlock();
if (node) {
urcu_ref_put(&node->ref, release_node);
(*nr_dequeues)++;
tid_dequeuer = malloc(sizeof(*tid_dequeuer) * nr_dequeuers);
count_enqueuer = malloc(2 * sizeof(*count_enqueuer) * nr_enqueuers);
count_dequeuer = malloc(2 * sizeof(*count_dequeuer) * nr_dequeuers);
- cds_lfq_init_rcu(&q);
+ cds_lfq_init_rcu(&q, ref_release_node);
next_aff = 0;