rculfhash test: add missing call_rcu per-cpu worker threads teardown
[urcu.git] / rculfhash.c
index f6c901bb0d4f242ecfa5b8bce0077313bee56f54..4080e8e54b9ee87a6cbccb93fbea49cd0901b3b3 100644 (file)
@@ -217,6 +217,8 @@ struct cds_lfht {
        void (*cds_lfht_call_rcu)(struct rcu_head *head,
                      void (*func)(struct rcu_head *head));
        void (*cds_lfht_synchronize_rcu)(void);
+       void (*cds_lfht_rcu_read_lock)(void);
+       void (*cds_lfht_rcu_read_unlock)(void);
        unsigned long count;            /* global approximate item count */
        struct ht_items_count *percpu_count;    /* per-cpu item count */
 };
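
The two new members mirror the existing cds_lfht_call_rcu/cds_lfht_synchronize_rcu pointers: the table stores its RCU flavor's read-side primitives as data rather than hard-coding one flavor (memb, qsbr, signal), so library-internal worker threads can open read-side critical sections on the creator's behalf. A minimal standalone sketch of the idea, with hypothetical names:

	/* Hypothetical sketch: carrying a flavor's read-side primitives as
	 * function pointers lets internal threads enter read-side critical
	 * sections without linking against a specific RCU flavor. */
	struct rcu_flavor_ops {
		void (*read_lock)(void);
		void (*read_unlock)(void);
	};

	static void traverse_protected(const struct rcu_flavor_ops *ops)
	{
		ops->read_lock();	/* begin read-side critical section */
		/* ... pointer traversal is safe from reclamation here ... */
		ops->read_unlock();
	}
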
@@ -418,6 +420,16 @@ int get_count_order_ulong(unsigned long x)
        return order;
 }
 
+#ifdef POISON_FREE
+#define poison_free(ptr)                               \
+       do {                                            \
+               memset(ptr, 0x42, sizeof(*(ptr)));      \
+               free(ptr);                              \
+       } while (0)
+#else
+#define poison_free(ptr)       free(ptr)
+#endif
+
 static
 void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth);
 
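
The poisoning macro overwrites a block with 0x42 before releasing it, so a stale pointer dereference reads obviously bogus data instead of plausible leftovers. Note that sizeof(*(ptr)) scrubs only the pointed-to type, so allocations carrying a trailing flexible array (such as struct rcu_level below) are only partially poisoned. A self-contained illustration of the technique:

	#include <stdlib.h>
	#include <string.h>

	#define poison_free(ptr)				\
		do {						\
			memset(ptr, 0x42, sizeof(*(ptr)));	\
			free(ptr);				\
		} while (0)

	struct item {
		struct item *next;
		unsigned long key;
	};

	int main(void)
	{
		struct item *p = malloc(sizeof(*p));

		p->key = 7;
		poison_free(p);
		/*
		 * Any use-after-free through 'p' now reads 0x42424242...
		 * instead of stale-but-plausible data, turning silent
		 * corruption into an immediately recognizable pattern in a
		 * debugger or core dump.
		 */
		return 0;
	}
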
@@ -468,7 +480,7 @@ struct ht_items_count *alloc_per_cpu_items_count(void)
 static
 void free_per_cpu_items_count(struct ht_items_count *count)
 {
-       free(count);
+       poison_free(count);
 }
 
 static
@@ -644,7 +656,7 @@ void cds_lfht_free_table_cb(struct rcu_head *head)
 {
        struct rcu_table *t =
                caa_container_of(head, struct rcu_table, head);
-       free(t);
+       poison_free(t);
 }
 
 static
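
cds_lfht_free_table_cb above (and cds_lfht_free_level below) follow the standard deferred-reclamation pattern: embed a struct rcu_head in the object, hand call_rcu a callback, and recover the enclosing object with caa_container_of once a grace period has elapsed. A self-contained sketch of that pattern, with hypothetical names:

	#include <stdlib.h>
	#include <urcu.h>
	#include <urcu/compiler.h>	/* caa_container_of() */

	struct my_obj {
		unsigned long payload;
		struct rcu_head head;	/* storage for deferred reclamation */
	};

	static void my_obj_free_cb(struct rcu_head *head)
	{
		struct my_obj *obj = caa_container_of(head, struct my_obj, head);

		free(obj);	/* safe: all pre-existing readers are done */
	}

	/* After unlinking obj from every RCU-visible structure:
	 *	call_rcu(&obj->head, my_obj_free_cb);
	 */
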
@@ -652,7 +664,7 @@ void cds_lfht_free_level(struct rcu_head *head)
 {
        struct rcu_level *l =
                caa_container_of(head, struct rcu_level, head);
-       free(l);
+       poison_free(l);
 }
 
 /*
@@ -672,6 +684,13 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node
                /* We can always skip the dummy node initially */
                iter = rcu_dereference(iter_prev->p.next);
                assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
+               /*
+                * We should never be called with dummy (start of chain)
+                * and logically removed node (end of path compression
+                * marker) being the actual same node. This would be a
+                * bug in the algorithm implementation.
+                */
+               assert(dummy != node);
                for (;;) {
                        if (unlikely(!clear_flag(iter)))
                                return;
@@ -835,6 +854,11 @@ end:
                return -ENOENT;
 }
 
+/*
+ * Holding RCU read lock to protect _cds_lfht_add against memory
+ * reclaim that could be performed by other call_rcu worker threads (ABA
+ * problem).
+ */
 static
 void init_table(struct cds_lfht *ht, struct rcu_table *t,
                unsigned long first_order, unsigned long len_order)
@@ -852,6 +876,7 @@ void init_table(struct cds_lfht *ht, struct rcu_table *t,
                dbg_printf("init order %lu len: %lu\n", i, len);
                t->tbl[i] = calloc(1, sizeof(struct rcu_level)
                                + (len * sizeof(struct _cds_lfht_node)));
+               ht->cds_lfht_rcu_read_lock();
                for (j = 0; j < len; j++) {
                        struct cds_lfht_node *new_node =
                                (struct cds_lfht_node *) &t->tbl[i]->nodes[j];
@@ -864,6 +889,7 @@ void init_table(struct cds_lfht *ht, struct rcu_table *t,
                        if (CMM_LOAD_SHARED(ht->in_progress_destroy))
                                break;
                }
+               ht->cds_lfht_rcu_read_unlock();
                /* Update table size */
                t->size = !i ? 1 : (1UL << i);
                dbg_printf("init new size: %lu\n", t->size);
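
The bracketing added here (and, symmetrically, in fini_table below) is what the comment above init_table announces: _cds_lfht_add chases ->next pointers, and without a read-side critical section a concurrent call_rcu worker could free a traversed node and the allocator could hand the same address back for a different node, the classic ABA hazard. A hypothetical condensed view of the pattern, using this file's internals:

	static void resize_worker_step(struct cds_lfht *ht)
	{
		ht->cds_lfht_rcu_read_lock();
		/*
		 * Every node observed from here on is pinned: call_rcu
		 * callbacks queued by other workers cannot run until this
		 * thread leaves the critical section, so no traversed
		 * address can be freed and recycled mid-update.
		 */
		/* ... _cds_lfht_add()/_cds_lfht_remove() calls ... */
		ht->cds_lfht_rcu_read_unlock();
	}
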
@@ -874,6 +900,11 @@ void init_table(struct cds_lfht *ht, struct rcu_table *t,
        t->resize_initiated = 0;
 }
 
+/*
+ * Holding RCU read lock to protect _cds_lfht_remove against memory
+ * reclaim that could be performed by other call_rcu worker threads (ABA
+ * problem).
+ */
 static
 void fini_table(struct cds_lfht *ht, struct rcu_table *t,
                unsigned long first_order, unsigned long len_order)
@@ -895,8 +926,9 @@ void fini_table(struct cds_lfht *ht, struct rcu_table *t,
                 * removal so gc lookups use non-logically-removed dummy
                 * nodes.
                 */
-               t->size = 1UL << (i - 2);
+               t->size = 1UL << (i - 1);
                /* Unlink */
+               ht->cds_lfht_rcu_read_lock();
                for (j = 0; j < len; j++) {
                        struct cds_lfht_node *fini_node =
                                (struct cds_lfht_node *) &t->tbl[i]->nodes[j];
@@ -909,6 +941,7 @@ void fini_table(struct cds_lfht *ht, struct rcu_table *t,
                        if (CMM_LOAD_SHARED(ht->in_progress_destroy))
                                break;
                }
+               ht->cds_lfht_rcu_read_unlock();
                ht->cds_lfht_call_rcu(&t->tbl[i]->head, cds_lfht_free_level);
                dbg_printf("fini new size: %lu\n", t->size);
                if (CMM_LOAD_SHARED(ht->in_progress_destroy))
@@ -925,7 +958,9 @@ struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
                        int flags,
                        void (*cds_lfht_call_rcu)(struct rcu_head *head,
                                        void (*func)(struct rcu_head *head)),
-                       void (*cds_lfht_synchronize_rcu)(void))
+                       void (*cds_lfht_synchronize_rcu)(void),
+                       void (*cds_lfht_rcu_read_lock)(void),
+                       void (*cds_lfht_rcu_read_unlock)(void))
 {
        struct cds_lfht *ht;
        unsigned long order;
@@ -939,6 +974,8 @@ struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
        ht->hash_seed = hash_seed;
        ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
        ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu;
+       ht->cds_lfht_rcu_read_lock = cds_lfht_rcu_read_lock;
+       ht->cds_lfht_rcu_read_unlock = cds_lfht_rcu_read_unlock;
        ht->in_progress_resize = 0;
        ht->percpu_count = alloc_per_cpu_items_count();
        /* this mutex should not nest in read-side C.S. */
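
Callers must now hand the constructor their flavor's read lock/unlock alongside call_rcu and synchronize_rcu. A hedged caller-side sketch, assuming the default (memb) flavor, hypothetical test_hash/test_compare callbacks, and the remaining parameters (compare function, hash seed, initial size) per this era's signature:

	#include <stddef.h>
	#include <urcu.h>		/* rcu_read_lock(), synchronize_rcu() */
	#include <urcu-call-rcu.h>	/* call_rcu() */
	#include "rculfhash.h"

	extern unsigned long test_hash(void *key, size_t length,
			unsigned long seed);
	extern unsigned long test_compare(void *key1, size_t key1_len,
			void *key2, size_t key2_len);

	static struct cds_lfht *create_table(void)
	{
		return cds_lfht_new(test_hash, test_compare,
				0x42UL,			/* hash seed */
				1UL << 8,		/* initial size */
				CDS_LFHT_AUTO_RESIZE,
				call_rcu,		/* deferred reclamation */
				synchronize_rcu,	/* grace period wait */
				rcu_read_lock,		/* new: read-side entry */
				rcu_read_unlock);	/* new: read-side exit */
	}
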
@@ -1094,7 +1131,7 @@ int cds_lfht_delete_dummy(struct cds_lfht *ht)
                                bit_reverse_ulong(t->tbl[order]->nodes[i].reverse_hash));
                        assert(is_dummy(t->tbl[order]->nodes[i].next));
                }
-               free(t->tbl[order]);
+               poison_free(t->tbl[order]);
        }
        return 0;
 }
@@ -1114,9 +1151,9 @@ int cds_lfht_destroy(struct cds_lfht *ht)
        ret = cds_lfht_delete_dummy(ht);
        if (ret)
                return ret;
-       free(ht->t);
+       poison_free(ht->t);
        free_per_cpu_items_count(ht->percpu_count);
-       free(ht);
+       poison_free(ht);
        return ret;
 }
 
@@ -1268,7 +1305,7 @@ void do_resize_cb(struct rcu_head *head)
        pthread_mutex_lock(&ht->resize_mutex);
        _do_cds_lfht_resize(ht);
        pthread_mutex_unlock(&ht->resize_mutex);
-       free(work);
+       poison_free(work);
        cmm_smp_mb();   /* finish resize before decrement */
        uatomic_dec(&ht->in_progress_resize);
 }
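
The barrier/decrement pair closes the resize accounting opened by the enqueuer: in_progress_resize is incremented before a work item is queued, and cds_lfht_destroy must not tear the table down while any item is still in flight. A hypothetical sketch of the waiting side, assuming a simple polling loop:

	#include <poll.h>
	#include <urcu/uatomic.h>

	static void wait_for_pending_resizes(struct cds_lfht *ht)
	{
		/*
		 * The cmm_smp_mb() in do_resize_cb() orders the worker's
		 * stores (including freeing 'work') before its decrement,
		 * so once the counter reads zero, no worker still touches
		 * the table.
		 */
		while (uatomic_read(&ht->in_progress_resize))
			poll(NULL, 0, 100);	/* assumed 100ms poll */
	}
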