rculfhash: add list iteration tracer in gc
[urcu.git] / rculfhash.c
index 4b781f7b9012273b24dfb93e99afa0d664032daf..6a80049ddd1f783a21e117d63437eec5771ba514 100644 (file)
 #define dbg_printf(fmt, args...)
 #endif
 
+/* For testing */
+#define POISON_FREE
+
 /*
 * Per-CPU split-counters lazily update the global counter every
 * 1024 additions/removals. They also keep track of whether a
 * resize is required.
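
The split-counter scheme described in the comment above is the classic contended-counter trick: each CPU accumulates increments in a private slot and publishes a whole batch to the shared total only once per 1024 operations, so the hot path never bounces the global cache line while the global value stays approximately right. A minimal sketch of the idea, using GCC atomic builtins in place of urcu's uatomic helpers; NR_CPUS, the struct layout, and split_counter_inc() are hypothetical, not identifiers this file defines:

    #define NR_CPUS		64	/* placeholder for illustration */
    #define SPLIT_WINDOW	1024UL	/* batch size; a power of two */

    struct split_counter {
            unsigned long global;		/* approximate shared total */
            unsigned long percpu[NR_CPUS];	/* private, uncontended */
    };

    static void split_counter_inc(struct split_counter *c, int cpu)
    {
            unsigned long n = ++c->percpu[cpu];

            /* Publish one full batch each time the local count
             * crosses a SPLIT_WINDOW boundary. */
            if (!(n & (SPLIT_WINDOW - 1)))
                    __atomic_add_fetch(&c->global, SPLIT_WINDOW,
                                       __ATOMIC_RELAXED);
    }
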
@@ -418,6 +421,16 @@ int get_count_order_ulong(unsigned long x)
        return order;
 }
 
+#ifdef POISON_FREE
+#define poison_free(ptr)                               \
+       do {                                            \
+               memset((ptr), 0x42, sizeof(*(ptr)));    \
+               free(ptr);                              \
+       } while (0)
+#else
+#define poison_free(ptr)       free(ptr)
+#endif
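
The poison_free() macro introduced above overwrites an object with the 0x42 pattern before returning it to the allocator, so a use-after-free reads recognizable poison instead of plausible stale data; on a 64-bit target a poisoned pointer becomes 0x4242424242424242, which faults immediately on dereference. One caveat: sizeof(*(ptr)) covers only the named type, so objects allocated with a trailing flexible array (as the per-order levels freed below appear to be) are only partially poisoned. A hypothetical illustration of what the poison catches, assuming <stdlib.h> is available as it is in this file:

    struct foo {
            struct foo *next;
    };

    static void poison_demo(void)
    {
            struct foo *f = malloc(sizeof(*f));

            if (!f)
                    return;
            f->next = NULL;
            poison_free(f);
            /*
             * A buggy late read of f->next would now see
             * 0x4242424242424242 rather than a recycled pointer,
             * turning a silent corruption into an immediate crash.
             */
    }
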
+
 static
 void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth);
 
@@ -468,7 +481,7 @@ struct ht_items_count *alloc_per_cpu_items_count(void)
 static
 void free_per_cpu_items_count(struct ht_items_count *count)
 {
-       free(count);
+       poison_free(count);
 }
 
 static
@@ -644,7 +657,7 @@ void cds_lfht_free_table_cb(struct rcu_head *head)
 {
        struct rcu_table *t =
                caa_container_of(head, struct rcu_table, head);
-       free(t);
+       poison_free(t);
 }
 
 static
@@ -652,7 +665,7 @@ void cds_lfht_free_level(struct rcu_head *head)
 {
        struct rcu_level *l =
                caa_container_of(head, struct rcu_level, head);
-       free(l);
+       poison_free(l);
 }
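
Both free callbacks in the two hunks above follow the standard liburcu deferred-reclamation idiom: the object embeds a struct rcu_head, call_rcu() schedules the callback, and caa_container_of() maps the head pointer back to the enclosing object once the grace period has elapsed. A sketch of the scheduling side, using the rcu_level layout implied by the callback; defer_free_level() itself is hypothetical:

    static void defer_free_level(struct rcu_level *l)
    {
            /*
             * cds_lfht_free_level(&l->head) will only run after
             * every pre-existing RCU read-side critical section
             * has finished, so no reader can still hold l when
             * poison_free() scribbles over it.
             */
            call_rcu(&l->head, cds_lfht_free_level);
    }
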
 
 /*
@@ -662,16 +675,28 @@ static
 void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
 {
        struct cds_lfht_node *iter_prev, *iter, *next, *new_next;
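+       /* Ring buffer tracing the last 64 traversal steps of this gc. */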
+       struct cds_lfht_node *iter_trace[64];
+       unsigned long trace_idx = 0;
 
+       memset(iter_trace, 0, sizeof(iter_trace));
        assert(!is_dummy(dummy));
        assert(!is_removed(dummy));
        assert(!is_dummy(node));
        assert(!is_removed(node));
        for (;;) {
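+               /* Trace marker 0x1: (re)starting the walk from dummy. */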
+               iter_trace[trace_idx++ & (64 - 1)] = (void *) 0x1;
                iter_prev = dummy;
                /* We can always skip the dummy node initially */
                iter = rcu_dereference(iter_prev->p.next);
+               iter_trace[trace_idx++ & (64 - 1)] = iter;
                assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
+               /*
+                * We should never be called with dummy (start of chain)
+                * and logically removed node (end of path compression
+                * marker) actually being the same node. This would be a
+                * bug in the algorithm implementation.
+                */
+               assert(dummy != node);
                for (;;) {
                        if (unlikely(!clear_flag(iter)))
                                return;
@@ -682,6 +707,7 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node
                                break;
                        iter_prev = clear_flag(iter);
                        iter = next;
+                       iter_trace[trace_idx++ & (64 - 1)] = iter;
                }
                assert(!is_removed(iter));
                if (is_dummy(iter))
@@ -689,6 +715,7 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node
                else
                        new_next = clear_flag(next);
                (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
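+               /* Trace marker 0x2: an unlink cmpxchg was attempted. */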
+               iter_trace[trace_idx++ & (64 - 1)] = (void *) 0x2;
        }
 }
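
The tracer added in the two hunks above is a fixed-size ring buffer: because 64 is a power of two, trace_idx++ & (64 - 1) wraps the index without a division, so iter_trace always holds the 64 most recent steps of the gc walk, and a debugger inspecting it after a failed assert can reconstruct the traversal path. The idiom in isolation; trace_push() and its globals are hypothetical:

    #define TRACE_SIZE	64	/* must stay a power of two */

    static void *trace_buf[TRACE_SIZE];
    static unsigned long trace_pos;

    static inline void trace_push(void *p)
    {
            /* Masking is equivalent to trace_pos % TRACE_SIZE;
             * the oldest entry is overwritten once the buffer
             * is full. */
            trace_buf[trace_pos++ & (TRACE_SIZE - 1)] = p;
    }
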
 
@@ -890,7 +917,11 @@ void fini_table(struct cds_lfht *ht, struct rcu_table *t,
 
                len = !i ? 1 : 1UL << (i - 1);
                dbg_printf("fini order %lu len: %lu\n", i, len);
-               /* Update table size */
+               /*
+                * Update table size: the table must be shrunk prior to
+                * unlinking, so that concurrent gc lookups only traverse
+                * dummy nodes which are not logically removed.
+                */
                t->size = 1UL << (i - 1);
                /* Unlink */
                for (j = 0; j < len; j++) {
@@ -1090,7 +1121,7 @@ int cds_lfht_delete_dummy(struct cds_lfht *ht)
                                bit_reverse_ulong(t->tbl[order]->nodes[i].reverse_hash));
                        assert(is_dummy(t->tbl[order]->nodes[i].next));
                }
-               free(t->tbl[order]);
+               poison_free(t->tbl[order]);
        }
        return 0;
 }
@@ -1110,9 +1141,9 @@ int cds_lfht_destroy(struct cds_lfht *ht)
        ret = cds_lfht_delete_dummy(ht);
        if (ret)
                return ret;
-       free(ht->t);
+       poison_free(ht->t);
        free_per_cpu_items_count(ht->percpu_count);
-       free(ht);
+       poison_free(ht);
        return ret;
 }
 
@@ -1264,7 +1295,7 @@ void do_resize_cb(struct rcu_head *head)
        pthread_mutex_lock(&ht->resize_mutex);
        _do_cds_lfht_resize(ht);
        pthread_mutex_unlock(&ht->resize_mutex);
-       free(work);
+       poison_free(work);
        cmm_smp_mb();   /* finish resize before decrement */
        uatomic_dec(&ht->in_progress_resize);
 }
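
The cmm_smp_mb() before the decrement pairs with the resize enqueue path: in_progress_resize is incremented before the work item is posted, and the barrier ensures all of the resize's stores are visible before the decrement, so a destroy path that sees the counter drop to zero also sees a fully completed resize. A sketch of what the posting side looks like; the work-item layout and queue_resize() are assumed for illustration, since cds_lfht_resize_lazy() itself is not shown in this diff:

    struct resize_work {		/* assumed layout */
            struct rcu_head head;
            struct cds_lfht *ht;
    };

    static void queue_resize(struct cds_lfht *ht)
    {
            struct resize_work *work = malloc(sizeof(*work));

            if (!work)
                    return;		/* error handling elided */
            work->ht = ht;
            uatomic_inc(&ht->in_progress_resize);
            cmm_smp_mb();	/* increment visible before work runs */
            call_rcu(&work->head, do_resize_cb);
    }

do_resize_cb() owns the work item once it is queued, which is why the hunk above releases it with poison_free() from inside the callback.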