#define dbg_printf(fmt, args...)
#endif
+/* For testing */
+#define POISON_FREE
+
/*
 * Per-CPU split-counters lazily update the global counter every 1024
 * additions/removals. They also track when a resize is required (a
 * sketch of the scheme follows the struct below).
void (*cds_lfht_call_rcu)(struct rcu_head *head,
void (*func)(struct rcu_head *head));
void (*cds_lfht_synchronize_rcu)(void);
+ void (*cds_lfht_rcu_read_lock)(void);
+ void (*cds_lfht_rcu_read_unlock)(void);
unsigned long count; /* global approximate item count */
struct ht_items_count *percpu_count; /* per-cpu item count */
};
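/*
 * Aside (illustrative sketch, not part of the patch): the split-counter
 * scheme described above, with invented names. Each CPU bumps a private
 * slot without contention; every 1024th local addition folds the whole
 * batch into the shared approximate count. Assumes <urcu/uatomic.h>.
 */
struct sketch_items_count {
	unsigned long add;	/* per-cpu addition count */
};

static void sketch_count_add(struct sketch_items_count *percpu,
		unsigned long *global_count)
{
	unsigned long c = uatomic_add_return(&percpu->add, 1);

	/* Every 1024th local addition, publish the batch globally. */
	if (!(c & ((1UL << 10) - 1)))
		uatomic_add(global_count, 1UL << 10);
}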
return order;
}
+#ifdef POISON_FREE
+/* Fill freed memory with 0x42 so use-after-free accesses stand out. */
+#define poison_free(ptr) \
+ do { \
+ memset((ptr), 0x42, sizeof(*(ptr))); \
+ free(ptr); \
+ } while (0)
+#else
+#define poison_free(ptr) free(ptr)
+#endif
+
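/*
 * Aside (illustrative, not part of the patch): with POISON_FREE defined,
 * a use-after-free reads 0x42 fill bytes instead of plausible-looking
 * stale pointers, so the bug fails loudly and near the fault site.
 * Minimal demo, assuming <stdlib.h> and the macro above:
 */
struct demo_node {
	struct demo_node *next;
};

static void demo_poison(void)
{
	struct demo_node *n = malloc(sizeof(*n));

	n->next = n;
	poison_free(n);
	/*
	 * Any later dereference of n is still undefined behaviour, but
	 * its memory now holds 0x4242... rather than a valid-looking
	 * pointer, so the error surfaces immediately instead of
	 * silently corrupting a traversal.
	 */
}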
static
void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth);
static
void free_per_cpu_items_count(struct ht_items_count *count)
{
- free(count);
+ poison_free(count);
}
static
{
struct rcu_table *t =
caa_container_of(head, struct rcu_table, head);
- free(t);
+ poison_free(t);
}
static
{
struct rcu_level *l =
caa_container_of(head, struct rcu_level, head);
- free(l);
+ poison_free(l);
}
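/*
 * Aside (not part of the patch): the two callbacks above follow the
 * usual call_rcu deferral pattern. A struct rcu_head is embedded in the
 * object, and once a grace period has elapsed the callback recovers the
 * enclosing object with caa_container_of() and frees it, as in:
 *
 *	ht->cds_lfht_call_rcu(&t->tbl[i]->head, cds_lfht_free_level);
 *
 * poison_free() is safe at that point precisely because the grace
 * period guarantees no pre-existing reader still holds a reference.
 */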
/*
void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
{
struct cds_lfht_node *iter_prev, *iter, *next, *new_next;
+ struct cds_lfht_node *iter_trace[64]; /* debug: circular trace of iterator steps */
+ unsigned long trace_idx = 0;
+ memset(iter_trace, 0, sizeof(iter_trace));
assert(!is_dummy(dummy));
assert(!is_removed(dummy));
assert(!is_dummy(node));
assert(!is_removed(node));
for (;;) {
+ iter_trace[trace_idx++ & (64 - 1)] = (void *) 0x1; /* mark: restart from dummy */
iter_prev = dummy;
/* We can always skip the dummy node initially */
iter = rcu_dereference(iter_prev->p.next);
+ iter_trace[trace_idx++ & (64 - 1)] = iter;
assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
+ /*
+ * We should never be called with dummy (start of chain) and
+ * the logically removed node (end of path compression marker)
+ * being the same node: that would be a bug in the algorithm
+ * implementation.
+ */
+ assert(dummy != node);
for (;;) {
if (unlikely(!clear_flag(iter)))
return;
break;
iter_prev = clear_flag(iter);
iter = next;
+ iter_trace[trace_idx++ & (64 - 1)] = iter;
}
assert(!is_removed(iter));
if (is_dummy(iter))
else
new_next = clear_flag(next);
(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
+ iter_trace[trace_idx++ & (64 - 1)] = (void *) 0x2; /* mark: unlink cmpxchg done */
}
}
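/*
 * Aside (debugging note, not part of the patch): iter_trace is a
 * 64-entry circular buffer, so in a debugger the newest record is
 * iter_trace[(trace_idx - 1) & 63] and older steps follow backwards
 * modulo 64. The sentinel (void *) 0x1 marks a restart from the dummy
 * head, (void *) 0x2 marks a completed cmpxchg unlink attempt, and the
 * remaining entries are successive iter pointer values.
 */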
return -ENOENT;
}
+/*
+ * Holding RCU read lock to protect _cds_lfht_add against memory
+ * reclaim that could be performed by other call_rcu worker threads (ABA
+ * problem).
+ */
static
void init_table(struct cds_lfht *ht, struct rcu_table *t,
unsigned long first_order, unsigned long len_order)
dbg_printf("init order %lu len: %lu\n", i, len);
t->tbl[i] = calloc(1, sizeof(struct rcu_level)
+ (len * sizeof(struct _cds_lfht_node)));
+ ht->cds_lfht_rcu_read_lock();
for (j = 0; j < len; j++) {
struct cds_lfht_node *new_node =
(struct cds_lfht_node *) &t->tbl[i]->nodes[j];
if (CMM_LOAD_SHARED(ht->in_progress_destroy))
break;
}
+ ht->cds_lfht_rcu_read_unlock();
/* Update table size */
t->size = !i ? 1 : (1UL << i);
dbg_printf("init new size: %lu\n", t->size);
t->resize_initiated = 0;
}
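/*
 * Aside (reasoning sketch, not part of the patch): without the
 * read-side critical section above, this worker could load a node
 * pointer, another call_rcu worker could free that node and the
 * allocator reuse its memory, and a subsequent cmpxchg would then
 * succeed against a stale-but-bitwise-equal pointer: the classic ABA
 * problem. Holding cds_lfht_rcu_read_lock() across the traversal keeps
 * the grace period open, so no observed node can be reclaimed in the
 * meantime.
 */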
+/*
+ * Holding RCU read lock to protect _cds_lfht_remove against memory
+ * reclaim that could be performed by other call_rcu worker threads (ABA
+ * problem).
+ */
static
void fini_table(struct cds_lfht *ht, struct rcu_table *t,
unsigned long first_order, unsigned long len_order)
* removal so gc lookups use non-logically-removed dummy
* nodes.
*/
- t->size = (i == 1) ? 0 : 1UL << (i - 2);
+ t->size = 1UL << (i - 1);
/* Unlink */
+ ht->cds_lfht_rcu_read_lock();
for (j = 0; j < len; j++) {
struct cds_lfht_node *fini_node =
(struct cds_lfht_node *) &t->tbl[i]->nodes[j];
if (CMM_LOAD_SHARED(ht->in_progress_destroy))
break;
}
+ ht->cds_lfht_rcu_read_unlock();
ht->cds_lfht_call_rcu(&t->tbl[i]->head, cds_lfht_free_level);
dbg_printf("fini new size: %lu\n", t->size);
if (CMM_LOAD_SHARED(ht->in_progress_destroy))
int flags,
void (*cds_lfht_call_rcu)(struct rcu_head *head,
void (*func)(struct rcu_head *head)),
- void (*cds_lfht_synchronize_rcu)(void))
+ void (*cds_lfht_synchronize_rcu)(void),
+ void (*cds_lfht_rcu_read_lock)(void),
+ void (*cds_lfht_rcu_read_unlock)(void))
{
struct cds_lfht *ht;
unsigned long order;
ht->hash_seed = hash_seed;
ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu;
+ ht->cds_lfht_rcu_read_lock = cds_lfht_rcu_read_lock;
+ ht->cds_lfht_rcu_read_unlock = cds_lfht_rcu_read_unlock;
ht->in_progress_resize = 0;
ht->percpu_count = alloc_per_cpu_items_count();
/* this mutex should not nest in read-side C.S. */
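/*
 * Aside (usage sketch, not part of the patch): callers now hand the
 * table their RCU flavor's read-lock pair at creation time. Wiring
 * against the default urcu flavor might look as follows; the leading
 * parameters are elided here because this patch leaves them unchanged,
 * and CDS_LFHT_AUTO_RESIZE stands in for whatever flags the caller
 * wants:
 *
 *	ht = cds_lfht_new(..., CDS_LFHT_AUTO_RESIZE,
 *			call_rcu, synchronize_rcu,
 *			rcu_read_lock, rcu_read_unlock);
 */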
bit_reverse_ulong(t->tbl[order]->nodes[i].reverse_hash));
assert(is_dummy(t->tbl[order]->nodes[i].next));
}
- free(t->tbl[order]);
+ poison_free(t->tbl[order]);
}
return 0;
}
ret = cds_lfht_delete_dummy(ht);
if (ret)
return ret;
- free(ht->t);
+ poison_free(ht->t);
free_per_cpu_items_count(ht->percpu_count);
- free(ht);
+ poison_free(ht);
return ret;
}
pthread_mutex_lock(&ht->resize_mutex);
_do_cds_lfht_resize(ht);
pthread_mutex_unlock(&ht->resize_mutex);
- free(work);
+ poison_free(work);
cmm_smp_mb(); /* finish resize before decrement */
uatomic_dec(&ht->in_progress_resize);
}
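/*
 * Aside (not part of the patch): in_progress_resize acts as a count of
 * outstanding resize work, and the cmm_smp_mb() above orders the
 * resize's stores before the decrement. A teardown path can therefore
 * wait for in-flight resizes to drain along the lines of:
 *
 *	while (uatomic_read(&ht->in_progress_resize))
 *		poll(NULL, 0, 100);	// wait for resize workers
 */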