X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=rculfhash.c;h=6a80049ddd1f783a21e117d63437eec5771ba514;hp=0c153982a1aac87cf0b785ecc8fe93fcec27fafa;hb=53960c9f970e231b1ca5f53913b60d298540cf9a;hpb=b8af50117bfbb4dacc5e02cdf13b760f7ff9fe3f diff --git a/rculfhash.c b/rculfhash.c index 0c15398..6a80049 100644 --- a/rculfhash.c +++ b/rculfhash.c @@ -156,6 +156,9 @@ #define dbg_printf(fmt, args...) #endif +/* For testing */ +#define POISON_FREE + /* * Per-CPU split-counters lazily update the global counter each 1024 * addition/removal. It automatically keeps track of resize required. @@ -418,6 +421,16 @@ int get_count_order_ulong(unsigned long x) return order; } +#ifdef POISON_FREE +#define poison_free(ptr) \ + do { \ + memset(ptr, 0x42, sizeof(*(ptr))); \ + free(ptr); \ + } while (0) +#else +#define poison_free(ptr) free(ptr) +#endif + static void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth); @@ -468,7 +481,7 @@ struct ht_items_count *alloc_per_cpu_items_count(void) static void free_per_cpu_items_count(struct ht_items_count *count) { - free(count); + poison_free(count); } static @@ -644,7 +657,7 @@ void cds_lfht_free_table_cb(struct rcu_head *head) { struct rcu_table *t = caa_container_of(head, struct rcu_table, head); - free(t); + poison_free(t); } static @@ -652,7 +665,7 @@ void cds_lfht_free_level(struct rcu_head *head) { struct rcu_level *l = caa_container_of(head, struct rcu_level, head); - free(l); + poison_free(l); } /* @@ -662,12 +675,28 @@ static void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node) { struct cds_lfht_node *iter_prev, *iter, *next, *new_next; - + struct cds_lfht_node *iter_trace[64]; + unsigned long trace_idx = 0; + + memset(iter_trace, 0, sizeof(iter_trace)); + assert(!is_dummy(dummy)); + assert(!is_removed(dummy)); + assert(!is_dummy(node)); + assert(!is_removed(node)); for (;;) { + iter_trace[trace_idx++ & (64 - 1)] = (void *) 0x1; iter_prev = dummy; /* We can always skip the dummy node initially */ iter = rcu_dereference(iter_prev->p.next); + iter_trace[trace_idx++ & (64 - 1)] = iter; assert(iter_prev->p.reverse_hash <= node->p.reverse_hash); + /* + * We should never be called with dummy (start of chain) + * and logically removed node (end of path compression + * marker) being the actual same node. This would be a + * bug in the algorithm implementation. 
+ */ + assert(dummy != node); for (;;) { if (unlikely(!clear_flag(iter))) return; @@ -678,6 +707,7 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node break; iter_prev = clear_flag(iter); iter = next; + iter_trace[trace_idx++ & (64 - 1)] = iter; } assert(!is_removed(iter)); if (is_dummy(iter)) @@ -685,6 +715,7 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node else new_next = clear_flag(next); (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next); + iter_trace[trace_idx++ & (64 - 1)] = (void *) 0x2; } } @@ -697,6 +728,8 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t, struct _cds_lfht_node *lookup; unsigned long hash, index, order; + assert(!is_dummy(node)); + assert(!is_removed(node)); if (!t->size) { assert(dummy); node->p.next = flag_dummy(NULL); @@ -718,8 +751,10 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t, iter = rcu_dereference(iter_prev->p.next); assert(iter_prev->p.reverse_hash <= node->p.reverse_hash); for (;;) { + /* TODO: check if removed */ if (unlikely(!clear_flag(iter))) goto insert; + /* TODO: check if removed */ if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)) goto insert; next = rcu_dereference(clear_flag(iter)->p.next); @@ -741,6 +776,7 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t, insert: assert(node != clear_flag(iter)); assert(!is_removed(iter_prev)); + assert(!is_removed(iter)); assert(iter_prev != node); if (!dummy) node->p.next = clear_flag(iter); @@ -784,6 +820,8 @@ int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t, unsigned long hash, index, order; /* logically delete the node */ + assert(!is_dummy(node)); + assert(!is_removed(node)); old = rcu_dereference(node->p.next); do { next = old; @@ -800,9 +838,6 @@ int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t, /* We performed the (logical) deletion. */ flagged = 1; - if (dummy_removal) - node = clear_flag(node); - /* * Ensure that the node is not visible to readers anymore: lookup for * the node, and remove it (along with any other logically removed node) @@ -882,18 +917,22 @@ void fini_table(struct cds_lfht *ht, struct rcu_table *t, len = !i ? 1 : 1UL << (i - 1); dbg_printf("fini order %lu len: %lu\n", i, len); - /* Update table size */ + /* + * Update table size. Need to shrink this table prior to + * removal so gc lookups use non-logically-removed dummy + * nodes. + */ t->size = 1UL << (i - 1); /* Unlink */ for (j = 0; j < len; j++) { - struct cds_lfht_node *new_node = + struct cds_lfht_node *fini_node = (struct cds_lfht_node *) &t->tbl[i]->nodes[j]; dbg_printf("fini entry: i %lu j %lu hash %lu\n", i, j, !i ? 0 : (1UL << (i - 1)) + j); - new_node->p.reverse_hash = + fini_node->p.reverse_hash = bit_reverse_ulong(!i ? 
0 : (1UL << (i - 1)) + j); - (void) _cds_lfht_remove(ht, t, new_node, 1); + (void) _cds_lfht_remove(ht, t, fini_node, 1); if (CMM_LOAD_SHARED(ht->in_progress_destroy)) break; } @@ -1082,7 +1121,7 @@ int cds_lfht_delete_dummy(struct cds_lfht *ht) bit_reverse_ulong(t->tbl[order]->nodes[i].reverse_hash)); assert(is_dummy(t->tbl[order]->nodes[i].next)); } - free(t->tbl[order]); + poison_free(t->tbl[order]); } return 0; } @@ -1102,9 +1141,9 @@ int cds_lfht_destroy(struct cds_lfht *ht) ret = cds_lfht_delete_dummy(ht); if (ret) return ret; - free(ht->t); + poison_free(ht->t); free_per_cpu_items_count(ht->percpu_count); - free(ht); + poison_free(ht); return ret; } @@ -1180,6 +1219,7 @@ void _do_cds_lfht_shrink(struct cds_lfht *ht, struct rcu_table *old_t, memcpy(&new_t->tbl, &old_t->tbl, new_order * sizeof(struct rcu_level *)); new_t->size = !new_order ? 1 : (1UL << (new_order - 1)); + assert(new_t->size == new_size); new_t->resize_target = new_t->size; new_t->resize_initiated = 0; @@ -1187,9 +1227,10 @@ void _do_cds_lfht_shrink(struct cds_lfht *ht, struct rcu_table *old_t, rcu_assign_pointer(ht->t, new_t); /* - * We need to wait for all reader threads to reach Q.S. (and + * We need to wait for all add operations to reach Q.S. (and * thus use the new table for lookups) before we can start - * releasing the old dummy nodes. + * releasing the old dummy nodes. Otherwise their lookup will + * return a logically removed node as insert position. */ ht->cds_lfht_synchronize_rcu(); @@ -1254,7 +1295,7 @@ void do_resize_cb(struct rcu_head *head) pthread_mutex_lock(&ht->resize_mutex); _do_cds_lfht_resize(ht); pthread_mutex_unlock(&ht->resize_mutex); - free(work); + poison_free(work); cmm_smp_mb(); /* finish resize before decrement */ uatomic_dec(&ht->in_progress_resize); }
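
Note on the poison_free() macro added above (a standalone sketch, not part of
the patch): overwriting an object with a recognizable byte pattern (0x42)
before handing it back to the allocator turns silent use-after-free bugs into
obviously bogus values that stand out in a debugger or core dump. The struct
and names below are illustrative only:

#include <stdlib.h>
#include <string.h>

/*
 * Same pattern as the patch: scribble a poison value over the object
 * before freeing it, so a reader still holding a stale pointer sees
 * 0x42 bytes instead of plausible-looking data.
 */
#define poison_free(ptr)				\
	do {						\
		memset(ptr, 0x42, sizeof(*(ptr)));	\
		free(ptr);				\
	} while (0)

struct example_node {	/* illustrative type, not from rculfhash.c */
	unsigned long key;
	struct example_node *next;
};

int main(void)
{
	struct example_node *node = malloc(sizeof(*node));

	if (!node)
		return 1;
	node->key = 1;
	node->next = NULL;
	poison_free(node);	/* memset() runs before free(), poisoning key/next */
	return 0;
}

One limitation worth keeping in mind: sizeof(*(ptr)) covers only the pointed-to
type, so an allocation that ends in a flexible array member is poisoned only up
to its fixed-size header.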
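
Similarly, the iter_trace[] instrumentation added to _cds_lfht_gc_bucket()
keeps a small power-of-two trace ring: indices are masked with (64 - 1), so
the array always holds the last 64 recorded iteration steps and can be
inspected post-mortem when an assertion fires. A minimal standalone sketch of
that masking trick (names are illustrative, not taken from the patch):

#include <stdio.h>

#define TRACE_LEN	64	/* must be a power of two for the mask to work */

static void *trace_ring[TRACE_LEN];
static unsigned long trace_idx;

/*
 * Record a pointer; masking the index wraps it around, so only the
 * last TRACE_LEN entries are kept, with no branch and no modulo.
 */
static void trace_record(void *p)
{
	trace_ring[trace_idx++ & (TRACE_LEN - 1)] = p;
}

int main(void)
{
	int samples[100];

	for (int i = 0; i < 100; i++)
		trace_record(&samples[i]);

	/* After 100 records, the ring retains entries 36..99 (the last 64). */
	printf("oldest retained entry: %p\n",
	       trace_ring[trace_idx & (TRACE_LEN - 1)]);
	return 0;
}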