X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=rculfhash.c;h=6a80049ddd1f783a21e117d63437eec5771ba514;hp=b74739f29a30f1953b3e6447fcf8daa44e069354;hb=53960c9f970e231b1ca5f53913b60d298540cf9a;hpb=dd51e0ad14ab677d0fab45b492150adce2ee863e

diff --git a/rculfhash.c b/rculfhash.c
index b74739f..6a80049 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -120,11 +120,11 @@
  * order bits reverse
  * 0 0 000 000
  * |
- * 1 | 1 001 100 <-
- * | | |
- * 2 | | 2 010 010 |
- * | | | 3 011 110 <- |
- * | | | | | |
+ * 1 | 1 001 100 <- <-
+ * | | | |
+ * 2 | | 2 010 010 | |
+ * | | | 3 011 110 | <- |
+ * | | | | | | |
  * 3 -> | | | 4 100 001 | |
  * -> | | 5 101 101 |
  * -> | 6 110 011
@@ -156,6 +156,9 @@
 #define dbg_printf(fmt, args...)
 #endif
 
+/* For testing */
+#define POISON_FREE
+
 /*
  * Per-CPU split-counters lazily update the global counter each 1024
  * addition/removal. It automatically keeps track of resize required.
@@ -166,6 +169,16 @@
 #define CHAIN_LEN_TARGET		1
 #define CHAIN_LEN_RESIZE_THRESHOLD	3
 
+/*
+ * Define the minimum table size. Protects against hash table resize overload
+ * when too many entries are added quickly before the resize can complete.
+ * This is especially the case if the table could be shrunk to a size of 1.
+ * TODO: we might want to make the add/remove operations help the resize to
+ * add or remove dummy nodes when a resize is ongoing, to ensure an upper
+ * bound on chain length.
+ */
+#define MIN_TABLE_SIZE			128
+
 #ifndef max
 #define max(a, b)	((a) > (b) ? (a) : (b))
 #endif
@@ -201,6 +214,7 @@ struct cds_lfht {
 	cds_lfht_hash_fct hash_fct;
 	cds_lfht_compare_fct compare_fct;
 	unsigned long hash_seed;
+	int flags;
 	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
 	unsigned int in_progress_resize, in_progress_destroy;
 	void (*cds_lfht_call_rcu)(struct rcu_head *head,
@@ -407,6 +421,16 @@ int get_count_order_ulong(unsigned long x)
 	return order;
 }
 
+#ifdef POISON_FREE
+#define poison_free(ptr)				\
+	do {						\
+		memset(ptr, 0x42, sizeof(*(ptr)));	\
+		free(ptr);				\
+	} while (0)
+#else
+#define poison_free(ptr)	free(ptr)
+#endif
+
 static
 void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth);
 
@@ -457,7 +481,7 @@ struct ht_items_count *alloc_per_cpu_items_count(void)
 static
 void free_per_cpu_items_count(struct ht_items_count *count)
 {
-	free(count);
+	poison_free(count);
 }
 
 static
@@ -567,6 +591,8 @@ void check_resize(struct cds_lfht *ht, struct rcu_table *t,
 {
 	unsigned long count;
 
+	if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
+		return;
 	count = uatomic_read(&ht->count);
 	/*
 	 * Use bucket-local length for small table expand and for
@@ -631,7 +657,7 @@ void cds_lfht_free_table_cb(struct rcu_head *head)
 {
 	struct rcu_table *t =
 		caa_container_of(head, struct rcu_table, head);
-	free(t);
+	poison_free(t);
 }
 
 static
@@ -639,7 +665,7 @@ void cds_lfht_free_level(struct rcu_head *head)
 {
 	struct rcu_level *l =
 		caa_container_of(head, struct rcu_level, head);
-	free(l);
+	poison_free(l);
 }
 
 /*
@@ -649,12 +675,28 @@ static
 void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
 {
 	struct cds_lfht_node *iter_prev, *iter, *next, *new_next;
-
+	struct cds_lfht_node *iter_trace[64];
+	unsigned long trace_idx = 0;
+
+	memset(iter_trace, 0, sizeof(iter_trace));
+	assert(!is_dummy(dummy));
+	assert(!is_removed(dummy));
+	assert(!is_dummy(node));
+	assert(!is_removed(node));
 	for (;;) {
+		iter_trace[trace_idx++ & (64 - 1)] = (void *) 0x1;
 		iter_prev = dummy;
 		/* We can always skip the dummy node initially */
 		iter = rcu_dereference(iter_prev->p.next);
+		iter_trace[trace_idx++ & (64 - 1)] = iter;
 		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
+		/*
+		 * We should never be called with the dummy (start of chain)
+		 * and a logically removed node (end of path compression
+		 * marker) being the same node. That would be a bug in
+		 * the algorithm implementation.
+		 */
+		assert(dummy != node);
 		for (;;) {
 			if (unlikely(!clear_flag(iter)))
 				return;
@@ -665,6 +707,7 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node
 				break;
 			iter_prev = clear_flag(iter);
 			iter = next;
+			iter_trace[trace_idx++ & (64 - 1)] = iter;
 		}
 		assert(!is_removed(iter));
 		if (is_dummy(iter))
@@ -672,6 +715,7 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node
 		else
 			new_next = clear_flag(next);
 		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
+		iter_trace[trace_idx++ & (64 - 1)] = (void *) 0x2;
 	}
 }
 
@@ -684,6 +728,8 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
 	struct _cds_lfht_node *lookup;
 	unsigned long hash, index, order;
 
+	assert(!is_dummy(node));
+	assert(!is_removed(node));
 	if (!t->size) {
 		assert(dummy);
 		node->p.next = flag_dummy(NULL);
@@ -699,14 +745,16 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
 		 */
 		index = hash & (t->size - 1);
 		order = get_count_order_ulong(index + 1);
-		lookup = &t->tbl[order]->nodes[index & ((1UL << (order - 1)) - 1)];
+		lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
 		iter_prev = (struct cds_lfht_node *) lookup;
 		/* We can always skip the dummy node initially */
 		iter = rcu_dereference(iter_prev->p.next);
 		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
 		for (;;) {
+			/* TODO: check if removed */
 			if (unlikely(!clear_flag(iter)))
 				goto insert;
+			/* TODO: check if removed */
 			if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
 				goto insert;
 			next = rcu_dereference(clear_flag(iter)->p.next);
@@ -728,6 +776,7 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
 	insert:
 		assert(node != clear_flag(iter));
 		assert(!is_removed(iter_prev));
+		assert(!is_removed(iter));
 		assert(iter_prev != node);
 		if (!dummy)
 			node->p.next = clear_flag(iter);
@@ -755,7 +804,7 @@ gc_end:
 	/* Garbage collect logically removed nodes in the bucket */
 	index = hash & (t->size - 1);
 	order = get_count_order_ulong(index + 1);
-	lookup = &t->tbl[order]->nodes[index & ((1UL << (order - 1)) - 1)];
+	lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
 	dummy_node = (struct cds_lfht_node *) lookup;
 	_cds_lfht_gc_bucket(dummy_node, node);
 	return node;
@@ -771,6 +820,8 @@ int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t,
 	unsigned long hash, index, order;
 
 	/* logically delete the node */
+	assert(!is_dummy(node));
+	assert(!is_removed(node));
 	old = rcu_dereference(node->p.next);
 	do {
 		next = old;
@@ -787,27 +838,16 @@ int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t,
 	/* We performed the (logical) deletion. */
 	flagged = 1;
 
-	if (dummy_removal)
-		node = clear_flag(node);
-
 	/*
 	 * Ensure that the node is not visible to readers anymore: lookup for
 	 * the node, and remove it (along with any other logically removed node)
 	 * if found.
	 */
 	hash = bit_reverse_ulong(node->p.reverse_hash);
-	/*
-	 * When removing a dummy node, we need to consider the lower
-	 * order table, so we don't end up looking up the dummy nodes we
-	 * are currently removing.
-	 */
-
-	if (dummy_removal)
-		index = hash & ((t->size == 1) ? 0 : (t->size >> 1) - 1);
-	else
-		index = hash & (t->size - 1);
+	assert(t->size > 0);
+	index = hash & (t->size - 1);
 	order = get_count_order_ulong(index + 1);
-	lookup = &t->tbl[order]->nodes[index & ((1UL << (order - 1)) - 1)];
+	lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
 	dummy = (struct cds_lfht_node *) lookup;
 	_cds_lfht_gc_bucket(dummy, node);
 end:
@@ -877,22 +917,26 @@ void fini_table(struct cds_lfht *ht, struct rcu_table *t,
 		len = !i ? 1 : 1UL << (i - 1);
 		dbg_printf("fini order %lu len: %lu\n", i, len);
+		/*
+		 * Update table size. Need to shrink this table prior to
+		 * removal so gc lookups use non-logically-removed dummy
+		 * nodes.
+		 */
+		t->size = 1UL << (i - 1);
 		/* Unlink */
 		for (j = 0; j < len; j++) {
-			struct cds_lfht_node *new_node =
+			struct cds_lfht_node *fini_node =
 				(struct cds_lfht_node *) &t->tbl[i]->nodes[j];
 
 			dbg_printf("fini entry: i %lu j %lu hash %lu\n",
 				   i, j, !i ? 0 : (1UL << (i - 1)) + j);
-			new_node->p.reverse_hash =
+			fini_node->p.reverse_hash =
 				bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
-			(void) _cds_lfht_remove(ht, t, new_node, 1);
+			(void) _cds_lfht_remove(ht, t, fini_node, 1);
 			if (CMM_LOAD_SHARED(ht->in_progress_destroy))
 				break;
 		}
 		ht->cds_lfht_call_rcu(&t->tbl[i]->head, cds_lfht_free_level);
-		/* Update table size */
-		t->size = (i == 1) ? 0 : 1UL << (i - 2);
 		dbg_printf("fini new size: %lu\n", t->size);
 		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
 			break;
@@ -905,6 +949,7 @@ struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
 			cds_lfht_compare_fct compare_fct,
 			unsigned long hash_seed,
 			unsigned long init_size,
+			int flags,
 			void (*cds_lfht_call_rcu)(struct rcu_head *head,
 					void (*func)(struct rcu_head *head)),
 			void (*cds_lfht_synchronize_rcu)(void))
@@ -925,10 +970,11 @@ struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
 	ht->percpu_count = alloc_per_cpu_items_count();
 	/* this mutex should not nest in read-side C.S. */
 	pthread_mutex_init(&ht->resize_mutex, NULL);
-	order = get_count_order_ulong(max(init_size, 1)) + 1;
+	order = get_count_order_ulong(max(init_size, MIN_TABLE_SIZE)) + 1;
 	ht->t = calloc(1, sizeof(struct cds_lfht)
 		       + (order * sizeof(struct rcu_level *)));
 	ht->t->size = 0;
+	ht->flags = flags;
 	pthread_mutex_lock(&ht->resize_mutex);
 	init_table(ht, ht->t, 0, order);
 	pthread_mutex_unlock(&ht->resize_mutex);
@@ -948,9 +994,9 @@ struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key
 	t = rcu_dereference(ht->t);
 	index = hash & (t->size - 1);
 	order = get_count_order_ulong(index + 1);
-	lookup = &t->tbl[order]->nodes[index & ((1UL << (order - 1)) - 1)];
+	lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
 	dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
-		   hash, index, order, index & ((1UL << (order - 1)) - 1));
+		   hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1)));
 	node = (struct cds_lfht_node *) lookup;
 	for (;;) {
 		if (unlikely(!node))
@@ -1075,7 +1121,7 @@ int cds_lfht_delete_dummy(struct cds_lfht *ht)
 				bit_reverse_ulong(t->tbl[order]->nodes[i].reverse_hash));
 			assert(is_dummy(t->tbl[order]->nodes[i].next));
 		}
-		free(t->tbl[order]);
+		poison_free(t->tbl[order]);
 	}
 	return 0;
 }
@@ -1095,9 +1141,9 @@ int cds_lfht_destroy(struct cds_lfht *ht)
 	ret = cds_lfht_delete_dummy(ht);
 	if (ret)
 		return ret;
-	free(ht->t);
+	poison_free(ht->t);
 	free_per_cpu_items_count(ht->percpu_count);
-	free(ht);
+	poison_free(ht);
 	return ret;
 }
 
@@ -1162,7 +1208,7 @@ void _do_cds_lfht_shrink(struct cds_lfht *ht, struct rcu_table *old_t,
 	unsigned long old_order, new_order;
 	struct rcu_table *new_t;
 
-	new_size = max(new_size, 1);
+	new_size = max(new_size, MIN_TABLE_SIZE);
 	old_order = get_count_order_ulong(old_size) + 1;
 	new_order = get_count_order_ulong(new_size) + 1;
 	printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
@@ -1173,6 +1219,7 @@ void _do_cds_lfht_shrink(struct cds_lfht *ht, struct rcu_table *old_t,
 	memcpy(&new_t->tbl, &old_t->tbl,
 	       new_order * sizeof(struct rcu_level *));
 	new_t->size = !new_order ? 1 : (1UL << (new_order - 1));
+	assert(new_t->size == new_size);
 	new_t->resize_target = new_t->size;
 	new_t->resize_initiated = 0;
 
@@ -1180,9 +1227,10 @@ void _do_cds_lfht_shrink(struct cds_lfht *ht, struct rcu_table *old_t,
 	rcu_assign_pointer(ht->t, new_t);
 
 	/*
-	 * We need to wait for all reader threads to reach Q.S. (and
+	 * We need to wait for all add operations to reach Q.S. (and
 	 * thus use the new table for lookups) before we can start
-	 * releasing the old dummy nodes.
+	 * releasing the old dummy nodes. Otherwise their lookup will
+	 * return a logically removed node as the insert position.
 	 */
 	ht->cds_lfht_synchronize_rcu();
 
@@ -1219,19 +1267,18 @@ unsigned long resize_target_update(struct rcu_table *t,
 }
 
 static
-unsigned long resize_target_update_count(struct rcu_table *t,
-					 unsigned long count)
+void resize_target_update_count(struct rcu_table *t,
+				unsigned long count)
 {
-	count = max(count, 1);
-	return uatomic_set(&t->resize_target, count);
+	count = max(count, MIN_TABLE_SIZE);
+	uatomic_set(&t->resize_target, count);
 }
 
 void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
 {
 	struct rcu_table *t = rcu_dereference(ht->t);
-	unsigned long target_size;
 
-	target_size = resize_target_update_count(t, new_size);
+	resize_target_update_count(t, new_size);
 	CMM_STORE_SHARED(t->resize_initiated, 1);
 	pthread_mutex_lock(&ht->resize_mutex);
 	_do_cds_lfht_resize(ht);
@@ -1248,7 +1295,7 @@ void do_resize_cb(struct rcu_head *head)
 	pthread_mutex_lock(&ht->resize_mutex);
 	_do_cds_lfht_resize(ht);
 	pthread_mutex_unlock(&ht->resize_mutex);
-	free(work);
+	poison_free(work);
 	cmm_smp_mb();	/* finish resize before decrement */
 	uatomic_dec(&ht->in_progress_resize);
 }
@@ -1277,9 +1324,10 @@ void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t,
 				unsigned long count)
 {
 	struct rcu_resize_work *work;
-	unsigned long target_size;
 
-	target_size = resize_target_update_count(t, count);
+	if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
+		return;
+	resize_target_update_count(t, count);
 	if (!CMM_LOAD_SHARED(t->resize_initiated)) {
 		uatomic_inc(&ht->in_progress_resize);
 		cmm_smp_mb();	/* increment resize count before calling it */
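
A note on the recurring "!order ? 0 : ..." change above: buckets live in
per-order arrays, where tbl[0] holds bucket index 0 and each tbl[i] for
i >= 1 holds indexes [2^(i-1), 2^i). For index 0 the computed order is 0,
and the old expression "index & ((1UL << (order - 1)) - 1)" then shifted by
(0UL - 1), an out-of-range shift count that is undefined behavior in C even
though the masked result happened to come out as 0; the guard presumably
exists to make the order-0 offset explicitly 0. The standalone sketch below
is not part of the patch; count_order_ulong() is a simplified stand-in for
the library's get_count_order_ulong() (it returns ceil(log2(x)) for x >= 1).
It shows the index-to-bucket mapping the patched expressions compute:

#include <stdio.h>

/* Simplified stand-in for get_count_order_ulong(): ceil(log2(x)), x >= 1. */
static unsigned long count_order_ulong(unsigned long x)
{
	unsigned long order = 0;

	x--;	/* order of 1 is 0 */
	while (x) {
		x >>= 1;
		order++;
	}
	return order;
}

int main(void)
{
	unsigned long size = 8;	/* example table size (power of two) */
	unsigned long hash;

	for (hash = 0; hash < size; hash++) {
		unsigned long index = hash & (size - 1);
		unsigned long order = count_order_ulong(index + 1);
		/* The "!order" guard avoids shifting by (0UL - 1) below. */
		unsigned long offset =
			index & (!order ? 0 : ((1UL << (order - 1)) - 1));

		printf("index %lu -> tbl[%lu]->nodes[%lu]\n",
		       index, order, offset);
	}
	return 0;
}

Compiled with any C compiler, this prints index 0 -> tbl[0]->nodes[0],
index 1 -> tbl[1]->nodes[0], index 2 -> tbl[2]->nodes[0],
index 3 -> tbl[2]->nodes[1], and so on: exactly the split-ordered layout
that the dummy nodes and the fini_table() unlink loop rely on.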