X-Git-Url: https://git.liburcu.org/?a=blobdiff_plain;f=rculfhash.c;h=44a589b03361a33c7625d1b336f05da77d6b1bca;hb=f6fdd688eb73510a5192a4560c34d7e8caac77aa;hp=193857647439ef9cc3cfbc222184cab4c32c3360;hpb=4c3a0329b746828a7d686977dec0575a63db5c8b;p=urcu.git

diff --git a/rculfhash.c b/rculfhash.c
index 1938576..44a589b 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -120,11 +120,11 @@
  * order bits reverse
  * 0 0 000 000
  * |
- * 1 | 1 001 100 <-
- * | | |
- * 2 | | 2 010 010 |
- * | | | 3 011 110 <- |
- * | | | | | |
+ * 1 | 1 001 100 <- <-
+ * | | | |
+ * 2 | | 2 010 010 | |
+ * | | | 3 011 110 | <- |
+ * | | | | | | |
  * 3 -> | | | 4 100 001 | |
  *      -> | | 5 101 101 |
  *         -> | 6 110 011
@@ -166,6 +166,16 @@
 #define CHAIN_LEN_TARGET		1
 #define CHAIN_LEN_RESIZE_THRESHOLD	3
 
+/*
+ * Define the minimum table size. Protects against hash table resize overload
+ * when too many entries are added quickly before the resize can complete.
+ * This is especially the case if the table could be shrinked to a size of 1.
+ * TODO: we might want to make the add/remove operations help the resize to
+ * add or remove dummy nodes when a resize is ongoing to ensure upper-bound on
+ * chain length.
+ */
+#define MIN_TABLE_SIZE			128
+
 #ifndef max
 #define max(a, b)	((a) > (b) ? (a) : (b))
 #endif
@@ -699,7 +709,7 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
 	 */
 	index = hash & (t->size - 1);
 	order = get_count_order_ulong(index + 1);
-	lookup = &t->tbl[order]->nodes[index & ((1UL << (order - 1)) - 1)];
+	lookup = &t->tbl[order]->nodes[index & ((!order ? 0 : (1UL << (order - 1))) - 1)];
 	iter_prev = (struct cds_lfht_node *) lookup;
 	/* We can always skip the dummy node initially */
 	iter = rcu_dereference(iter_prev->p.next);
@@ -755,7 +765,7 @@ gc_end:
 	/* Garbage collect logically removed nodes in the bucket */
 	index = hash & (t->size - 1);
 	order = get_count_order_ulong(index + 1);
-	lookup = &t->tbl[order]->nodes[index & ((1UL << (order - 1)) - 1)];
+	lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
 	dummy_node = (struct cds_lfht_node *) lookup;
 	_cds_lfht_gc_bucket(dummy_node, node);
 	return node;
@@ -799,7 +809,7 @@ int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t,
 	assert(t->size > 0);
 	index = hash & (t->size - 1);
 	order = get_count_order_ulong(index + 1);
-	lookup = &t->tbl[order]->nodes[index & ((1UL << (order - 1)) - 1)];
+	lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
 	dummy = (struct cds_lfht_node *) lookup;
 	_cds_lfht_gc_bucket(dummy, node);
 end:
@@ -917,7 +927,7 @@ struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
 	ht->percpu_count = alloc_per_cpu_items_count();
 	/* this mutex should not nest in read-side C.S. */
 	pthread_mutex_init(&ht->resize_mutex, NULL);
-	order = get_count_order_ulong(max(init_size, 1)) + 1;
+	order = get_count_order_ulong(max(init_size, MIN_TABLE_SIZE)) + 1;
 	ht->t = calloc(1, sizeof(struct cds_lfht)
 		       + (order * sizeof(struct rcu_level *)));
 	ht->t->size = 0;
@@ -940,9 +950,9 @@ struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key
 	t = rcu_dereference(ht->t);
 	index = hash & (t->size - 1);
 	order = get_count_order_ulong(index + 1);
-	lookup = &t->tbl[order]->nodes[index & ((1UL << (order - 1)) - 1)];
+	lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1))) - 1)];
 	dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
-		   hash, index, order, index & ((1UL << (order - 1)) - 1));
+		   hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1)));
 	node = (struct cds_lfht_node *) lookup;
 	for (;;) {
 		if (unlikely(!node))
@@ -1154,7 +1164,7 @@ void _do_cds_lfht_shrink(struct cds_lfht *ht, struct rcu_table *old_t,
 	unsigned long old_order, new_order;
 	struct rcu_table *new_t;
 
-	new_size = max(new_size, 1);
+	new_size = max(new_size, MIN_TABLE_SIZE);
 	old_order = get_count_order_ulong(old_size) + 1;
 	new_order = get_count_order_ulong(new_size) + 1;
 	printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
@@ -1214,7 +1224,7 @@ static
 unsigned long resize_target_update_count(struct rcu_table *t,
 					 unsigned long count)
 {
-	count = max(count, 1);
+	count = max(count, MIN_TABLE_SIZE);
 	return uatomic_set(&t->resize_target, count);
 }
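
Note (editor's sketch, not part of the patch): the hunks above all touch the same bucket-index arithmetic, so the standalone C program below illustrates it under the patch's assumptions. index selects a bucket, order = get_count_order_ulong(index + 1) picks which per-order bucket array (t->tbl[order]) holds it, and the "!order ? 0 : ..." test guards the otherwise-undefined shift 1UL << (order - 1) when index is 0. count_order_ulong() is a local stand-in for urcu's get_count_order_ulong(); everything else here is hypothetical and exists only in this sketch.

/* Standalone sketch (hypothetical, not from the urcu tree): shows how a hash
 * maps to (index, order, aridx) in the split-ordered table, including the
 * order == 0 case guarded by the patch, and the MIN_TABLE_SIZE clamp applied
 * through max() as in _do_cds_lfht_shrink() and resize_target_update_count().
 */
#include <stdio.h>

#define MIN_TABLE_SIZE	128	/* same value the patch introduces */

#ifndef max
#define max(a, b)	((a) > (b) ? (a) : (b))
#endif

/* Local stand-in for urcu's get_count_order_ulong(): ceil(log2(x)), 0 for x <= 1. */
static unsigned long count_order_ulong(unsigned long x)
{
	unsigned long order = 0;

	while ((1UL << order) < x)
		order++;
	return order;
}

int main(void)
{
	/* A requested size of 9 is clamped up to MIN_TABLE_SIZE, as the patch does. */
	unsigned long size = max(9UL, MIN_TABLE_SIZE);
	unsigned long hash = 0xdeadbeefUL;
	unsigned long index, order, aridx;

	index = hash & (size - 1);
	order = count_order_ulong(index + 1);
	/*
	 * Bucket arrays are allocated per order: tbl[0] holds index 0, and
	 * tbl[order] holds indexes [2^(order-1), 2^order).  For order == 0
	 * the shift "1UL << (order - 1)" would be undefined, hence the
	 * "!order ? 0 : ..." guard added by the patch.
	 */
	aridx = index & (!order ? 0 : ((1UL << (order - 1)) - 1));

	printf("size %lu hash %#lx -> index %lu order %lu aridx %lu\n",
	       size, hash, index, order, aridx);
	return 0;
}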