X-Git-Url: https://git.liburcu.org/?a=blobdiff_plain;f=rculfhash.c;h=f1e6674bd7f1cc63f6eb09a0946904fc1dfc4f10;hb=dd4505e03a99bb149c71ebabfa5d5e372a61d737;hp=a599961a45bc073d4ad351825d79df71310d17f5;hpb=f9830efd2b98c91089d105a21b21ae4cb52483ba;p=urcu.git

diff --git a/rculfhash.c b/rculfhash.c
index a599961..f1e6674 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -46,7 +46,8 @@
 #define dbg_printf(args...)
 #endif
 
-#define BUCKET_SIZE_RESIZE_THRESHOLD 8
+#define CHAIN_LEN_TARGET 4
+#define CHAIN_LEN_RESIZE_THRESHOLD 16
 
 #ifndef max
 #define max(a, b) ((a) > (b) ? (a) : (b))
@@ -62,7 +63,8 @@ struct rcu_table {
 struct rcu_ht {
 	struct rcu_table *t;		/* shared */
 	ht_hash_fct hash_fct;
-	void *hashseed;
+	ht_compare_fct compare_fct;
+	unsigned long hash_seed;
 	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
 	void (*ht_call_rcu)(struct rcu_head *head,
 		      void (*func)(struct rcu_head *head));
@@ -164,8 +166,9 @@ static
 void check_resize(struct rcu_ht *ht, struct rcu_table *t,
 		  uint32_t chain_len)
 {
-	if (chain_len >= BUCKET_SIZE_RESIZE_THRESHOLD)
-		ht_resize_lazy(ht, t, log2_u32(chain_len));
+	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
+		ht_resize_lazy(ht, t,
+			log2_u32(chain_len - CHAIN_LEN_TARGET));
 }
 
 static
@@ -217,7 +220,7 @@ void _ht_add(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
 		iter = clear_flag(rcu_dereference(iter_prev->next));
 		if (unlikely(!iter))
 			break;
-		if (iter->reverse_hash < node->reverse_hash)
+		if (iter->reverse_hash > node->reverse_hash)
 			break;
 		iter_prev = iter;
 		check_resize(ht, t, ++chain_len);
@@ -252,7 +255,7 @@ retry:
 		iter = clear_flag(rcu_dereference(iter_prev->next));
 		if (unlikely(!iter))
 			break;
-		if (iter->reverse_hash < node->reverse_hash)
+		if (unlikely(iter->reverse_hash > node->reverse_hash))
 			break;
 		if (iter == node) {
 			found = 1;
@@ -314,7 +317,8 @@ void init_table(struct rcu_ht *ht, struct rcu_table *t,
 }
 
 struct rcu_ht *ht_new(ht_hash_fct hash_fct,
-		      void *hashseed,
+		      ht_compare_fct compare_fct,
+		      unsigned long hash_seed,
 		      unsigned long init_size,
 		      void (*ht_call_rcu)(struct rcu_head *head,
 				void (*func)(struct rcu_head *head)))
@@ -323,7 +327,8 @@ struct rcu_ht *ht_new(ht_hash_fct hash_fct,
 
 	ht = calloc(1, sizeof(struct rcu_ht));
 	ht->hash_fct = hash_fct;
-	ht->hashseed = hashseed;
+	ht->compare_fct = compare_fct;
+	ht->hash_seed = hash_seed;
 	ht->ht_call_rcu = ht_call_rcu;
 	/* this mutex should not nest in read-side C.S. */
 	pthread_mutex_init(&ht->resize_mutex, NULL);
@@ -336,13 +341,13 @@ struct rcu_ht *ht_new(ht_hash_fct hash_fct,
 	return ht;
 }
 
-struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key)
+struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
 {
 	struct rcu_table *t;
 	struct rcu_ht_node *node;
 	unsigned long hash, reverse_hash;
 
-	hash = ht->hash_fct(ht->hashseed, key);
+	hash = ht->hash_fct(key, key_len, ht->hash_seed);
 	reverse_hash = bit_reverse_ulong(hash);
 
 	t = rcu_dereference(ht->t);
@@ -350,12 +355,12 @@ struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key)
 	for (;;) {
 		if (unlikely(!node))
 			break;
-		if (node->reverse_hash > reverse_hash) {
+		if (unlikely(node->reverse_hash > reverse_hash)) {
 			node = NULL;
 			break;
 		}
-		if (node->key == key) {
-			if (is_removed(rcu_dereference(node->next)))
+		if (!ht->compare_fct(node->key, node->key_len, key, key_len)) {
+			if (unlikely(is_removed(rcu_dereference(node->next))))
 				node = NULL;
 			break;
 		}
@@ -368,7 +373,7 @@ void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
 {
 	struct rcu_table *t;
 
-	node->hash = ht->hash_fct(ht->hashseed, node->key);
+	node->hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
 	node->reverse_hash = bit_reverse_ulong((unsigned long) node->hash);
 
 	t = rcu_dereference(ht->t);
@@ -451,7 +456,6 @@ void _do_ht_resize(struct rcu_ht *ht)
 	memcpy(&new_t->tbl, &old_t->tbl,
 	       old_size * sizeof(struct rcu_ht_node *));
 	init_table(ht, new_t, old_size, new_size - old_size);
-	new_t->size = new_size;
 	/* Changing table and size atomically wrt lookups */
 	rcu_assign_pointer(ht->t, new_t);
 	ht->ht_call_rcu(&old_t->head, ht_free_table_cb);
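
For reference, below is a minimal caller-side sketch of the reworked API as it appears in the hunks above. The exact ht_hash_fct/ht_compare_fct typedefs, the struct rcu_ht_node layout and the header names are not shown in this diff, so they are inferred from the call sites; the byte-wise hash, the memcmp-based compare, the seed and the initial size are illustrative stand-ins, not the project's own helpers.

#include <stdlib.h>
#include <string.h>
#include <urcu.h>		/* rcu_read_lock()/rcu_read_unlock(); the call_rcu()
				 * declaration location varies across liburcu versions */
#include "rculfhash.h"		/* assumed header declaring ht_new()/ht_lookup()/ht_add() */

/* Stand-in hash, matching the new call sites: hash_fct(key, key_len, seed). */
static unsigned long example_hash(void *key, size_t length, unsigned long seed)
{
	unsigned char *p = key;
	unsigned long h = seed;
	size_t i;

	for (i = 0; i < length; i++)
		h = h * 31 + p[i];
	return h;
}

/* Stand-in compare, returning 0 on match as expected by the
 * !ht->compare_fct(...) test in ht_lookup(); return type inferred. */
static unsigned long example_compare(void *key1, size_t key1_len,
				     void *key2, size_t key2_len)
{
	if (key1_len != key2_len)
		return 1;
	return !!memcmp(key1, key2, key1_len);
}

static struct rcu_ht *example_create(void)
{
	/* ht_new() now takes the compare callback and an unsigned long seed
	 * instead of the old void *hashseed; 4096 is an arbitrary example
	 * initial size, and call_rcu() is the liburcu callback matching the
	 * ht_call_rcu parameter. */
	return ht_new(example_hash, example_compare, 0x42UL, 4096, call_rcu);
}

static int example_contains(struct rcu_ht *ht, void *key, size_t key_len)
{
	struct rcu_ht_node *node;
	int found;

	rcu_read_lock();
	node = ht_lookup(ht, key, key_len);	/* key_len argument is new in this diff */
	found = (node != NULL);
	rcu_read_unlock();
	return found;
}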