X-Git-Url: https://git.liburcu.org/?a=blobdiff_plain;f=rculfhash.c;h=824a940391461a1a3b7f1db0a847be38e84dd741;hb=17f31d1b07ffb59db8bad3ce2ae8f824ed820de0;hp=ce09a0f93261d9a5c2a76c0a530e7cca6118fe5f;hpb=76a73da8b055a8db8b067e992b59222b653bd87d;p=urcu.git

diff --git a/rculfhash.c b/rculfhash.c
index ce09a0f..824a940 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -169,7 +169,8 @@
 /*
  * Define the minimum table size.
  */
-#define MIN_TABLE_SIZE			128
+//#define MIN_TABLE_SIZE		128
+#define MIN_TABLE_SIZE			1
 
 #if (CAA_BITS_PER_LONG == 32)
 #define MAX_TABLE_ORDER			32
@@ -194,6 +195,9 @@
 #define DUMMY_FLAG		(1UL << 1)
 #define FLAGS_MASK		((1UL << 2) - 1)
 
+/* Value of the end pointer. Should not interact with flags. */
+#define END_VALUE		0x4
+
 struct ht_items_count {
 	unsigned long add, remove;
 } __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
@@ -652,7 +656,19 @@ struct cds_lfht_node *flag_dummy(struct cds_lfht_node *node)
 {
 	return (struct cds_lfht_node *) (((unsigned long) node) | DUMMY_FLAG);
 }
- 
+
+static
+struct cds_lfht_node *get_end(void)
+{
+	return (struct cds_lfht_node *) END_VALUE;
+}
+
+static
+int is_end(struct cds_lfht_node *node)
+{
+	return clear_flag(node) == (struct cds_lfht_node *) END_VALUE;
+}
+
 static
 unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
 {
@@ -709,7 +725,8 @@ int _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
 	 */
 	assert(dummy != node);
 	for (;;) {
-		if (unlikely(!clear_flag(iter)))
+		assert(iter != NULL);
+		if (unlikely(is_end(iter)))
 			return 0;
 		if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
 			return 0;
@@ -724,6 +741,7 @@ int _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
 			new_next = flag_dummy(clear_flag(next));
 		else
 			new_next = clear_flag(next);
+		assert(new_next != NULL);
 		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
 	}
 	return 0;
@@ -739,12 +757,13 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
 			*dummy_node;
 	struct _cds_lfht_node *lookup;
 	unsigned long hash, index, order;
+	int force_dummy = 0;
 
 	assert(!is_dummy(node));
 	assert(!is_removed(node));
 	if (!size) {
 		assert(dummy);
-		node->p.next = flag_dummy(NULL);
+		node->p.next = flag_dummy(get_end());
 		return node;	/* Initial first add (head) */
 	}
 	hash = bit_reverse_ulong(node->p.reverse_hash);
@@ -773,6 +792,7 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
 		}
 		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
 		for (;;) {
+			assert(iter != NULL);
 			/*
 			 * When adding a dummy node, we allow concurrent
 			 * add/removal to help. If we find the dummy node in
@@ -780,7 +800,7 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
 			 */
 			if (unlikely(dummy && clear_flag(iter) == node))
 				return node;
-			if (unlikely(!clear_flag(iter)))
+			if (unlikely(is_end(iter)))
 				goto insert;
 			if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
 				goto insert;
@@ -805,14 +825,31 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
 		assert(!is_removed(iter_prev));
 		assert(!is_removed(iter));
 		assert(iter_prev != node);
-		if (!dummy)
+		if (!dummy) {
 			node->p.next = clear_flag(iter);
-		else
-			node->p.next = flag_dummy(clear_flag(iter));
+		} else {
+			/*
+			 * Dummy node insertion is performed concurrently (help
+			 * scheme). We try to link its next node, and if this
+			 * succeeds, it _means_ it's us who link this dummy node
+			 * into the table. force_dummy is set as soon as we
+			 * succeed this cmpxchg within this function.
+			 */
+			if (!force_dummy) {
+				if (uatomic_cmpxchg(&node->p.next, NULL,
+						flag_dummy(clear_flag(iter))) != NULL) {
+					return NULL;
+				}
+				force_dummy = 1;
+			} else {
+				node->p.next = flag_dummy(clear_flag(iter));
+			}
+		}
 		if (is_dummy(iter))
 			new_node = flag_dummy(node);
 		else
 			new_node = node;
+		assert(new_node != NULL);
 		if (uatomic_cmpxchg(&iter_prev->p.next, iter,
 				new_node) != iter)
 			continue;	/* retry */
@@ -824,6 +861,7 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
 			new_next = flag_dummy(clear_flag(next));
 		else
 			new_next = clear_flag(next);
+		assert(new_next != NULL);
 		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
 		/* retry */
 	}
@@ -863,6 +901,7 @@ int _cds_lfht_remove(struct cds_lfht *ht, unsigned long size,
 			assert(is_dummy(next));
 		else
 			assert(!is_dummy(next));
+		assert(next != NULL);
 		old = uatomic_cmpxchg(&node->p.next, next,
 				flag_removed(next));
 	} while (old != next);
@@ -959,6 +998,11 @@ void init_table(struct cds_lfht *ht,
 
 		len = !i ? 1 : 1UL << (i - 1);
 		dbg_printf("init order %lu len: %lu\n", i, len);
+
+		/* Stop expand if the resize target changes under us */
+		if (CMM_LOAD_SHARED(ht->t.resize_target) < (!i ? 1 : (1UL << i)))
+			break;
+
 		ht->t.tbl[i] = calloc(1, sizeof(struct rcu_level)
 				+ (len * sizeof(struct _cds_lfht_node)));
@@ -1040,13 +1084,27 @@ void fini_table(struct cds_lfht *ht,
 		   first_order, first_order + len_order);
 	end_order = first_order + len_order;
 	assert(first_order > 0);
-	assert(ht->t.size == (1UL << (first_order - 1)));
 	for (i = end_order - 1; i >= first_order; i--) {
 		unsigned long len;
 
 		len = !i ? 1 : 1UL << (i - 1);
 		dbg_printf("fini order %lu len: %lu\n", i, len);
+		/* Stop shrink if the resize target changes under us */
+		if (CMM_LOAD_SHARED(ht->t.resize_target) > (1UL << (i - 1)))
+			break;
+
+		cmm_smp_wmb();	/* populate data before RCU size */
+		CMM_STORE_SHARED(ht->t.size, 1UL << (i - 1));
+
+		/*
+		 * We need to wait for all add operations to reach Q.S. (and
+		 * thus use the new table for lookups) before we can start
+		 * releasing the old dummy nodes. Otherwise their lookup will
+		 * return a logically removed node as insert position.
+		 */
+		ht->cds_lfht_synchronize_rcu();
+
 		/*
 		 * Set "removed" flag in dummy nodes about to be removed.
 		 * Unlink all now-logically-removed dummy node pointers.
@@ -1099,6 +1157,7 @@ struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
 	ht->flags = flags;
 	ht->cds_lfht_rcu_thread_offline();
 	pthread_mutex_lock(&ht->resize_mutex);
+	ht->t.resize_target = 1UL << (order - 1);
 	init_table(ht, 0, order);
 	pthread_mutex_unlock(&ht->resize_mutex);
 	ht->cds_lfht_rcu_thread_online();
@@ -1107,23 +1166,39 @@ struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
 struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len)
 {
-	struct cds_lfht_node *node, *next;
+	struct cds_lfht_node *node, *next, *dummy_node;
 	struct _cds_lfht_node *lookup;
 	unsigned long hash, reverse_hash, index, order, size;
 
 	hash = ht->hash_fct(key, key_len, ht->hash_seed);
 	reverse_hash = bit_reverse_ulong(hash);
 
+restart:
 	size = rcu_dereference(ht->t.size);
 	index = hash & (size - 1);
 	order = get_count_order_ulong(index + 1);
 	lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1))) - 1)];
 	dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
 		   hash, index, order, index & (!order ?
 		   0 : ((1UL << (order - 1)) - 1)));
-	node = (struct cds_lfht_node *) lookup;
+	dummy_node = (struct cds_lfht_node *) lookup;
+	/* We can always skip the dummy node initially */
+	node = rcu_dereference(dummy_node->p.next);
+	if (unlikely(node == NULL)) {
+		/*
+		 * We are executing concurrently with a hash table
+		 * expand, so we see a dummy node with NULL next value.
+		 * Help expand by linking this node into the list and
+		 * retry.
+		 */
+		(void) _cds_lfht_add(ht, size >> 1, dummy_node, 0, 1);
+		goto restart;	/* retry */
+	}
+	node = clear_flag(node);
 	for (;;) {
-		if (unlikely(!node))
+		if (unlikely(is_end(node))) {
+			node = NULL;
 			break;
+		}
 		if (unlikely(node->p.reverse_hash > reverse_hash)) {
 			node = NULL;
 			break;
@@ -1155,8 +1230,10 @@ struct cds_lfht_node *cds_lfht_next(struct cds_lfht *ht,
 	node = clear_flag(next);
 
 	for (;;) {
-		if (unlikely(!node))
+		if (unlikely(is_end(node))) {
+			node = NULL;
 			break;
+		}
 		if (unlikely(node->p.reverse_hash > reverse_hash)) {
 			node = NULL;
 			break;
@@ -1196,7 +1273,7 @@ struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
 
 	size = rcu_dereference(ht->t.size);
 	ret = _cds_lfht_add(ht, size, node, 1, 0);
-	if (ret != node)
+	if (ret == node)
 		ht_count_add(ht, size);
 	return ret;
 }
@@ -1228,7 +1305,7 @@ int cds_lfht_delete_dummy(struct cds_lfht *ht)
 		if (!is_dummy(node))
 			return -EPERM;
 		assert(!is_removed(node));
-	} while (clear_flag(node));
+	} while (!is_end(node));
 	/*
 	 * size accessed without rcu_dereference because hash table is
 	 * being destroyed.
@@ -1294,7 +1371,7 @@ void cds_lfht_count_nodes(struct cds_lfht *ht,
 		else
 			(nr_dummy)++;
 		node = clear_flag(next);
-	} while (node);
+	} while (!is_end(node));
 	dbg_printf("number of dummy nodes: %lu\n", nr_dummy);
 }
 
@@ -1327,17 +1404,6 @@ void _do_cds_lfht_shrink(struct cds_lfht *ht,
 		   old_size, old_order, new_size, new_order);
 	assert(new_size < old_size);
 
-	cmm_smp_wmb();	/* populate data before RCU size */
-	CMM_STORE_SHARED(ht->t.size, new_size);
-
-	/*
-	 * We need to wait for all add operations to reach Q.S. (and
-	 * thus use the new table for lookups) before we can start
-	 * releasing the old dummy nodes. Otherwise their lookup will
-	 * return a logically removed node as insert position.
-	 */
-	ht->cds_lfht_synchronize_rcu();
-
 	/* Remove and unlink all dummy nodes to remove. */
 	fini_table(ht, new_order, old_order - new_order);
 }
@@ -1363,7 +1429,7 @@ void _do_cds_lfht_resize(struct cds_lfht *ht)
 		ht->t.resize_initiated = 0;
 		/* write resize_initiated before read resize_target */
 		cmm_smp_mb();
-	} while (new_size != CMM_LOAD_SHARED(ht->t.resize_target));
+	} while (ht->t.size != CMM_LOAD_SHARED(ht->t.resize_target));
 }
 
 static
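
A note on the end-of-list marker introduced in this patch: replacing the NULL terminator with END_VALUE (0x4, a bit outside FLAGS_MASK) frees NULL to mean "dummy node allocated but not yet linked", while the REMOVED/DUMMY flags keep living in the two low pointer bits. Below is a minimal, self-contained sketch of that pointer-tagging idea, not the liburcu code itself; struct node and the checks in main() are illustrative only.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for struct cds_lfht_node. */
struct node {
	struct node *next;
	unsigned long key;
};

#define REMOVED_FLAG	(1UL << 0)
#define DUMMY_FLAG	(1UL << 1)
#define FLAGS_MASK	((1UL << 2) - 1)
/* Value of the end pointer: bit 2, so it cannot collide with the flags. */
#define END_VALUE	0x4

static struct node *clear_flag(struct node *p)
{
	return (struct node *) ((uintptr_t) p & ~FLAGS_MASK);
}

static struct node *flag_dummy(struct node *p)
{
	return (struct node *) ((uintptr_t) p | DUMMY_FLAG);
}

static struct node *get_end(void)
{
	return (struct node *) END_VALUE;
}

static int is_end(struct node *p)
{
	/* Strip the flag bits first: a flagged end pointer is still the end. */
	return clear_flag(p) == (struct node *) END_VALUE;
}

int main(void)
{
	struct node tail = { .next = get_end(), .key = 42 };
	struct node head = { .next = &tail, .key = 7 };
	struct node *p;

	assert(is_end(get_end()));
	assert(is_end(flag_dummy(get_end())));	/* flags do not hide the end */
	assert(!is_end(NULL));			/* NULL stays distinct: "not linked yet" */
	assert(clear_flag(flag_dummy(&tail)) == &tail);

	/* Walk the two-node list until the end sentinel is reached. */
	for (p = &head; !is_end(p); p = clear_flag(p)->next)
		printf("key %lu\n", clear_flag(p)->key);
	return 0;
}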
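The force_dummy change turns dummy-node linking into a help scheme: the dummy's next pointer starts out NULL, and only the thread whose cmpxchg moves it from NULL to a real successor considers itself the one that linked the dummy; every other thread (including the lookup path, which calls _cds_lfht_add() and retries when it sees NULL) backs off. The sketch below shows just that "single cmpxchg winner" pattern using C11 atomics rather than liburcu's uatomic_cmpxchg; the thread count and names are illustrative. Build with -pthread.

#include <assert.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_THREADS	4

struct link { int key; };

static struct link successor = { .key = 123 };
/* Shared "next" pointer of a dummy node: NULL means "not linked yet". */
static _Atomic(struct link *) dummy_next;
static atomic_int winners;

static void *try_link(void *arg)
{
	struct link *expected = NULL;

	(void) arg;
	/*
	 * Mirrors the force_dummy cmpxchg in _cds_lfht_add(): the single
	 * thread that swaps NULL -> successor is the one that linked the
	 * dummy node; everyone else sees a non-NULL value and backs off.
	 */
	if (atomic_compare_exchange_strong(&dummy_next, &expected, &successor))
		atomic_fetch_add(&winners, 1);
	else
		assert(expected == &successor);
	return NULL;
}

int main(void)
{
	pthread_t tid[NR_THREADS];
	int i;

	for (i = 0; i < NR_THREADS; i++)
		pthread_create(&tid[i], NULL, try_link, NULL);
	for (i = 0; i < NR_THREADS; i++)
		pthread_join(tid[i], NULL);

	printf("winners: %d (always exactly one)\n", atomic_load(&winners));
	assert(atomic_load(&winners) == 1);
	return 0;
}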
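The ordering moved into fini_table() (publish the smaller size, wait for a grace period with synchronize_rcu, and only then unlink the old dummy nodes) is the classic RCU publish-then-reclaim pattern. Here is a generic sketch of that ordering with liburcu, assuming the default urcu flavor and linking with -lurcu; it shrinks a heap-allocated table by swapping a pointer rather than unlinking dummy nodes, so it illustrates the ordering, not the rculfhash scheme itself.

#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>		/* liburcu: rcu_read_lock(), synchronize_rcu()... */

struct table {
	unsigned long size;		/* always a power of two */
	unsigned long *buckets;
};

static struct table *tbl;		/* RCU-protected pointer */

/* Reader: must keep seeing a coherent (size, buckets) pair. */
static void lookup(unsigned long hash)
{
	struct table *t;

	rcu_read_lock();
	t = rcu_dereference(tbl);
	printf("hash %lu -> bucket %lu of %lu\n",
	       hash, hash & (t->size - 1), t->size);
	rcu_read_unlock();
}

static struct table *alloc_table(unsigned long size)
{
	struct table *t = malloc(sizeof(*t));

	t->size = size;
	t->buckets = calloc(size, sizeof(*t->buckets));
	return t;
}

/* Writer: publish the smaller table, wait a grace period, then reclaim. */
static void shrink(unsigned long new_size)
{
	struct table *old_tbl = tbl;

	rcu_assign_pointer(tbl, alloc_table(new_size));
	synchronize_rcu();	/* pre-existing readers are done with old_tbl */
	free(old_tbl->buckets);
	free(old_tbl);
}

int main(void)
{
	rcu_register_thread();
	tbl = alloc_table(8);

	lookup(13);
	shrink(4);
	lookup(13);

	free(tbl->buckets);
	free(tbl);
	rcu_unregister_thread();
	return 0;
}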