new_next = flag_dummy(clear_flag(next));
else
new_next = clear_flag(next);
- assert(new_next != NULL);
(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
/* retry */
}
assert(is_dummy(next));
else
assert(!is_dummy(next));
- assert(next != NULL);
old = uatomic_cmpxchg(&node->p.next, next,
flag_removed(next));
} while (old != next);
return -ENOENT;
}
-static
-void init_table_hash(struct cds_lfht *ht, unsigned long i,
- unsigned long len)
-{
- unsigned long j;
-
- for (j = 0; j < len; j++) {
- struct cds_lfht_node *new_node =
- (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
-
- dbg_printf("init hash entry: i %lu j %lu hash %lu\n",
- i, j, !i ? 0 : (1UL << (i - 1)) + j);
- new_node->p.reverse_hash =
- bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
- if (CMM_LOAD_SHARED(ht->in_progress_destroy))
- break;
- }
-}
-
/*
* Holding RCU read lock to protect _cds_lfht_add against memory
* reclaim that could be performed by other call_rcu worker threads (ABA
* problem).
+ *
+ * TODO: when we reach a certain length, we can split this population phase over
+ * many worker threads, based on the number of CPUs available in the system.
+ * This should therefore take care of not letting the expand operation lag
+ * behind too many concurrent insertion threads by using the scheduler's
+ * ability to schedule dummy node population fairly with insertions.
*/
static
-void init_table_link(struct cds_lfht *ht, unsigned long i, unsigned long len)
+void init_table_populate(struct cds_lfht *ht, unsigned long i, unsigned long len)
{
unsigned long j;
struct cds_lfht_node *new_node =
(struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
- dbg_printf("init link: i %lu j %lu hash %lu\n",
+ dbg_printf("init populate: i %lu j %lu hash %lu\n",
i, j, !i ? 0 : (1UL << (i - 1)) + j);
+ new_node->p.reverse_hash =
+ bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
(void) _cds_lfht_add(ht, !i ? 0 : (1UL << (i - 1)),
new_node, 0, 1);
if (CMM_LOAD_SHARED(ht->in_progress_destroy))
ht->t.tbl[i] = calloc(1, sizeof(struct rcu_level)
+ (len * sizeof(struct _cds_lfht_node)));
- /* Set all dummy nodes reverse hash values for a level */
- init_table_hash(ht, i, len);
-
/*
- * Link all dummy nodes into the table. Concurrent
- * add/remove are helping us.
+ * Set all dummy nodes reverse hash values for a level and
+ * link all dummy nodes into the table.
*/
- init_table_link(ht, i, len);
+ init_table_populate(ht, i, len);
/*
* Update table size.
*
* Logical removal and garbage collection can therefore be done in batch or on a
* node-per-node basis, as long as the guarantee above holds.
+ *
+ * TODO: when we reach a certain length, we can split this removal over many
+ * worker threads, based on the number of CPUs available in the system. This
+ * should take care of not letting the resize process lag behind too many
+ * concurrent updater threads actively inserting into the hash table.
*/
static
void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
}
}
-struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
+struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
cds_lfht_compare_fct compare_fct,
unsigned long hash_seed,
unsigned long init_size,