}
#endif
-unsigned int fls_ulong(unsigned long x)
+unsigned int cds_lfht_fls_ulong(unsigned long x)
{
#if (CAA_BITS_PER_LONG == 32)
return fls_u32(x);
* Return the minimum order for which x <= (1UL << order).
* Return -1 if x is 0.
*/
-int get_count_order_u32(uint32_t x)
+int cds_lfht_get_count_order_u32(uint32_t x)
{
if (!x)
return -1;
* Return the minimum order for which x <= (1UL << order).
* Return -1 if x is 0.
*/
-int get_count_order_ulong(unsigned long x)
+int cds_lfht_get_count_order_ulong(unsigned long x)
{
if (!x)
return -1;
- return fls_ulong(x - 1);
+ return cds_lfht_fls_ulong(x - 1);
}
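/*
 * Worked example: fls returns the 1-based index of the most
 * significant set bit (fls(0) == 0, fls(1) == 1, fls(8) == 4), so the
 * order of x is fls(x - 1): cds_lfht_get_count_order_ulong(1) == 0
 * (1 <= 1UL << 0), cds_lfht_get_count_order_ulong(8) == 3
 * (8 == 1UL << 3), and cds_lfht_get_count_order_ulong(9) == 4
 * (9 <= 1UL << 4).
 */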
static
 * Round up the number of CPUs to the next power of two, so we
 * can use a bitwise AND ('&') for modulo.
*/
- maxcpus = 1UL << get_count_order_ulong(maxcpus);
+ maxcpus = 1UL << cds_lfht_get_count_order_ulong(maxcpus);
nr_cpus_mask = maxcpus - 1;
}
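/*
 * Illustration: with 6 online CPUs, maxcpus is rounded up to
 * 1UL << 3 == 8, so nr_cpus_mask == 7 and "cpu & nr_cpus_mask"
 * computes cpu % 8 without a division.
 */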
#else /* #if defined(HAVE_SYSCONF) */
chain_len);
if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
cds_lfht_resize_lazy_grow(ht, size,
- get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
+ cds_lfht_get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
}
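/*
 * Example, assuming the usual CHAIN_LEN_TARGET == 1 and
 * CHAIN_LEN_RESIZE_THRESHOLD == 3 (their definitions are not part of
 * this hunk): a chain of length 3 requests a growth of
 * cds_lfht_get_count_order_u32(3) == 2 orders, i.e. a table 4 times
 * larger, which brings the expected chain length back toward the
 * target.
 */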
static
new_next = clear_flag(next);
(void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
}
- return;
}
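/*
 * The cmpxchg above is the gc unlink step: once a node carries
 * REMOVED_FLAG, any thread may splice it out by atomically swinging
 * the predecessor's ->next from the removed node to clear_flag(next).
 * A failed cmpxchg just means another thread changed the chain first;
 * the enclosing loop (not visible in this hunk) re-scans from the
 * bucket.
 */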
static
} else {
nr_threads = 1;
}
- partition_len = len >> get_count_order_ulong(nr_threads);
+ partition_len = len >> cds_lfht_get_count_order_ulong(nr_threads);
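/*
 * Illustration, assuming nr_threads is a power of two (as the
 * surrounding code arranges), so the shift is an exact division:
 * with len == 1024 and nr_threads == 4,
 * partition_len == 1024 >> 2 == 256 buckets per worker thread.
 */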
work = calloc(nr_threads, sizeof(*work));
assert(work);
for (thread = 0; thread < nr_threads; thread++) {
assert(i > MIN_TABLE_ORDER);
ht->flavor->read_lock();
for (j = size + start; j < size + start + len; j++) {
- struct cds_lfht_node *fini_node = bucket_at(ht, j);
+ struct cds_lfht_node *fini_bucket = bucket_at(ht, j);
+ struct cds_lfht_node *parent_bucket = bucket_at(ht, j - size);
assert(j >= size && j < (size << 1));
dbg_printf("remove entry: order %lu index %lu hash %lu\n",
i, j, j);
- fini_node->reverse_hash = bit_reverse_ulong(j);
- (void) _cds_lfht_del(ht, size, fini_node, 1);
+ /* Set the REMOVED_FLAG to freeze the ->next for gc */
+ uatomic_or(&fini_bucket->next, REMOVED_FLAG);
+ _cds_lfht_gc_bucket(parent_bucket, fini_bucket);
}
ht->flavor->read_unlock();
}
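/*
 * Why j - size is the parent: for size <= j < 2 * size, bucket j
 * differs from bucket j - size only in its top-most bit, so in the
 * split-ordered (reverse-hash) list the order-i bucket at index j
 * hangs off the chain started at j - size. E.g. with size == 4,
 * bucket 6 is gc'd out of the chain of bucket 2.
 */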
node->next = flag_bucket(get_end());
node->reverse_hash = 0;
- for (order = 1; order < get_count_order_ulong(size) + 1; order++) {
+ for (order = 1; order < cds_lfht_get_count_order_ulong(size) + 1; order++) {
len = 1UL << (order - 1);
cds_lfht_alloc_bucket_table(ht, order);
alloc_split_items_count(ht);
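/*
 * Illustration: for size == 8 the loop runs with order == 1, 2, 3 and
 * allocates len == 1, 2, 4 buckets respectively; together with the
 * order-0 bucket initialized above, that covers all 8 buckets.
 */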
/* this mutex should not nest in read-side C.S. */
pthread_mutex_init(&ht->resize_mutex, NULL);
- order = get_count_order_ulong(init_size);
+ order = cds_lfht_get_count_order_ulong(init_size);
ht->resize_target = 1UL << order;
cds_lfht_create_bucket(ht, 1UL << order);
ht->size = 1UL << order;
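/*
 * Note that init_size is rounded up to a power of two: e.g.
 * init_size == 100 gives order == 7, so the table actually starts
 * with 1UL << 7 == 128 buckets.
 */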
assert(is_bucket(node->next));
}
- for (order = get_count_order_ulong(size); (long)order >= 0; order--)
+ for (order = cds_lfht_get_count_order_ulong(size); (long)order >= 0; order--)
cds_lfht_free_bucket_table(ht, order);
return 0;
{
unsigned long old_order, new_order;
- old_order = get_count_order_ulong(old_size);
- new_order = get_count_order_ulong(new_size);
+ old_order = cds_lfht_get_count_order_ulong(old_size);
+ new_order = cds_lfht_get_count_order_ulong(new_size);
dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
old_size, old_order, new_size, new_order);
assert(new_size > old_size);
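/*
 * Example: growing from 128 to 512 buckets moves from order 7 to
 * order 9, so only the order-8 and order-9 bucket tables need to be
 * allocated and populated on top of the existing ones.
 */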
unsigned long old_order, new_order;
new_size = max(new_size, MIN_TABLE_SIZE);
- old_order = get_count_order_ulong(old_size);
- new_order = get_count_order_ulong(new_size);
+ old_order = cds_lfht_get_count_order_ulong(old_size);
+ new_order = cds_lfht_get_count_order_ulong(new_size);
dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
old_size, old_order, new_size, new_order);
assert(new_size < old_size);
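/*
 * Example: shrinking from 512 (order 9) to 128 (order 7) means the
 * order-8 and order-9 bucket tables get torn down by the removal loop
 * shown earlier; requests below MIN_TABLE_SIZE are first clamped by
 * the max() above.
 */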