void (*cds_lfht_rcu_register_thread)(void);
void (*cds_lfht_rcu_unregister_thread)(void);
pthread_attr_t *resize_attr; /* Resize threads attributes */
- unsigned long count; /* global approximate item count */
+ long count; /* global approximate item count */
struct ht_items_count *percpu_count; /* per-cpu item count */
};
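
The count field switches from unsigned long to long because it is only an approximation maintained with split counters: each CPU batches its adds and deletes locally and folds them into ht->count in chunks of 1UL << COUNT_COMMIT_ORDER, as the two hunks below show. A delete batch can commit before the matching add batch, so the folded global value can transiently go negative; an unsigned counter would wrap that into a huge positive value and mislead the resize heuristics. Below is a minimal single-writer-per-slot sketch of the split-counter idea, with hypothetical names and an assumed batch order (this is not the liburcu API):

#include <stdatomic.h>

#define COMMIT_ORDER	10	/* assumed batch order, like COUNT_COMMIT_ORDER */
#define COMMIT_BATCH	(1L << COMMIT_ORDER)

struct split_counter {
	_Atomic long total;	/* approximate global value, may dip below 0 */
	unsigned long adds;	/* per-slot uncommitted adds */
	unsigned long dels;	/* per-slot uncommitted deletes */
};

/* Count one addition; commit a whole batch every COMMIT_BATCH adds. */
static void split_counter_add(struct split_counter *c)
{
	if (!(++c->adds & (COMMIT_BATCH - 1)))
		atomic_fetch_add(&c->total, COMMIT_BATCH);
}

/* Count one deletion; a del batch may commit before its add batch,
 * which is exactly why total must be signed. */
static void split_counter_del(struct split_counter *c)
{
	if (!(++c->dels & (COMMIT_BATCH - 1)))
		atomic_fetch_sub(&c->total, COMMIT_BATCH);
}
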
return;
percpu_count = uatomic_add_return(&ht->percpu_count[cpu].add, 1);
if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
- unsigned long count;
+ long count;
dbg_printf("add percpu %lu\n", percpu_count);
count = uatomic_add_return(&ht->count, 1UL << COUNT_COMMIT_ORDER);
if (!(count & (count - 1))) {
if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
return;
- dbg_printf("add set global %lu\n", count);
+ dbg_printf("add set global %ld\n", count);
cds_lfht_resize_lazy_count(ht, size,
count >> (CHAIN_LEN_TARGET - 1));
}
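
In the add path, !(count & (count - 1)) is true only when the committed global count lands exactly on a power of two, so the chain-length check runs a logarithmic number of times as the table fills rather than on every commit. When the average chain length has crossed the threshold, the lazy resize asks for count >> (CHAIN_LEN_TARGET - 1) buckets. A standalone sketch of this decision follows; the two constants use assumed values, since their real definitions live elsewhere in rculfhash.c:

/* Assumed values; the real constants are defined elsewhere in the file. */
#define CHAIN_LEN_TARGET		1
#define CHAIN_LEN_RESIZE_THRESHOLD	3

/* Return the bucket count to grow to, or 0 to leave the table as-is. */
static unsigned long grow_target(long count, unsigned long size)
{
	if (count & (count - 1))
		return 0;	/* only re-check at power-of-two counts */
	if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
		return 0;	/* average chain length still under threshold */
	return (unsigned long)count >> (CHAIN_LEN_TARGET - 1);
}
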
return;
percpu_count = uatomic_add_return(&ht->percpu_count[cpu].del, 1);
if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
- unsigned long count;
+ long count;
dbg_printf("del percpu %lu\n", percpu_count);
count = uatomic_add_return(&ht->count, -(1UL << COUNT_COMMIT_ORDER));
if (!(count & (count - 1))) {
if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
return;
- dbg_printf("del set global %lu\n", count);
+ dbg_printf("del set global %ld\n", count);
+ /*
+ * Don't shrink table if the number of nodes is below a
+ * certain threshold.
+ */
+ if (count < (1UL << COUNT_COMMIT_ORDER) * (nr_cpus_mask + 1))
+ return;
cds_lfht_resize_lazy_count(ht, size,
count >> (CHAIN_LEN_TARGET - 1));
}
}
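
The new guard keeps the table from shrinking while the approximate count is dominated by counter slack: each of the nr_cpus_mask + 1 per-cpu counters can hold up to 1UL << COUNT_COMMIT_ORDER uncommitted updates, so below (1UL << COUNT_COMMIT_ORDER) * (nr_cpus_mask + 1) nodes the global value says little about the real population, and a shrink would just chase noise. A worked example with assumed values (COUNT_COMMIT_ORDER = 10, 8 possible CPUs):

#include <stdio.h>

#define COUNT_COMMIT_ORDER	10	/* assumed, as in the sketch above */

int main(void)
{
	long nr_cpus_mask = 7;	/* 8 possible CPUs, so the mask is 7 */
	unsigned long threshold =
		(1UL << COUNT_COMMIT_ORDER) * (nr_cpus_mask + 1);

	/* Each of the 8 counters may hide up to 1024 updates, so any
	 * approximate count below 8192 is within counter noise. */
	printf("no shrink below %lu nodes\n", threshold);	/* 8192 */
	return 0;
}
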
void cds_lfht_count_nodes(struct cds_lfht *ht,
- unsigned long *approx_before,
+ long *approx_before,
unsigned long *count,
unsigned long *removed,
- unsigned long *approx_after)
+ long *approx_after)
{
struct cds_lfht_node *node, *next;
struct _cds_lfht_node *lookup;