+unsigned long ht_count_update(struct cds_lfht *ht, long value)
+{
+	int cpu;
+
+	if (unlikely(!ht->percpu_count))
+		return 0;
+	cpu = ht_get_cpu();
+	if (unlikely(cpu < 0))
+		return 0;
+	return uatomic_add_return(&ht->percpu_count[cpu].v, value);
+}
+
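+/*
+ * Count one node addition. Per-CPU counts are committed to the global
+ * ht->count every 2^COUNT_COMMIT_ORDER updates, which keeps contention
+ * on the shared counter low. When the committed global count reaches a
+ * power of two, a lazy resize is scheduled to grow the table.
+ */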
+static
+void ht_count_add(struct cds_lfht *ht, struct rcu_table *t)
+{
+	unsigned long percpu_count;
+
+	percpu_count = ht_count_update(ht, 1);
+	if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
+		unsigned long count;
+
+		dbg_printf("add percpu %lu\n", percpu_count);
+		count = uatomic_add_return(&ht->count,
+				1UL << COUNT_COMMIT_ORDER);
+		/* If the global count is a power of 2, grow the table. */
+		if (!(count & (count - 1))) {
+			dbg_printf("add global %lu\n", count);
+			cds_lfht_resize_lazy(ht, t, 1);
+		}
+	}
+}
+
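+/*
+ * Count one node removal: the mirror image of ht_count_add(). The
+ * commit decrements the global count, and reaching a power of two
+ * schedules a lazy resize to shrink the table.
+ */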
+static
+void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
+{
+	unsigned long percpu_count;
+
+	percpu_count = ht_count_update(ht, -1);
+	if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
+		unsigned long count;
+
+		dbg_printf("remove percpu %lu\n", percpu_count);
+		count = uatomic_add_return(&ht->count,
+				-(1UL << COUNT_COMMIT_ORDER));
+		/* If the global count is a power of 2, shrink the table. */
+		if (!(count & (count - 1))) {
+			dbg_printf("remove global %lu\n", count);
+			cds_lfht_resize_lazy(ht, t, -1);
+		}
+	}
+}
+
+#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+
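+/*
+ * Without sched_getcpu() and sysconf(), per-CPU split-counters cannot
+ * be used: nr_cpus_mask stays at -1, allocation returns NULL, and the
+ * accounting hooks become empty stubs with the same signatures as the
+ * per-CPU versions, so call sites compile unchanged.
+ */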
+static const long nr_cpus_mask = -1;
+
+static
+struct ht_items_count *alloc_per_cpu_items_count(void)
+{
+	return NULL;
+}
+
+static
+void free_per_cpu_items_count(struct ht_items_count *count)
+{
+}
+
+static
+void ht_count_add(struct cds_lfht *ht, struct rcu_table *t)
+{
+}
+
+static
+void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
+{
+}
+
+#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+
+static
+void check_resize(struct cds_lfht *ht, struct rcu_table *t,