#include <unistd.h>
#include "compat-getcpu.h"
-#include <urcu-pointer.h>
-#include <urcu-call-rcu.h>
-#include <urcu-flavor.h>
+#include <urcu/pointer.h>
+#include <urcu/call-rcu.h>
+#include <urcu/flavor.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>
static void cds_lfht_init_worker(const struct rcu_flavor_struct *flavor);
static void cds_lfht_fini_worker(const struct rcu_flavor_struct *flavor);
+#ifdef CONFIG_CDS_LFHT_ITER_DEBUG
+
+/*
+ * Record which hash table an iterator was initialized against, so the
+ * traversal functions can assert the iterator is only used with that
+ * same table.
+ */
+static
+void cds_lfht_iter_debug_set_ht(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+{
+	iter->lfht = ht;
+}
+
+#define cds_lfht_iter_debug_assert(...) assert(__VA_ARGS__)
+
+#else
+
+/*
+ * Iterator debugging disabled: the setter is a no-op and the assertion
+ * macro expands to nothing.
+ */
+static
+void cds_lfht_iter_debug_set_ht(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+{
+	/* Silence -Wunused-parameter in non-debug builds. */
+	(void) ht;
+	(void) iter;
+}
+
+#define cds_lfht_iter_debug_assert(...)
+
+#endif
+
+
/*
* Algorithm to reverse bits in a word by lookup table, extended to
* 64-bit words.
if (unique_ret
&& !is_bucket(next)
&& clear_flag(iter)->reverse_hash == node->reverse_hash) {
- struct cds_lfht_iter d_iter = { .node = node, .next = iter, };
+ struct cds_lfht_iter d_iter = {
+ .node = node,
+ .next = iter,
+#ifdef CONFIG_CDS_LFHT_ITER_DEBUG
+ .lfht = ht,
+#endif
+ };
/*
* uniquely adding inserts the node as the first
}
}
+#if (CAA_BITS_PER_LONG > 32)
+/*
+ * For 64-bit architectures, with max number of buckets small enough not to
+ * use the entire 64-bit memory mapping space (and allowing a fair number of
+ * hash table instances), use the mmap allocator, which is faster. Otherwise,
+ * fallback to the order allocator.
+ */
+static
+const struct cds_lfht_mm_type *get_mm_type(unsigned long max_nr_buckets)
+{
+	if (max_nr_buckets && max_nr_buckets <= (1ULL << 32))
+		return &cds_lfht_mm_mmap;
+	else
+		return &cds_lfht_mm_order;
+}
+#else
+/*
+ * For 32-bit architectures, use the order allocator.
+ */
+static
+const struct cds_lfht_mm_type *get_mm_type(unsigned long max_nr_buckets)
+{
+	/* Silence -Wunused-parameter: only the 64-bit variant reads it. */
+	(void) max_nr_buckets;
+	return &cds_lfht_mm_order;
+}
+#endif
+
+
struct cds_lfht *_cds_lfht_new(unsigned long init_size,
unsigned long min_nr_alloc_buckets,
unsigned long max_nr_buckets,
/*
* Memory management plugin default.
*/
- if (!mm) {
- if (CAA_BITS_PER_LONG > 32
- && max_nr_buckets
- && max_nr_buckets <= (1ULL << 32)) {
- /*
- * For 64-bit architectures, with max number of
- * buckets small enough not to use the entire
- * 64-bit memory mapping space (and allowing a
- * fair number of hash table instances), use the
- * mmap allocator, which is faster than the
- * order allocator.
- */
- mm = &cds_lfht_mm_mmap;
- } else {
- /*
- * The fallback is to use the order allocator.
- */
- mm = &cds_lfht_mm_order;
- }
- }
+ if (!mm)
+ mm = get_mm_type(max_nr_buckets);
/* max_nr_buckets == 0 for order based mm means infinite */
if (mm == &cds_lfht_mm_order && !max_nr_buckets)
struct cds_lfht_node *node, *next, *bucket;
unsigned long reverse_hash, size;
+ cds_lfht_iter_debug_set_ht(ht, iter);
+
reverse_hash = bit_reverse_ulong(hash);
size = rcu_dereference(ht->size);
struct cds_lfht_node *node, *next;
unsigned long reverse_hash;
+ cds_lfht_iter_debug_assert(ht == iter->lfht);
node = iter->node;
reverse_hash = node->reverse_hash;
next = iter->next;
{
struct cds_lfht_node *node, *next;
+ cds_lfht_iter_debug_assert(ht == iter->lfht);
node = clear_flag(iter->next);
for (;;) {
if (caa_unlikely(is_end(node))) {
void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
{
+ cds_lfht_iter_debug_set_ht(ht, iter);
/*
* Get next after first bucket node. The first bucket node is the
* first node of the linked list.