+/*
+ * Note: urcu_adaptative_wake_up needs "value" to stay allocated
+ * throughout its execution. In this scheme, the waiter owns the futex
+ * memory, and we only allow it to free this memory when it receives the
+ * AWAKE_TEARDOWN flag.
+ */
+static void urcu_adaptative_wake_up(int32_t *value)
+{
+	/* Order prior memory accesses before the state change. */
+	cmm_smp_mb();
+	assert(uatomic_read(value) == AWAKE_WAITING);
+	uatomic_set(value, AWAKE_WAKEUP);
+	/*
+	 * Skip the futex syscall if the waiter already noticed the
+	 * wakeup and flagged itself awakened; racing with that flag is
+	 * harmless, since it only causes a spurious FUTEX_WAKE.
+	 */
+	if (!(uatomic_read(value) & AWAKE_AWAKENED))
+		futex_noasync(value, FUTEX_WAKE, 1, NULL, NULL, 0);
+	/* Allow teardown of "value" memory. */
+	uatomic_or(value, AWAKE_TEARDOWN);
+}
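+
+/*
+ * Editor's note (illustrative, not part of this change): the AWAKE_*
+ * states and RCU_AWAKE_ATTEMPTS used by these functions are assumed to
+ * be defined elsewhere in this patch, presumably along these lines
+ * (the enum name here is hypothetical). AWAKE_WAITING is the value
+ * FUTEX_WAIT compares against directly; the other states must be
+ * distinct single bits so they can be OR'd into the word:
+ *
+ *	enum adapt_wakeup {
+ *		AWAKE_WAITING =		0,
+ *		AWAKE_WAKEUP =		(1 << 0),
+ *		AWAKE_AWAKENED =	(1 << 1),
+ *		AWAKE_TEARDOWN =	(1 << 2),
+ *	};
+ */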
+
+/*
+ * Caller must initialize "value" to AWAKE_WAITING before passing its
+ * memory to the waker thread, and must not reuse or free that memory
+ * until this function returns (see the usage sketch after it).
+ */
+static void urcu_adaptative_busy_wait(int32_t *value)
+{
+	unsigned int i;
+
+	/* Order the prior condition load before reading the futex word. */
+	cmm_smp_rmb();
+	/* Busy-wait for a bounded number of attempts before blocking. */
+	for (i = 0; i < RCU_AWAKE_ATTEMPTS; i++) {
+		if (uatomic_read(value) != AWAKE_WAITING)
+			goto skip_futex_wait;
+		caa_cpu_relax();
+	}
+	/*
+	 * Block in the kernel. FUTEX_WAIT returns immediately if
+	 * *value no longer equals AWAKE_WAITING.
+	 */
+	futex_noasync(value, FUTEX_WAIT, AWAKE_WAITING, NULL, NULL, 0);
+skip_futex_wait:
+
+	/* Tell the waker thread that we are awakened. */
+	uatomic_or(value, AWAKE_AWAKENED);
+
+	/*
+	 * Wait until the waker thread lets us know it is OK to tear down
+	 * the memory allocated for "value": busy-wait first, then fall
+	 * back to sleeping in 10ms slices.
+	 */
+	for (i = 0; i < RCU_AWAKE_ATTEMPTS; i++) {
+		if (uatomic_read(value) & AWAKE_TEARDOWN)
+			break;
+		caa_cpu_relax();
+	}
+	while (!(uatomic_read(value) & AWAKE_TEARDOWN))
+		poll(NULL, 0, 10);
+	assert(uatomic_read(value) & AWAKE_TEARDOWN);
+}
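+
+/*
+ * Editor's usage sketch (illustrative, not part of this change),
+ * showing the ownership scheme described above. The waiter owns the
+ * futex word, which may live on its stack, because
+ * urcu_adaptative_busy_wait() only returns once the waker has set
+ * AWAKE_TEARDOWN and will no longer touch the word. The
+ * enqueue_waiter() publication step is hypothetical, standing in for
+ * whatever mechanism hands the pointer to the waker thread.
+ *
+ * Waiter:
+ *	int32_t wait_state = AWAKE_WAITING;
+ *
+ *	enqueue_waiter(&wait_state);
+ *	urcu_adaptative_busy_wait(&wait_state);
+ *	(wait_state may now safely go out of scope)
+ *
+ * Waker, after making the awaited condition true:
+ *	urcu_adaptative_wake_up(wait_state);
+ *	(must not access *wait_state afterwards)
+ */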
+
+static void wait_for_readers(struct cds_list_head *input_readers,
+			struct cds_list_head *cur_snap_readers,
+			struct cds_list_head *qsreaders)