+	cmm_smp_mb();
+
+	/*
+	 * Wait for each thread rcu_reader_qs_gp count to become 0.
+	 */
+	for (;;) {
+		wait_loops++;
+		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
+			/* Busy-wait gave up: arm the futex so readers wake us. */
+			uatomic_set(&gp_futex, -1);
+			/*
+			 * Write futex before write waiting (the other side
+			 * reads them in the opposite order).
+			 */
+			cmm_smp_wmb();
+			cds_list_for_each_entry(index, &registry, node) {
+				_CMM_STORE_SHARED(index->waiting, 1);
+			}
+			/* Write futex before read reader_gp */
+			cmm_smp_mb();
+		}
+		/* Move readers no longer in a read-side C.S. out of the registry. */
+		cds_list_for_each_entry_safe(index, tmp, &registry, node) {
+			if (!rcu_gp_ongoing(&index->ctr))
+				cds_list_move(&index->node, &qsreaders);
+		}
+
+		if (cds_list_empty(&registry)) {
+			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
+				/* Read reader_gp before write futex */
+				cmm_smp_mb();
+				uatomic_set(&gp_futex, 0);
+			}
+			break;
+		} else {
+			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
+				/* presumably blocks on gp_futex until a reader wakes us — see wait_gp() */
+				wait_gp();
+			} else {
+#ifndef HAS_INCOHERENT_CACHES
+				caa_cpu_relax();
+#else /* #ifndef HAS_INCOHERENT_CACHES */
+				cmm_smp_mb();
+#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
+			}
+		}