+	for (;;) {
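+		/*
+		 * Spin for up to RCU_QS_ACTIVE_ATTEMPTS iterations, then
+		 * switch to futex-based waiting so we do not keep burning
+		 * CPU while readers remain in pre-existing read-side
+		 * critical sections.
+		 */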
+		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
+			wait_loops++;
+		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
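+			/*
+			 * Decrement gp_futex so readers exiting their
+			 * read-side critical section know to wake us up
+			 * through the futex.
+			 */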
+			uatomic_dec(&gp_futex);
+			/* Write futex before read reader_gp */
+			smp_mb_master(RCU_MB_GROUP);
+		}
+
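+		/*
+		 * Move readers which are not in an ongoing read-side
+		 * critical section (with respect to the current grace
+		 * period) onto the local qsreaders list.
+		 */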
+		cds_list_for_each_entry_safe(index, tmp, &registry, node) {
+			if (!rcu_gp_ongoing(&index->ctr))
+				cds_list_move(&index->node, &qsreaders);
+		}
+
+#ifndef HAS_INCOHERENT_CACHES
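+		/*
+		 * With coherent caches, simply wait (spinning, then on the
+		 * futex) until every pre-existing reader has been moved to
+		 * qsreaders.
+		 */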
+		if (cds_list_empty(&registry)) {
+			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
+				/* Read reader_gp before write futex */
+				smp_mb_master(RCU_MB_GROUP);
+				uatomic_set(&gp_futex, 0);
+			}
+			break;
+		} else {
+			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
+				/* wait_gp unlocks/locks registry lock. */
+				wait_gp();
+			} else {
+				/* Temporarily unlock the registry lock. */
+				mutex_unlock(&rcu_registry_lock);
+				caa_cpu_relax();
+				/*
+				 * Re-lock the registry lock before the
+				 * next loop.
+				 */
+				mutex_lock(&rcu_registry_lock);
+			}
+		}
+#else /* #ifndef HAS_INCOHERENT_CACHES */
+		/*
+		 * BUSY-LOOP. Force the reader thread to commit its
+		 * URCU_TLS(rcu_reader).ctr update to memory if we wait
+		 * for too long.
+		 */
+		if (cds_list_empty(&registry)) {
+			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
+				/* Read reader_gp before write futex */
+				smp_mb_master(RCU_MB_GROUP);
+				uatomic_set(&gp_futex, 0);
+			}
+			break;
+		} else {
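+			/*
+			 * After every KICK_READER_LOOPS futex waits, kick
+			 * the readers with smp_mb_master() so their
+			 * URCU_TLS(rcu_reader).ctr updates reach memory
+			 * (see the BUSY-LOOP comment above).
+			 */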
+			if (wait_gp_loops == KICK_READER_LOOPS) {
+				smp_mb_master(RCU_MB_GROUP);
+				wait_gp_loops = 0;
+			}
+			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
+				/* wait_gp unlocks/locks registry lock. */
+				wait_gp();
+				wait_gp_loops++;
+			} else {
+				/* Temporarily unlock the registry lock. */
+				mutex_unlock(&rcu_registry_lock);
+				caa_cpu_relax();
+				/*
+				 * Re-lock the registry lock before the
+				 * next loop.
+				 */
+				mutex_lock(&rcu_registry_lock);
+			}
+		}
+#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
+	}
+	/* put back the reader list in the registry */
+	cds_list_splice(&qsreaders, &registry);