+ LIST_HEAD(qsreaders);
+ int wait_loops = 0;
+ struct rcu_reader *index, *tmp;
+
+#if (BITS_PER_LONG < 64)
+ /* Switch parity: 0 -> 1, 1 -> 0 */
+ STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
+#else /* !(BITS_PER_LONG < 64) */
+ /* Increment current G.P. */
+ STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
+#endif /* !(BITS_PER_LONG < 64) */
+
+ /*
+ * Wait for each thread rcu_reader_qs_gp count to become 0.
+ */
+ for (;;) {
+ wait_loops++;
+ if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+ uatomic_dec(&gp_futex);
+ /* Write futex before read reader_gp */
+ smp_mb();
+ }
+
+ list_for_each_entry_safe(index, tmp, &registry, head) {
+ if (!rcu_gp_ongoing(&index->ctr))
+ list_move(&index->head, &qsreaders);
+ }
+
+ if (list_empty(&registry)) {
+ if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+ /* Read reader_gp before write futex */
+ smp_mb();
+ uatomic_set(&gp_futex, 0);
+ }
+ break;
+ } else {
+ if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+ wait_gp();
+ } else {
+#ifndef HAS_INCOHERENT_CACHES
+ cpu_relax();
+#else /* #ifndef HAS_INCOHERENT_CACHES */
+ smp_mb();
+#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
+ }
+ }
+ }
+ /* put back the reader list in the registry */
+ list_splice(&qsreaders, &registry);