- if (!reader_data) {
- alloc_readers = INIT_NUM_THREADS;
- num_readers = 0;
- reader_data =
- malloc(sizeof(struct reader_data) * alloc_readers);
- }
- if (alloc_readers < num_readers + 1) {
- oldarray = reader_data;
- reader_data = malloc(sizeof(struct reader_data)
- * (alloc_readers << 1));
- memcpy(reader_data, oldarray,
- sizeof(struct reader_data) * alloc_readers);
- alloc_readers <<= 1;
- free(oldarray);
- }
- reader_data[num_readers].tid = id;
- /* reference to the TLS of _this_ reader thread. */
- reader_data[num_readers].urcu_active_readers = &urcu_active_readers;
- num_readers++;
+ if (cds_list_empty(&registry))
+ goto out;
+
+ /*
+ * All threads should read qparity before accessing data structure
+ * where new ptr points to. Must be done within rcu_registry_lock
+ * because it iterates on reader threads.
+ */
+ /* Write new ptr before changing the qparity */
+ smp_mb_master();
+
+ /*
+ * Wait for readers to observe original parity or be quiescent.
+ * wait_for_readers() can release and grab again rcu_registry_lock
+ * internally.
+ */
+ wait_for_readers(&registry, &cur_snap_readers, &qsreaders);
+
+ /*
+ * Must finish waiting for quiescent state for original parity before
+ * committing next rcu_gp.ctr update to memory. Failure to do so could
+ * result in the writer waiting forever while new readers are always
+ * accessing data (no progress). Enforce compiler-order of load
+ * URCU_TLS(rcu_reader).ctr before store to rcu_gp.ctr.
+ */
+ cmm_barrier();
+
+ /*
+ * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
+ * model easier to understand. It does not have a big performance impact
+ * anyway, given this is the write-side.
+ */
+ cmm_smp_mb();
+
+ /* Switch parity: 0 -> 1, 1 -> 0 */
+ CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr ^ RCU_GP_CTR_PHASE);
+
+ /*
+ * Must commit rcu_gp.ctr update to memory before waiting for quiescent
+ * state. Failure to do so could result in the writer waiting forever
+ * while new readers are always accessing data (no progress). Enforce
+ * compiler-order of store to rcu_gp.ctr before load rcu_reader ctr.
+ */
+ cmm_barrier();
+
+ /*
+ * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
+ * model easier to understand. It does not have a big performance impact
+ * anyway, given this is the write-side.
+ */
+ cmm_smp_mb();
+
+ /*
+ * Wait for readers to observe new parity or be quiescent.
+ * wait_for_readers() can release and grab again rcu_registry_lock
+ * internally.
+ */
+ wait_for_readers(&cur_snap_readers, NULL, &qsreaders);
+
+ /*
+ * Put quiescent reader list back into registry.
+ */
+ cds_list_splice(&qsreaders, &registry);
+
+ /*
+ * Finish waiting for reader threads before letting the old ptr
+ * being freed. Must be done within rcu_registry_lock because it
+ * iterates on reader threads.
+ */
+ smp_mb_master();
+out:
+ mutex_unlock(&rcu_registry_lock);
+ mutex_unlock(&rcu_gp_lock);
+
+ /*
+ * Wakeup waiters only after we have completed the grace period
+ * and have ensured the memory barriers at the end of the grace
+ * period have been issued.
+ */
+ urcu_wake_all_waiters(&waiters);