-/*
- * Never shrink (implementation limitation).
- * This is O(nb threads). Eventually use a hash table.
- */
-void urcu_remove_reader(pthread_t id)
-{
- struct reader_data *index;
-
- assert(reader_data != NULL);
- for (index = reader_data; index < reader_data + num_readers; index++) {
- if (index->tid == id) {
- memcpy(index, &reader_data[num_readers - 1],
- sizeof(struct reader_data));
- reader_data[num_readers - 1].tid = 0;
- reader_data[num_readers - 1].urcu_active_readers = NULL;
- num_readers--;
- return;
+#ifndef HAS_INCOHERENT_CACHES
+ if (cds_list_empty(&registry)) {
+ if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+ /* Read reader_gp before write futex */
+ smp_mb_master(RCU_MB_GROUP);
+ uatomic_set(&gp_futex, 0);
+ }
+ break;
+ } else {
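+ /*
+ * Readers are still active: poll with caa_cpu_relax()
+ * until wait_loops reaches RCU_QS_ACTIVE_ATTEMPTS,
+ * then block on gp_futex via wait_gp() rather than
+ * keep burning CPU.
+ */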
+ if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
+ wait_gp();
+ else
+ caa_cpu_relax();
+ }
+#else /* #ifndef HAS_INCOHERENT_CACHES */
+ /*
+ * BUSY-LOOP. Force the reader thread to commit its
+ * rcu_reader.ctr update to memory if we wait for too long.
+ */
+ if (cds_list_empty(&registry)) {
+ if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+ /* Read reader_gp before write futex */
+ smp_mb_master(RCU_MB_GROUP);
+ uatomic_set(&gp_futex, 0);
+ }
+ break;
+ } else {
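+ /*
+ * Readers are still active. As in the coherent-cache
+ * path, block on the futex once wait_loops reaches
+ * RCU_QS_ACTIVE_ATTEMPTS. In addition, every
+ * KICK_READER_LOOPS iterations, issue smp_mb_master()
+ * to force readers to commit their rcu_reader.ctr
+ * updates to memory, then restart the loop count.
+ */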
+ switch (wait_loops) {
+ case RCU_QS_ACTIVE_ATTEMPTS:
+ wait_gp();
+ break; /* only escape switch */
+ case KICK_READER_LOOPS:
+ smp_mb_master(RCU_MB_GROUP);
+ wait_loops = 0;
+ break; /* only escape switch */
+ default:
+ caa_cpu_relax();
+ }