#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>

#define BUILD_QSBR_LIB
#include "urcu-qsbr-static.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#include "urcu-qsbr.h"

/*
 * Global lock, file-internal. Presumably taken via internal_urcu_lock()
 * around registry and grace-period updates below — confirm against the
 * (not shown here) lock helpers.
 */
static pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
/*
* Global grace period counter.
* Written to only by each individual reader. Read by both the reader and the
* writers.
*/
-unsigned long __thread rcu_reader_qs_gp;
+struct urcu_reader_status __thread urcu_reader_status;
/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4

struct reader_registry {
	pthread_t tid;
	/* Points at the owning thread's TLS urcu_reader_status. */
	struct urcu_reader_status *urcu_reader_status;
};
#ifdef DEBUG_YIELD
}
}
+/*
+ * synchronize_rcu() waiting. Single thread.
+ */
static void wait_for_quiescent_state(void)
{
struct reader_registry *index;
if (!registry)
return;
/*
- * Wait for each thread rcu_reader_qs_gp count to become 0.
+ * Wait for each thread rcu_reader qs_gp count to become 0.
*/
for (index = registry; index < registry + num_readers; index++) {
+ int wait_loops = 0;
+
+ if (likely(!rcu_gp_ongoing(&index->urcu_reader_status->qs_gp)))
+ continue;
+
+ index->urcu_reader_status->gp_waiting = 1;
+ while (rcu_gp_ongoing(&index->urcu_reader_status->qs_gp)) {
+ if (wait_loops++ == RCU_QS_ACTIVE_ATTEMPTS) {
+ /* adapted wait time, in us */
+ usleep(LOAD_SHARED(index->urcu_reader_status->qs_time_delta_usec) / 4);
+ wait_loops = 0;
+ } else {
#ifndef HAS_INCOHERENT_CACHES
- while (rcu_gp_ongoing(index->rcu_reader_qs_gp))
- cpu_relax();
+ cpu_relax();
#else /* #ifndef HAS_INCOHERENT_CACHES */
- while (rcu_gp_ongoing(index->rcu_reader_qs_gp))
- smp_mb();
+ smp_mb();
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
+ }
+ }
+ index->urcu_reader_status->gp_waiting = 0;
}
}
{
	unsigned long was_online;

	was_online = urcu_reader_status.qs_gp;

	/* All threads should read qparity before accessing data structure
	 * where new ptr points to.
	 * threads registered as readers.
	 */
	/* If this thread is itself a reader, go offline (qs_gp = 0) so the
	 * grace period does not wait on us. */
	if (was_online)
		STORE_SHARED(urcu_reader_status.qs_gp, 0);

	internal_urcu_lock();

	/* Flag the grace period as ongoing for readers. */
	STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_ONGOING);

	switch_next_urcu_qparity();	/* 0 -> 1 */

	/*
	 */
	wait_for_quiescent_state();	/* Wait readers in parity 1 */

	/* Clear the ongoing flag. */
	STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_ONGOING);

	internal_urcu_unlock();

	/*
	 * freed.
	 */
	/* Come back online with the current global counter value. */
	if (was_online)
		_STORE_SHARED(urcu_reader_status.qs_gp,
			      LOAD_SHARED(urcu_gp_ctr));
	smp_mb();
}
#else /* !(BITS_PER_LONG < 64) */
{
	unsigned long was_online;

	was_online = urcu_reader_status.qs_gp;

	/*
	 * Mark the writer thread offline to make sure we don't wait for
	 */
	smp_mb();
	if (was_online)
		STORE_SHARED(urcu_reader_status.qs_gp, 0);

	internal_urcu_lock();
	/* Flag the grace period as ongoing, bump the counter, wait, clear. */
	STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_ONGOING);
	STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr + RCU_GP_CTR);
	wait_for_quiescent_state();
	STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_ONGOING);
	internal_urcu_unlock();

	/* Come back online with the current global counter value. */
	if (was_online)
		_STORE_SHARED(urcu_reader_status.qs_gp,
			      LOAD_SHARED(urcu_gp_ctr));
	smp_mb();
}
#endif /* !(BITS_PER_LONG < 64) */
return STORE_SHARED(p, v);
}
/*
 * Wrapper symbol for non-_LGPL_SOURCE users: issues a write barrier
 * before the compare-and-exchange so prior stores to the pointed-to
 * data are visible before the pointer is published.
 */
void *rcu_cmpxchg_pointer_sym(void **p, void *old, void *_new)
{
	wmb();
	return cmpxchg(p, old, _new);
}

void *rcu_xchg_pointer_sym(void **p, void *v)
{
wmb();
}
registry[num_readers].tid = id;
/* reference to the TLS of _this_ reader thread. */
- registry[num_readers].rcu_reader_qs_gp = &rcu_reader_qs_gp;
+ registry[num_readers].urcu_reader_status = &urcu_reader_status;
num_readers++;
}
memcpy(index, ®istry[num_readers - 1],
sizeof(struct reader_registry));
registry[num_readers - 1].tid = 0;
- registry[num_readers - 1].rcu_reader_qs_gp = NULL;
+ registry[num_readers - 1].urcu_reader_status = NULL;
num_readers--;
return;
}