+/*
+ * urcu.c
+ *
+ * Userspace RCU library
+ *
+ * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ *
+ * Distributed under GPLv2
+ */
+
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
#include "urcu.h"
pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
-/* Global quiescent period parity */
-int urcu_qparity;
+/* Global grace period counter */
+int urcu_gp_ctr;
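+/*
+ * Counter layout assumed from urcu.h: a parity bit (RCU_GP_CTR_BIT) flipped
+ * at each grace period phase, with the low-order bits left for the reader
+ * nesting count. Each reader presumably snapshots urcu_gp_ctr into its
+ * per-thread urcu_active_readers on the outermost rcu_read_lock().
+ */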
-int __thread urcu_active_readers[2];
+int __thread urcu_active_readers;
/* Thread IDs of registered readers */
#define INIT_NUM_THREADS 4
struct reader_data {
pthread_t tid;
- int **urcu_active_readers;
+ int *urcu_active_readers;
};
+#ifdef DEBUG_YIELD
+unsigned int yield_active;
+unsigned int __thread rand_yield;
+#endif
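+/*
+ * The debug_yield_write() calls sprinkled below are test instrumentation:
+ * under DEBUG_YIELD they presumably expand (in urcu.h) to randomized
+ * sched_yield() calls that widen race windows; otherwise they compile to
+ * no-ops.
+ */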
+
static struct reader_data *reader_data;
static int num_readers, alloc_readers;
static int sig_done;
+void internal_urcu_lock(void)
+{
+ int ret;
+ ret = pthread_mutex_lock(&urcu_mutex);
+ if (ret) {
+ perror("Error in pthread mutex lock");
+ exit(-1);
+ }
+}
+
+void internal_urcu_unlock(void)
+{
+ int ret;
+
+ ret = pthread_mutex_unlock(&urcu_mutex);
+ if (ret) {
+ perror("Error in pthread mutex unlock");
+ exit(-1);
+ }
+}
+
/*
* called with urcu_mutex held.
*/
-static int switch_next_urcu_qparity(void)
+static void switch_next_urcu_qparity(void)
{
- int old_parity = urcu_qparity;
- urcu_qparity = 1 - old_parity;
- return old_parity;
+ urcu_gp_ctr ^= RCU_GP_CTR_BIT;
}
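+/*
+ * Force each registered reader to execute a memory barrier: SIGURCU is sent
+ * to every reader, each sigurcu_handler() issues an mb() and increments
+ * sig_done, and the caller busy-waits until all readers have acknowledged.
+ * This is what lets the read-side get by with compiler barriers only.
+ */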
static void force_mb_all_threads(void)
{
- pthread_t *index;
+ struct reader_data *index;
/*
 * Ask each thread to execute an mb() so we can consider the
* compiler barriers around rcu read lock as real memory barriers.
*/
if (!reader_data)
return;
- sigtask = TASK_FORCE_MB;
+ debug_yield_write();
sig_done = 0;
- mb(); /* write sig_done and sigtask before sending the signals */
- for (index = reader_data; index < reader_data + num_readers; index++)
- pthread_kill(*index, SIGURCU);
+ debug_yield_write();
+ mb(); /* write sig_done before sending the signals */
+ debug_yield_write();
+ for (index = reader_data; index < reader_data + num_readers; index++) {
+ pthread_kill(index->tid, SIGURCU);
+ debug_yield_write();
+ }
/*
* Wait for sighandler (and thus mb()) to execute on every thread.
* BUSY-LOOP.
*/
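+ /*
+ * sig_done is a plain int; barrier() (presumably a compiler barrier from
+ * urcu.h) forces it to be re-read on every iteration, while the handler's
+ * atomic_inc() keeps the updates coherent.
+ */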
while (sig_done < num_readers)
barrier();
- mb(); /* read sig_done before writing sigtask */
- sigtask = TASK_NONE;
+ debug_yield_write();
+ mb(); /* read sig_done before ending the barrier */
+ debug_yield_write();
}
-void wait_for_quiescent_state(int parity)
+void wait_for_quiescent_state(void)
{
+ struct reader_data *index;
if (!reader_data)
return;
/*
 * Wait for each thread's urcu_active_readers count to become 0.
 */
- for (index = readers_data; index < reader_data + num_readers; index++) {
+ for (index = reader_data; index < reader_data + num_readers; index++) {
/*
* BUSY-LOOP.
*/
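+ /*
+ * rcu_old_gp_ongoing() (presumably defined in urcu.h) returns true while
+ * the reader's snapshot records a non-zero read-side nesting count taken
+ * in a parity window other than the current one.
+ */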
- while (*index->urcu_active_readers != 0)
+ while (rcu_old_gp_ongoing(index->urcu_active_readers))
barrier();
}
/*
 * Finish waiting for reader threads before letting the old ptr be
 * freed.
 */
force_mb_all_threads();
}
+static void switch_qparity(void)
+{
+	/*
+	 * All threads should read qparity before accessing the data structure;
+	 * write the new ptr before changing the qparity.
+	 */
+ force_mb_all_threads();
+ debug_yield_write();
+ switch_next_urcu_qparity();
+ debug_yield_write();
+
+ /*
+ * Wait for previous parity to be empty of readers.
+ */
+ wait_for_quiescent_state();
+}
+
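+/*
+ * Why two flip-and-wait phases: a reader racing with the flip may have
+ * fetched the pre-flip urcu_gp_ctr and published its snapshot only after
+ * the writer finished waiting on that parity, leaving it counted in the
+ * other parity window. Waiting on both windows covers that case.
+ */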
+void synchronize_rcu(void)
+{
+ debug_yield_write();
+ internal_urcu_lock();
+ debug_yield_write();
+ switch_qparity();
+ debug_yield_write();
+ switch_qparity();
+ debug_yield_write();
+	internal_urcu_unlock();
+ debug_yield_write();
+}
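+/*
+ * Writer-side usage sketch (rcu_dereference() assumed from urcu.h):
+ *
+ *	old = rcu_dereference(shared_ptr);
+ *	rcu_assign_pointer(shared_ptr, new);
+ *	synchronize_rcu();
+ *	free(old);
+ *
+ * synchronize_rcu() returns once every pre-existing reader has left its
+ * read-side critical section, making the free safe.
+ */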
+
/*
 * Return old pointer, OK to free, no more references exist.
+ * Called under rcu_write_lock.
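+ * The returned old pointer may be freed immediately: two parity flips (a
+ * full grace period) have elapsed by the time this returns.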
*/
void *urcu_publish_content(void **ptr, void *new)
{
- int ret, prev_parity;
void *oldptr;
- ret = pthread_mutex_lock(&urcu_mutex);
- if (ret) {
- perror("Error in %s pthread mutex lock", __func__);
- exit(-1);
- }
-
+ debug_yield_write();
+ internal_urcu_lock();
+ debug_yield_write();
/*
* We can publish the new pointer before we change the current qparity.
 * Readers seeing the new pointer while still in the previous qparity
 * window will be waited for before the old data may be freed.
*/
oldptr = *ptr;
- *ptr = new;
- wmb(); /* Write ptr before changing the qparity */
- /* All threads should read qparity before ptr */
- force_rmb_all_threads();
- prev_parity = switch_next_urcu_qparity();
+ debug_yield_write();
+ rcu_assign_pointer(*ptr, new);
+
+ debug_yield_write();
+ switch_qparity();
+ debug_yield_write();
+ switch_qparity();
+ debug_yield_write();
+ internal_urcu_unlock();
+ debug_yield_write();
- /*
- * Wait for previous parity to be empty of readers.
- */
- wait_for_quiescent_state(prev_parity);
- /*
- * Deleting old data is ok !
- */
-
- ret = pthread_mutex_unlock(&urcu_mutex);
- if (ret) {
- perror("Error in %s pthread mutex lock", __func__);
- exit(-1);
- }
return oldptr;
}
void urcu_add_reader(pthread_t id)
{
+ struct reader_data *oldarray;
+
if (!reader_data) {
alloc_readers = INIT_NUM_THREADS;
- num_readers = 1;
+ num_readers = 0;
reader_data =
malloc(sizeof(struct reader_data) * alloc_readers);
- return;
}
if (alloc_readers < num_readers + 1) {
- pthread_t *oldarray;
oldarray = reader_data;
reader_data = malloc(sizeof(struct reader_data)
* (alloc_readers << 1));
+ /* Grow the registry: copy the old entries, then free the old array. */
+ memcpy(reader_data, oldarray,
+ sizeof(struct reader_data) * alloc_readers);
+ alloc_readers <<= 1;
+ free(oldarray);
}
reader_data[num_readers].tid = id;
/* reference to the TLS of _this_ reader thread. */
- reader_data[num_readers].urcu_active_readers = urcu_active_readers;
+ reader_data[num_readers].urcu_active_readers = &urcu_active_readers;
num_readers++;
}
assert(reader_data != NULL);
for (index = reader_data; index < reader_data + num_readers; index++) {
- if (index->tid == id) {
+ if (pthread_equal(index->tid, id)) {
memcpy(index, &reader_data[num_readers - 1],
sizeof(struct reader_data));
reader_data[num_readers - 1].tid = 0;
void urcu_register_thread(void)
{
- pthread_t self = pthread_self();
-
- ret = pthread_mutex_lock(&urcu_mutex);
- if (ret) {
- perror("Error in %s pthread mutex lock", __func__);
- exit(-1);
- }
-
- urcu_add_reader(self);
-
-
- ret = pthread_mutex_unlock(&urcu_mutex);
- if (ret) {
- perror("Error in %s pthread mutex unlock", __func__);
- exit(-1);
- }
+ internal_urcu_lock();
+ urcu_add_reader(pthread_self());
+ internal_urcu_unlock();
}
-void urcu_register_thread(void)
+void urcu_unregister_thread(void)
{
- pthread_t self = pthread_self();
-
- ret = pthread_mutex_lock(&urcu_mutex);
- if (ret) {
- perror("Error in %s pthread mutex lock", __func__);
- exit(-1);
- }
-
- urcu_remove_reader(self);
-
- ret = pthread_mutex_unlock(&urcu_mutex);
- if (ret) {
- perror("Error in %s pthread mutex unlock", __func__);
- exit(-1);
- }
-
+ internal_urcu_lock();
+ urcu_remove_reader(pthread_self());
+ internal_urcu_unlock();
}
-void handler(int signo, siginfo_t *siginfo, void *context)
+void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
{
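+ /*
+ * Runs in the signaled reader thread: the mb() is the barrier that
+ * force_mb_all_threads() asked for, and incrementing sig_done
+ * acknowledges it to the busy-waiting writer.
+ */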
mb();
atomic_inc(&sig_done);
act.sa_sigaction = sigurcu_handler;
ret = sigaction(SIGURCU, &act, NULL);
- if (!ret) {
- perror("Error in %s sigaction", __func__);
+ if (ret) {
+ perror("Error in sigaction");
exit(-1);
}
}
int ret;
ret = sigaction(SIGURCU, NULL, &act);
- if (!ret) {
- perror("Error in %s sigaction", __func__);
+ if (ret) {
+ perror("Error in sigaction");
exit(-1);
}
assert(act.sa_sigaction == sigurcu_handler);