#include <signal.h>
#include <assert.h>
#include <stdlib.h>
+#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <sys/time.h>
-#include <syscall.h>
#include <unistd.h>
#include <sched.h>
#include "urcu-call-rcu.h"
#include "urcu-pointer.h"
#include "urcu/list.h"
-#include "urcu/urcu-futex.h"
+#include "urcu/futex.h"
/* Data structure that identifies a call_rcu thread. */
struct call_rcu_data {
struct cds_wfq_queue cbs;
unsigned long flags;
- pthread_mutex_t mtx;
- int futex;
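+	/*
+	 * Linux futexes operate on 32-bit words, so use an explicitly
+	 * sized type rather than relying on the width of "int".
+	 */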
+ int32_t futex;
unsigned long qlen; /* maintained for debugging. */
pthread_t tid;
int cpu_affinity;
static struct call_rcu_data **per_cpu_call_rcu_data;
static long maxcpus;
-static void call_rcu_wait(struct call_rcu_data *crdp)
-{
- /* Read call_rcu list before read futex */
- cmm_smp_mb();
- if (uatomic_read(&crdp->futex) == -1)
- futex_async(&crdp->futex, FUTEX_WAIT, -1,
- NULL, NULL, 0);
-}
-
-static void call_rcu_wake_up(struct call_rcu_data *crdp)
-{
- /* Write to call_rcu list before reading/writing futex */
- cmm_smp_mb();
- if (unlikely(uatomic_read(&crdp->futex) == -1)) {
- uatomic_set(&crdp->futex, 0);
- futex_async(&crdp->futex, FUTEX_WAKE, 1,
- NULL, NULL, 0);
- }
-}
-
/* Allocate the array if it has not already been allocated. */
static void alloc_cpu_call_rcu_data(void)
#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
-static const struct call_rcu_data **per_cpu_call_rcu_data = NULL;
+/*
+ * per_cpu_call_rcu_data should be constant, but some functions below, used
+ * both when a CPU number is available and when it is not, assume it is not
+ * constant.
+ */
+static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
static const long maxcpus = -1;
static void alloc_cpu_call_rcu_data(void)
}
#endif
+static void call_rcu_wait(struct call_rcu_data *crdp)
+{
+	/* Read call_rcu list before reading futex */
+ cmm_smp_mb();
+ if (uatomic_read(&crdp->futex) == -1)
+ futex_async(&crdp->futex, FUTEX_WAIT, -1,
+ NULL, NULL, 0);
+}
+
+static void call_rcu_wake_up(struct call_rcu_data *crdp)
+{
+ /* Write to call_rcu list before reading/writing futex */
+ cmm_smp_mb();
+ if (unlikely(uatomic_read(&crdp->futex) == -1)) {
+ uatomic_set(&crdp->futex, 0);
+ futex_async(&crdp->futex, FUTEX_WAKE, 1,
+ NULL, NULL, 0);
+ }
+}
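+
+/*
+ * Wait/wake pairing (sketch): the worker publishes futex == -1, issues
+ * a full barrier, then re-checks the callback list before sleeping in
+ * FUTEX_WAIT; an enqueuer adds a callback, issues a full barrier, then
+ * reads the futex, resetting it to 0 and calling FUTEX_WAKE if it read
+ * -1. Whatever the interleaving, either the worker sees the new
+ * callback or the enqueuer sees -1 and issues the wakeup, so a
+ * callback cannot be stranded on a sleeping worker.
+ */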
+
/* This is the code run by each call_rcu thread. */
static void *call_rcu_thread(void *arg)
struct cds_wfq_node **cbs_tail;
struct call_rcu_data *crdp = (struct call_rcu_data *)arg;
struct rcu_head *rhp;
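+	/* The RT flag is fixed at creation time, so sample it once. */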
+ int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);
if (set_thread_cpu_affinity(crdp) != 0) {
perror("pthread_setaffinity_np");
exit(-1);
}
+ /*
+ * If callbacks take a read-side lock, we need to be registered.
+ */
+ rcu_register_thread();
+
thread_call_rcu_data = crdp;
+ if (!rt) {
+ uatomic_dec(&crdp->futex);
+ /* Decrement futex before reading call_rcu list */
+ cmm_smp_mb();
+ }
for (;;) {
- if (!(crdp->flags & URCU_CALL_RCU_RT)) {
- uatomic_dec(&crdp->futex);
- /* Decrement futex before reading call_rcu list */
- cmm_smp_mb();
- }
if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
poll(NULL, 0, 1);
} while (cbs != NULL);
uatomic_sub(&crdp->qlen, cbcount);
}
- if (crdp->flags & URCU_CALL_RCU_STOP) {
- if (!(crdp->flags & URCU_CALL_RCU_RT)) {
+ if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
+ break;
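+		/*
+		 * Mark this thread offline while it waits so it does not
+		 * hold up grace periods (matters for the QSBR flavor).
+		 */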
+ rcu_thread_offline();
+ if (!rt) {
+ if (&crdp->cbs.head
+ == _CMM_LOAD_SHARED(crdp->cbs.tail)) {
+ call_rcu_wait(crdp);
+ poll(NULL, 0, 10);
+ uatomic_dec(&crdp->futex);
/*
- * Read call_rcu list before write futex.
+ * Decrement futex before reading
+ * call_rcu list.
*/
cmm_smp_mb();
- uatomic_set(&crdp->futex, 0);
+ } else {
+ poll(NULL, 0, 10);
}
- break;
- }
- if (!(crdp->flags & URCU_CALL_RCU_RT)) {
- if (&crdp->cbs.head == _CMM_LOAD_SHARED(crdp->cbs.tail))
- call_rcu_wait(crdp);
+ } else {
+ poll(NULL, 0, 10);
}
- poll(NULL, 0, 10);
+ rcu_thread_online();
+ }
+ if (!rt) {
+ /*
+		 * Read call_rcu list before writing futex.
+ */
+ cmm_smp_mb();
+ uatomic_set(&crdp->futex, 0);
}
- call_rcu_lock(&crdp->mtx);
- crdp->flags |= URCU_CALL_RCU_STOPPED;
- call_rcu_unlock(&crdp->mtx);
+ uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
+ rcu_unregister_thread();
return NULL;
}
memset(crdp, '\0', sizeof(*crdp));
cds_wfq_init(&crdp->cbs);
crdp->qlen = 0;
- if (pthread_mutex_init(&crdp->mtx, NULL) != 0) {
- perror("pthread_mutex_init");
- exit(-1);
- }
crdp->futex = 0;
crdp->flags = flags;
cds_list_add(&crdp->list, &call_rcu_data_list);
int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
{
- int warned = 0;
+ static int warned = 0;
call_rcu_lock(&call_rcu_mutex);
+ alloc_cpu_call_rcu_data();
if (cpu < 0 || maxcpus <= cpu) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
			warned = 1;
		}
		call_rcu_unlock(&call_rcu_mutex);
		errno = EINVAL;
		return -EINVAL;
	}
- alloc_cpu_call_rcu_data();
- call_rcu_unlock(&call_rcu_mutex);
+
if (per_cpu_call_rcu_data == NULL) {
+ call_rcu_unlock(&call_rcu_mutex);
errno = ENOMEM;
return -ENOMEM;
}
+
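+	/*
+	 * Passing a NULL crdp detaches the current call_rcu_data from
+	 * the CPU, so only reject replacing one non-NULL entry with
+	 * another.
+	 */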
+ if (per_cpu_call_rcu_data[cpu] != NULL && crdp != NULL) {
+ call_rcu_unlock(&call_rcu_mutex);
+ errno = EEXIST;
+ return -EEXIST;
+ }
+
per_cpu_call_rcu_data[cpu] = crdp;
+ call_rcu_unlock(&call_rcu_mutex);
return 0;
}
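+
+/*
+ * Example (sketch): attach a dedicated worker to CPU 0, backing off if
+ * another thread raced us to it. create_call_rcu_data() takes (flags,
+ * cpu_affinity):
+ *
+ *	struct call_rcu_data *crdp = create_call_rcu_data(0, 0);
+ *
+ *	if (crdp != NULL && set_cpu_call_rcu_data(0, crdp) == -EEXIST)
+ *		call_rcu_data_free(crdp);
+ */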
*/
struct call_rcu_data *get_call_rcu_data(void)
{
- int curcpu;
- static int warned = 0;
+ struct call_rcu_data *crd;
if (thread_call_rcu_data != NULL)
return thread_call_rcu_data;
- if (maxcpus <= 0)
- return get_default_call_rcu_data();
- curcpu = sched_getcpu();
- if (!warned && (curcpu < 0 || maxcpus <= curcpu)) {
- fprintf(stderr, "[error] liburcu: gcrd CPU # out of range\n");
- warned = 1;
+
+ if (maxcpus > 0) {
+ crd = get_cpu_call_rcu_data(sched_getcpu());
+ if (crd)
+ return crd;
}
- if (curcpu >= 0 && maxcpus > curcpu &&
- per_cpu_call_rcu_data != NULL &&
- per_cpu_call_rcu_data[curcpu] != NULL)
- return per_cpu_call_rcu_data[curcpu];
+
return get_default_call_rcu_data();
}
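+
+/*
+ * Lookup order above: the caller's own call_rcu_data if one has been
+ * assigned, then the current CPU's, then the always-present default.
+ */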
/*
* Create a separate call_rcu thread for each CPU. This does not
* replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
- * function if you want that behavior.
+ * function if you want that behavior. Should be paired with
+ * free_all_cpu_call_rcu_data() to tear down these call_rcu worker
+ * threads.
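+ *
+ * Typical pairing (sketch; handle_error() stands in for the caller's
+ * error handling):
+ *
+ *	if (create_all_cpu_call_rcu_data(0) != 0)
+ *		handle_error();
+ *	...
+ *	free_all_cpu_call_rcu_data();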
*/
int create_all_cpu_call_rcu_data(unsigned long flags)
}
call_rcu_unlock(&call_rcu_mutex);
if ((ret = set_cpu_call_rcu_data(i, crdp)) != 0) {
- /* FIXME: Leaks crdp for now. */
- return ret; /* Can happen on race. */
+ call_rcu_data_free(crdp);
+
+		/* it has been created by another thread */
+ if (ret == -EEXIST)
+ continue;
+
+ return ret;
}
}
return 0;
if (crdp == NULL || crdp == default_call_rcu_data) {
return;
}
- if ((crdp->flags & URCU_CALL_RCU_STOPPED) == 0) {
- call_rcu_lock(&crdp->mtx);
- crdp->flags |= URCU_CALL_RCU_STOP;
- call_rcu_unlock(&crdp->mtx);
+ if ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0) {
+ uatomic_or(&crdp->flags, URCU_CALL_RCU_STOP);
wake_call_rcu_thread(crdp);
- while ((crdp->flags & URCU_CALL_RCU_STOPPED) == 0)
+ while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
poll(NULL, 0, 1);
}
if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
*cbs_endprev = cbs;
uatomic_add(&default_call_rcu_data->qlen,
uatomic_read(&crdp->qlen));
- cds_list_del(&crdp->list);
- free(crdp);
+ wake_call_rcu_thread(default_call_rcu_data);
}
+
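+	/*
+	 * Any leftover callbacks were requeued onto the default worker
+	 * (and it was woken) above, so the structure can now be unlinked
+	 * and freed.
+	 */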
+ cds_list_del(&crdp->list);
+ free(crdp);
}
/*
*/
void call_rcu_after_fork_child(void)
{
- struct call_rcu_data *crdp;
+ struct call_rcu_data *crdp, *next;
/* Release the mutex. */
call_rcu_unlock(&call_rcu_mutex);
(void)get_default_call_rcu_data();
/* Dispose of all of the rest of the call_rcu_data structures. */
- while (call_rcu_data_list.next != call_rcu_data_list.prev) {
- crdp = cds_list_entry(call_rcu_data_list.prev,
- struct call_rcu_data, list);
+ cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
if (crdp == default_call_rcu_data)
- crdp = cds_list_entry(crdp->list.prev,
- struct call_rcu_data, list);
- crdp->flags = URCU_CALL_RCU_STOPPED;
+ continue;
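+		/*
+		 * fork() duplicates only the calling thread, so the worker
+		 * behind this call_rcu_data no longer exists in the child;
+		 * mark it stopped directly rather than waiting for an ack
+		 * that would never come.
+		 */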
+ uatomic_set(&crdp->flags, URCU_CALL_RCU_STOPPED);
call_rcu_data_free(crdp);
}
}