#include "urcu-pointer.h"
#include "urcu/list.h"
#include "urcu/futex.h"
+#include "urcu/tls-compat.h"
+#include "urcu-die.h"
+
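+/*
+ * SET_AFFINITY_CHECK_PERIOD must be a power of two so that the
+ * SET_AFFINITY_CHECK_PERIOD_MASK test in set_thread_cpu_affinity()
+ * is a single bitwise AND.
+ */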
+#define SET_AFFINITY_CHECK_PERIOD (1U << 8) /* 256 */
+#define SET_AFFINITY_CHECK_PERIOD_MASK (SET_AFFINITY_CHECK_PERIOD - 1)
/* Data structure that identifies a call_rcu thread. */
unsigned long qlen; /* maintained for debugging. */
pthread_t tid;
int cpu_affinity;
+ unsigned long gp_count; /* Counter used to rate-limit CPU affinity re-checks. */
struct cds_list_head list;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
/* Link a thread using call_rcu() to its call_rcu thread. */
-static __thread struct call_rcu_data *thread_call_rcu_data;
-
-/* Guard call_rcu thread creation. */
+static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);
+/*
+ * Guard call_rcu thread creation and atfork handlers.
+ */
static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;
/* If a given thread does not have its own call_rcu thread, this is the default. */
* CPUs rather than only to specific threads.
*/
-#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
+#ifdef HAVE_SCHED_GETCPU
+
+static int urcu_sched_getcpu(void)
+{
+ return sched_getcpu();
+}
+
+#else /* #ifdef HAVE_SCHED_GETCPU */
+
+static int urcu_sched_getcpu(void)
+{
+ return -1;
+}
+
+#endif /* #else #ifdef HAVE_SCHED_GETCPU */
+
+#if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU)
/*
* Pointer to array of pointers to per-CPU call_rcu_data structures
}
}
-#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+#else /* #if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) */
/*
* per_cpu_call_rcu_data should be constant, but some functions below, used both
{
}
-static int sched_getcpu(void)
-{
- return -1;
-}
-
-#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+#endif /* #else #if defined(HAVE_SYSCONF) && defined(HAVE_SCHED_GETCPU) */
/* Acquire the specified pthread mutex. */
static void call_rcu_lock(pthread_mutex_t *pmp)
{
- if (pthread_mutex_lock(pmp) != 0) {
- perror("pthread_mutex_lock");
- exit(-1);
- }
+ int ret;
+
+ ret = pthread_mutex_lock(pmp);
+ if (ret)
+ urcu_die(ret);
}
/* Release the specified pthread mutex. */
static void call_rcu_unlock(pthread_mutex_t *pmp)
{
- if (pthread_mutex_unlock(pmp) != 0) {
- perror("pthread_mutex_unlock");
- exit(-1);
- }
+ int ret;
+
+ ret = pthread_mutex_unlock(pmp);
+ if (ret)
+ urcu_die(ret);
}
+/*
+ * Periodically retry setting CPU affinity if we migrate.
+ * Losing affinity can be caused by CPU hotunplug/hotplug, or by
+ * cpuset(7).
+ */
#if HAVE_SCHED_SETAFFINITY
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
cpu_set_t mask;
+ int ret;
if (crdp->cpu_affinity < 0)
return 0;
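+ /* Only re-check affinity once every SET_AFFINITY_CHECK_PERIOD calls. */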
+ if (++crdp->gp_count & SET_AFFINITY_CHECK_PERIOD_MASK)
+ return 0;
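+ /* Already running on the requested CPU: nothing to do. */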
+ if (urcu_sched_getcpu() == crdp->cpu_affinity)
+ return 0;
CPU_ZERO(&mask);
CPU_SET(crdp->cpu_affinity, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
- return sched_setaffinity(0, &mask);
+ ret = sched_setaffinity(0, &mask);
#else
- return sched_setaffinity(0, sizeof(mask), &mask);
+ ret = sched_setaffinity(0, sizeof(mask), &mask);
#endif
+ /*
+ * EINVAL is fine: it can be caused by hotunplugged CPUs or by
+ * cpuset(7). This is why we always retry when we detect
+ * migration.
+ */
+ if (ret && errno == EINVAL) {
+ ret = 0;
+ errno = 0;
+ }
+ return ret;
}
#else
static
{
/* Read call_rcu list before reading futex */
cmm_smp_mb();
- if (uatomic_read(&crdp->futex) == -1)
- futex_async(&crdp->futex, FUTEX_WAIT, -1,
- NULL, NULL, 0);
+ if (uatomic_read(&crdp->futex) != -1)
+ return;
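+ /* Block until woken or the futex value changes; retry if interrupted by a signal. */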
+ while (futex_async(&crdp->futex, FUTEX_WAIT, -1,
+ NULL, NULL, 0)) {
+ switch (errno) {
+ case EWOULDBLOCK:
+ /* Value already changed. */
+ return;
+ case EINTR:
+ /* Retry if interrupted by signal. */
+ break; /* Get out of switch. */
+ default:
+ /* Unexpected error. */
+ urcu_die(errno);
+ }
+ }
}
static void call_rcu_wake_up(struct call_rcu_data *crdp)
cmm_smp_mb();
if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
uatomic_set(&crdp->futex, 0);
- futex_async(&crdp->futex, FUTEX_WAKE, 1,
- NULL, NULL, 0);
+ if (futex_async(&crdp->futex, FUTEX_WAKE, 1,
+ NULL, NULL, 0) < 0)
+ urcu_die(errno);
}
}
struct rcu_head *rhp;
int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);
- if (set_thread_cpu_affinity(crdp) != 0) {
- perror("pthread_setaffinity_np");
- exit(-1);
- }
+ if (set_thread_cpu_affinity(crdp))
+ urcu_die(errno);
/*
* If callbacks take a read-side lock, we need to be registered.
*/
rcu_register_thread();
- thread_call_rcu_data = crdp;
+ URCU_TLS(thread_call_rcu_data) = crdp;
if (!rt) {
uatomic_dec(&crdp->futex);
/* Decrement futex before reading call_rcu list */
cmm_smp_mb();
}
for (;;) {
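+ /* Re-assert CPU affinity periodically in case this thread migrated. */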
+ if (set_thread_cpu_affinity(crdp))
+ urcu_die(errno);
+
+ if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) {
+ /*
+ * Pause requested. Become quiescent: remove
+ * ourselves from all global lists, and don't
+ * process any callbacks. The callback lists may
+ * still be non-empty though.
+ */
+ rcu_unregister_thread();
+ cmm_smp_mb__before_uatomic_or();
+ uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
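+ /* Wait (sleeping 1 ms at a time) until the pause request is cleared. */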
+ while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
+ poll(NULL, 0, 1);
+ uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSED);
+ cmm_smp_mb__after_uatomic_and();
+ rcu_register_thread();
+ }
+
if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
poll(NULL, 0, 1);
int cpu_affinity)
{
struct call_rcu_data *crdp;
+ int ret;
crdp = malloc(sizeof(*crdp));
- if (crdp == NULL) {
- fprintf(stderr, "Out of memory.\n");
- exit(-1);
- }
+ if (crdp == NULL)
+ urcu_die(errno);
memset(crdp, '\0', sizeof(*crdp));
cds_wfq_init(&crdp->cbs);
crdp->qlen = 0;
crdp->flags = flags;
cds_list_add(&crdp->list, &call_rcu_data_list);
crdp->cpu_affinity = cpu_affinity;
+ crdp->gp_count = 0;
cmm_smp_mb(); /* Structure initialized before pointer is planted. */
*crdpp = crdp;
- if (pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp) != 0) {
- perror("pthread_create");
- exit(-1);
- }
+ ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
+ if (ret)
+ urcu_die(ret);
}
/*
* Return a pointer to the call_rcu_data structure for the specified
* CPU, returning NULL if there is none. We cannot automatically
* create it because the platform we are running on might not define
- * sched_getcpu().
+ * urcu_sched_getcpu().
*
* The call to this function and use of the returned call_rcu_data
* should be protected by RCU read-side lock.
{
struct call_rcu_data *crd;
- if (thread_call_rcu_data != NULL)
- return thread_call_rcu_data;
+ if (URCU_TLS(thread_call_rcu_data) != NULL)
+ return URCU_TLS(thread_call_rcu_data);
if (maxcpus > 0) {
- crd = get_cpu_call_rcu_data(sched_getcpu());
+ crd = get_cpu_call_rcu_data(urcu_sched_getcpu());
if (crd)
return crd;
}
struct call_rcu_data *get_thread_call_rcu_data(void)
{
- return thread_call_rcu_data;
+ return URCU_TLS(thread_call_rcu_data);
}
/*
void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
- thread_call_rcu_data = crdp;
+ URCU_TLS(thread_call_rcu_data) = crdp;
}
/*
cds_wfq_node_init(&head->next);
head->func = func;
/* Holding rcu read-side lock across use of per-cpu crdp */
- rcu_read_lock();
+ _rcu_read_lock();
crdp = get_call_rcu_data();
cds_wfq_enqueue(&crdp->cbs, &head->next);
uatomic_inc(&crdp->qlen);
wake_call_rcu_thread(crdp);
- rcu_read_unlock();
+ _rcu_read_unlock();
}
/*
/* Create default call rcu data if need be */
(void) get_default_call_rcu_data();
cbs_endprev = (struct cds_wfq_node **)
- uatomic_xchg(&default_call_rcu_data, cbs_tail);
- *cbs_endprev = cbs;
+ uatomic_xchg(&default_call_rcu_data->cbs.tail,
+ cbs_tail);
+ _CMM_STORE_SHARED(*cbs_endprev, cbs);
uatomic_add(&default_call_rcu_data->qlen,
uatomic_read(&crdp->qlen));
wake_call_rcu_thread(default_call_rcu_data);
/*
* Acquire the call_rcu_mutex in order to ensure that the child sees
- * all of the call_rcu() data structures in a consistent state.
+ * all of the call_rcu() data structures in a consistent state. Ensure
+ * that all call_rcu threads are in a quiescent state across fork.
* Suitable for pthread_atfork() and friends.
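+ * These handlers can be registered, for example, with:
+ *   pthread_atfork(call_rcu_before_fork, call_rcu_after_fork_parent,
+ *                  call_rcu_after_fork_child);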
*/
void call_rcu_before_fork(void)
{
+ struct call_rcu_data *crdp;
+
call_rcu_lock(&call_rcu_mutex);
+
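+ /* Ask every call_rcu worker thread to pause, and wake it so it notices. */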
+ cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
+ uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSE);
+ cmm_smp_mb__after_uatomic_or();
+ wake_call_rcu_thread(crdp);
+ }
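+ /* Wait for each worker to acknowledge by setting URCU_CALL_RCU_PAUSED. */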
+ cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
+ while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) == 0)
+ poll(NULL, 0, 1);
+ }
}
/*
*/
void call_rcu_after_fork_parent(void)
{
+ struct call_rcu_data *crdp;
+
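+ /* Clear the pause request, then wait for every worker to resume. */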
+ cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
+ uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
+ cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
+ while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) != 0)
+ poll(NULL, 0, 1);
+ }
call_rcu_unlock(&call_rcu_mutex);
}
maxcpus_reset();
free(per_cpu_call_rcu_data);
rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
- thread_call_rcu_data = NULL;
+ URCU_TLS(thread_call_rcu_data) = NULL;
- /* Dispose of all of the rest of the call_rcu_data structures. */
+ /*
+ * Dispose of all of the rest of the call_rcu_data structures.
+ * Leftover call_rcu callbacks will be merged into the new
+ * default call_rcu thread queue.
+ */
cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
if (crdp == default_call_rcu_data)
continue;