#include <sched.h>
#include "compat-getcpu.h"
-#include "urcu/wfcqueue.h"
-#include "urcu-call-rcu.h"
-#include "urcu-pointer.h"
-#include "urcu/list.h"
-#include "urcu/futex.h"
-#include "urcu/tls-compat.h"
-#include "urcu/ref.h"
+#include <urcu/wfcqueue.h>
+#include <urcu/call-rcu.h>
+#include <urcu/pointer.h>
+#include <urcu/list.h>
+#include <urcu/futex.h>
+#include <urcu/tls-compat.h>
+#include <urcu/ref.h>
#include "urcu-die.h"
+#include "urcu-utils.h"
+#include "compat-smp.h"
#define SET_AFFINITY_CHECK_PERIOD (1U << 8) /* 256 */
#define SET_AFFINITY_CHECK_PERIOD_MASK (SET_AFFINITY_CHECK_PERIOD - 1)
static struct call_rcu_data *default_call_rcu_data;
+static struct urcu_atfork *registered_rculfhash_atfork;
+static unsigned long registered_rculfhash_atfork_refcount;
+
/*
* If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
* available, then we can have call_rcu threads assigned to individual
*/
static struct call_rcu_data **per_cpu_call_rcu_data;
-static long maxcpus;
+static long cpus_array_len;
-static void maxcpus_reset(void)
+static void cpus_array_len_reset(void)
{
- maxcpus = 0;
+ cpus_array_len = 0;
}
/* Allocate the array if it has not already been allocated. */
struct call_rcu_data **p;
static int warned = 0;
- if (maxcpus != 0)
+ if (cpus_array_len != 0)
return;
- maxcpus = sysconf(_SC_NPROCESSORS_CONF);
- if (maxcpus <= 0) {
+ cpus_array_len = get_possible_cpus_array_len();
+ if (cpus_array_len <= 0) {
return;
}
- p = malloc(maxcpus * sizeof(*per_cpu_call_rcu_data));
+ p = malloc(cpus_array_len * sizeof(*per_cpu_call_rcu_data));
if (p != NULL) {
- memset(p, '\0', maxcpus * sizeof(*per_cpu_call_rcu_data));
+ memset(p, '\0', cpus_array_len * sizeof(*per_cpu_call_rcu_data));
rcu_set_pointer(&per_cpu_call_rcu_data, p);
} else {
if (!warned) {
* constant.
*/
static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
-static const long maxcpus = -1;
+static const long cpus_array_len = -1;
-static void maxcpus_reset(void)
+static void cpus_array_len_reset(void)
{
}
* Losing affinity can be caused by CPU hotunplug/hotplug, or by
* cpuset(7).
*/
-#if HAVE_SCHED_SETAFFINITY
+#ifdef HAVE_SCHED_SETAFFINITY
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
}
#else
static
-int set_thread_cpu_affinity(struct call_rcu_data *crdp)
+int set_thread_cpu_affinity(struct call_rcu_data *crdp __attribute__((unused)))
{
return 0;
}
{
/* Read call_rcu list before read futex */
cmm_smp_mb();
- if (uatomic_read(&crdp->futex) != -1)
- return;
- while (futex_async(&crdp->futex, FUTEX_WAIT, -1,
- NULL, NULL, 0)) {
+ while (uatomic_read(&crdp->futex) == -1) {
+ if (!futex_async(&crdp->futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
+ /*
+ * Prior wakeups queued by unrelated code
+ * using the same address can cause futex wait to
+ * return 0 even though the futex value is still
+ * -1 (spurious wakeups). Check the value again
+ * in user-space to validate whether it really
+ * differs from -1.
+ */
+ continue;
+ }
switch (errno) {
- case EWOULDBLOCK:
+ case EAGAIN:
/* Value already changed. */
return;
case EINTR:
/* Retry if interrupted by signal. */
- break; /* Get out of switch. */
+ break; /* Get out of switch. Check again. */
default:
/* Unexpected error. */
urcu_die(errno);
{
/* Read completion barrier count before read futex */
cmm_smp_mb();
- if (uatomic_read(&completion->futex) != -1)
- return;
- while (futex_async(&completion->futex, FUTEX_WAIT, -1,
- NULL, NULL, 0)) {
+ while (uatomic_read(&completion->futex) == -1) {
+ if (!futex_async(&completion->futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
+ /*
+ * Prior wakeups queued by unrelated code
+ * using the same address can cause futex wait to
+ * return 0 even though the futex value is still
+ * -1 (spurious wakeups). Check the value again
+ * in user-space to validate whether it really
+ * differs from -1.
+ */
+ continue;
+ }
switch (errno) {
- case EWOULDBLOCK:
+ case EAGAIN:
/* Value already changed. */
return;
case EINTR:
/* Retry if interrupted by signal. */
- break; /* Get out of switch. */
+ break; /* Get out of switch. Check again. */
default:
/* Unexpected error. */
urcu_die(errno);
pcpu_crdp = rcu_dereference(per_cpu_call_rcu_data);
if (pcpu_crdp == NULL)
return NULL;
- if (!warned && maxcpus > 0 && (cpu < 0 || maxcpus <= cpu)) {
+ if (!warned && cpus_array_len > 0 && (cpu < 0 || cpus_array_len <= cpu)) {
fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
warned = 1;
}
- if (cpu < 0 || maxcpus <= cpu)
+ if (cpu < 0 || cpus_array_len <= cpu)
return NULL;
return rcu_dereference(pcpu_crdp[cpu]);
}
+URCU_ATTR_ALIAS(urcu_stringify(get_cpu_call_rcu_data))
+struct call_rcu_data *alias_get_cpu_call_rcu_data();
/*
* Return the tid corresponding to the call_rcu thread whose
{
return crdp->tid;
}
+URCU_ATTR_ALIAS(urcu_stringify(get_call_rcu_thread))
+pthread_t alias_get_call_rcu_thread();
/*
* Create a call_rcu_data structure (with thread) and return a pointer.
return crdp;
}
+URCU_ATTR_ALIAS(urcu_stringify(create_call_rcu_data))
+struct call_rcu_data *alias_create_call_rcu_data();
struct call_rcu_data *create_call_rcu_data(unsigned long flags,
int cpu_affinity)
{
call_rcu_lock(&call_rcu_mutex);
alloc_cpu_call_rcu_data();
- if (cpu < 0 || maxcpus <= cpu) {
+ if (cpu < 0 || cpus_array_len <= cpu) {
if (!warned) {
fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
warned = 1;
call_rcu_unlock(&call_rcu_mutex);
return 0;
}
+URCU_ATTR_ALIAS(urcu_stringify(set_cpu_call_rcu_data))
+int alias_set_cpu_call_rcu_data();
/*
* Return a pointer to the default call_rcu_data structure, creating
call_rcu_unlock(&call_rcu_mutex);
return default_call_rcu_data;
}
+URCU_ATTR_ALIAS(urcu_stringify(get_default_call_rcu_data))
+struct call_rcu_data *alias_get_default_call_rcu_data();
/*
* Return the call_rcu_data structure that applies to the currently
if (URCU_TLS(thread_call_rcu_data) != NULL)
return URCU_TLS(thread_call_rcu_data);
- if (maxcpus > 0) {
+ if (cpus_array_len > 0) {
crd = get_cpu_call_rcu_data(urcu_sched_getcpu());
if (crd)
return crd;
return get_default_call_rcu_data();
}
+URCU_ATTR_ALIAS(urcu_stringify(get_call_rcu_data))
+struct call_rcu_data *alias_get_call_rcu_data();
/*
* Return a pointer to this task's call_rcu_data if there is one.
{
return URCU_TLS(thread_call_rcu_data);
}
+URCU_ATTR_ALIAS(urcu_stringify(get_thread_call_rcu_data))
+struct call_rcu_data *alias_get_thread_call_rcu_data();
/*
* Set this task's call_rcu_data structure as specified, regardless
{
URCU_TLS(thread_call_rcu_data) = crdp;
}
+URCU_ATTR_ALIAS(urcu_stringify(set_thread_call_rcu_data))
+void alias_set_thread_call_rcu_data();
/*
* Create a separate call_rcu thread for each CPU. This does not
call_rcu_lock(&call_rcu_mutex);
alloc_cpu_call_rcu_data();
call_rcu_unlock(&call_rcu_mutex);
- if (maxcpus <= 0) {
+ if (cpus_array_len <= 0) {
errno = EINVAL;
return -EINVAL;
}
errno = ENOMEM;
return -ENOMEM;
}
- for (i = 0; i < maxcpus; i++) {
+ for (i = 0; i < cpus_array_len; i++) {
call_rcu_lock(&call_rcu_mutex);
if (get_cpu_call_rcu_data(i)) {
call_rcu_unlock(&call_rcu_mutex);
}
return 0;
}
+URCU_ATTR_ALIAS(urcu_stringify(create_all_cpu_call_rcu_data))
+int alias_create_all_cpu_call_rcu_data();
/*
* Wake up the call_rcu thread corresponding to the specified
_call_rcu(head, func, crdp);
_rcu_read_unlock();
}
+URCU_ATTR_ALIAS(urcu_stringify(call_rcu)) void alias_call_rcu();
/*
* Free up the specified call_rcu_data structure, terminating the
while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
(void) poll(NULL, 0, 1);
}
+ call_rcu_lock(&call_rcu_mutex);
if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
- /* Create default call rcu data if need be */
+ call_rcu_unlock(&call_rcu_mutex);
+ /* Create default call rcu data if need be. */
+ /* CBs queued here will be handed to the default list. */
(void) get_default_call_rcu_data();
+ call_rcu_lock(&call_rcu_mutex);
__cds_wfcq_splice_blocking(&default_call_rcu_data->cbs_head,
&default_call_rcu_data->cbs_tail,
&crdp->cbs_head, &crdp->cbs_tail);
wake_call_rcu_thread(default_call_rcu_data);
}
- call_rcu_lock(&call_rcu_mutex);
cds_list_del(&crdp->list);
call_rcu_unlock(&call_rcu_mutex);
free(crdp);
}
+URCU_ATTR_ALIAS(urcu_stringify(call_rcu_data_free))
+void alias_call_rcu_data_free();
/*
* Clean up all the per-CPU call_rcu threads.
struct call_rcu_data **crdp;
static int warned = 0;
- if (maxcpus <= 0)
+ if (cpus_array_len <= 0)
return;
- crdp = malloc(sizeof(*crdp) * maxcpus);
+ crdp = malloc(sizeof(*crdp) * cpus_array_len);
if (!crdp) {
if (!warned) {
fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
return;
}
- for (cpu = 0; cpu < maxcpus; cpu++) {
+ for (cpu = 0; cpu < cpus_array_len; cpu++) {
crdp[cpu] = get_cpu_call_rcu_data(cpu);
if (crdp[cpu] == NULL)
continue;
* call_rcu_data to become quiescent.
*/
synchronize_rcu();
- for (cpu = 0; cpu < maxcpus; cpu++) {
+ for (cpu = 0; cpu < cpus_array_len; cpu++) {
if (crdp[cpu] == NULL)
continue;
call_rcu_data_free(crdp[cpu]);
}
free(crdp);
}
+#ifdef RCU_QSBR
+/* ABI6 has a non-namespaced free_all_cpu_call_rcu_data for qsbr */
+#undef free_all_cpu_call_rcu_data
+URCU_ATTR_ALIAS("urcu_qsbr_free_all_cpu_call_rcu_data")
+void free_all_cpu_call_rcu_data();
+#define free_all_cpu_call_rcu_data urcu_qsbr_free_all_cpu_call_rcu_data
+#else
+URCU_ATTR_ALIAS(urcu_stringify(free_all_cpu_call_rcu_data))
+void alias_free_all_cpu_call_rcu_data();
+#endif
static
void free_completion(struct urcu_ref *ref)
if (was_online)
rcu_thread_online();
}
+URCU_ATTR_ALIAS(urcu_stringify(rcu_barrier))
+void alias_rcu_barrier();
/*
* Acquire the call_rcu_mutex in order to ensure that the child sees
void call_rcu_before_fork(void)
{
struct call_rcu_data *crdp;
+ struct urcu_atfork *atfork;
call_rcu_lock(&call_rcu_mutex);
+ atfork = registered_rculfhash_atfork;
+ if (atfork)
+ atfork->before_fork(atfork->priv);
+
cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSE);
cmm_smp_mb__after_uatomic_or();
(void) poll(NULL, 0, 1);
}
}
+URCU_ATTR_ALIAS(urcu_stringify(call_rcu_before_fork))
+void alias_call_rcu_before_fork();
/*
* Clean up call_rcu data structures in the parent of a successful fork()
void call_rcu_after_fork_parent(void)
{
struct call_rcu_data *crdp;
+ struct urcu_atfork *atfork;
cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) != 0)
(void) poll(NULL, 0, 1);
}
+ atfork = registered_rculfhash_atfork;
+ if (atfork)
+ atfork->after_fork_parent(atfork->priv);
call_rcu_unlock(&call_rcu_mutex);
}
+URCU_ATTR_ALIAS(urcu_stringify(call_rcu_after_fork_parent))
+void alias_call_rcu_after_fork_parent();
/*
* Clean up call_rcu data structures in the child of a successful fork()
void call_rcu_after_fork_child(void)
{
struct call_rcu_data *crdp, *next;
+ struct urcu_atfork *atfork;
/* Release the mutex. */
call_rcu_unlock(&call_rcu_mutex);
+ atfork = registered_rculfhash_atfork;
+ if (atfork)
+ atfork->after_fork_child(atfork->priv);
+
/* Do nothing when call_rcu() has not been used */
if (cds_list_empty(&call_rcu_data_list))
return;
(void)get_default_call_rcu_data();
/* Cleanup call_rcu_data pointers before use */
- maxcpus_reset();
+ cpus_array_len_reset();
free(per_cpu_call_rcu_data);
rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
URCU_TLS(thread_call_rcu_data) = NULL;
call_rcu_data_free(crdp);
}
}
+URCU_ATTR_ALIAS(urcu_stringify(call_rcu_after_fork_child))
+void alias_call_rcu_after_fork_child();
+
+/*
+ * Register a fork-handler set to be invoked by call_rcu_before_fork()
+ * and the after-fork functions. Only the first registration stores the
+ * handler pointer; subsequent calls merely bump the refcount (so later
+ * callers are presumably expected to pass the same handler set — the
+ * extra pointers are ignored). Serialized by call_rcu_mutex.
+ */
+void urcu_register_rculfhash_atfork(struct urcu_atfork *atfork)
+{
+	call_rcu_lock(&call_rcu_mutex);
+	if (registered_rculfhash_atfork_refcount++)
+		goto end;
+	registered_rculfhash_atfork = atfork;
+end:
+	call_rcu_unlock(&call_rcu_mutex);
+}
+URCU_ATTR_ALIAS(urcu_stringify(urcu_register_rculfhash_atfork))
+void alias_urcu_register_rculfhash_atfork();
+
+/*
+ * Drop one reference on the registered fork-handler set; the stored
+ * handler pointer is cleared only when the refcount reaches zero.
+ * The atfork argument itself is never inspected (hence unused).
+ * Serialized by call_rcu_mutex.
+ */
+void urcu_unregister_rculfhash_atfork(struct urcu_atfork *atfork __attribute__((unused)))
+{
+	call_rcu_lock(&call_rcu_mutex);
+	if (--registered_rculfhash_atfork_refcount)
+		goto end;
+	registered_rculfhash_atfork = NULL;
+end:
+	call_rcu_unlock(&call_rcu_mutex);
+}
+URCU_ATTR_ALIAS(urcu_stringify(urcu_unregister_rculfhash_atfork))
+void alias_urcu_unregister_rculfhash_atfork();