*/
#define _GNU_SOURCE
+#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <sched.h>
#include "config.h"
-#include "urcu/wfqueue.h"
+#include "urcu/wfcqueue.h"
#include "urcu-call-rcu.h"
#include "urcu-pointer.h"
#include "urcu/list.h"
#include "urcu/futex.h"
+#include "urcu/tls-compat.h"
+#include "urcu-die.h"
/* Data structure that identifies a call_rcu thread. */
struct call_rcu_data {
- struct cds_wfq_queue cbs;
+ /*
+ * We do not align head on a different cache-line than tail
+ * mainly because call_rcu callback-invocation threads use
+ * batching ("splice") to get an entire list of callbacks, which
+ * effectively empties the queue and requires touching the tail
+ * anyway.
+ */
+ struct cds_wfcq_tail cbs_tail;
+ struct cds_wfcq_head cbs_head;
unsigned long flags;
int32_t futex;
unsigned long qlen; /* maintained for debugging. */
	pthread_t tid;
	int cpu_affinity;
	struct cds_list_head list;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

/*
 * List of all call_rcu_data structures.
 * Protected by call_rcu_mutex.
 */
-CDS_LIST_HEAD(call_rcu_data_list);
+static CDS_LIST_HEAD(call_rcu_data_list);
/* Link a thread using call_rcu() to its call_rcu thread. */
-static __thread struct call_rcu_data *thread_call_rcu_data;
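+/*
+ * tls-compat.h maps DEFINE_URCU_TLS/URCU_TLS onto compiler TLS
+ * (__thread) where available, and falls back to pthread keys
+ * otherwise.
+ */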
+static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);
/* Guard call_rcu thread creation. */

static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Acquire the specified pthread mutex. */
static void call_rcu_lock(pthread_mutex_t *pmp)
{
- if (pthread_mutex_lock(pmp) != 0) {
- perror("pthread_mutex_lock");
- exit(-1);
- }
+ int ret;
+
+ ret = pthread_mutex_lock(pmp);
+ if (ret)
+ urcu_die(ret);
}
/* Release the specified pthread mutex. */
static void call_rcu_unlock(pthread_mutex_t *pmp)
{
- if (pthread_mutex_unlock(pmp) != 0) {
- perror("pthread_mutex_unlock");
- exit(-1);
- }
+ int ret;
+
+ ret = pthread_mutex_unlock(pmp);
+ if (ret)
+ urcu_die(ret);
}
#if HAVE_SCHED_SETAFFINITY
static int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	cpu_set_t mask;

	if (crdp->cpu_affinity < 0)
		return 0;

	CPU_ZERO(&mask);
	CPU_SET(crdp->cpu_affinity, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	return sched_setaffinity(0, &mask);
#else
	return sched_setaffinity(0, sizeof(mask), &mask);
#endif
}
#else
static int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	return 0;
}
#endif
static void *call_rcu_thread(void *arg)
{
unsigned long cbcount;
- struct cds_wfq_node *cbs;
- struct cds_wfq_node **cbs_tail;
- struct call_rcu_data *crdp = (struct call_rcu_data *)arg;
- struct rcu_head *rhp;
+ struct call_rcu_data *crdp = (struct call_rcu_data *) arg;
int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);
+ int ret;
- if (set_thread_cpu_affinity(crdp) != 0) {
- perror("pthread_setaffinity_np");
- exit(-1);
- }
+ ret = set_thread_cpu_affinity(crdp);
+ if (ret)
+ urcu_die(errno);
/*
* If callbacks take a read-side lock, we need to be registered.
*/
rcu_register_thread();
- thread_call_rcu_data = crdp;
+ URCU_TLS(thread_call_rcu_data) = crdp;
if (!rt) {
uatomic_dec(&crdp->futex);
/* Decrement futex before reading call_rcu list */
cmm_smp_mb();
}
for (;;) {
- if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
- while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
- poll(NULL, 0, 1);
- _CMM_STORE_SHARED(crdp->cbs.head, NULL);
- cbs_tail = (struct cds_wfq_node **)
- uatomic_xchg(&crdp->cbs.tail, &crdp->cbs.head);
+ struct cds_wfcq_head cbs_tmp_head;
+ struct cds_wfcq_tail cbs_tmp_tail;
+ struct cds_wfcq_node *cbs, *cbs_tmp_n;
+ enum cds_wfcq_ret splice_ret;
+
+ cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
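+ /*
+ * Splice every pending callback onto the local queue in one
+ * operation. This worker is the single dequeuer of its queue,
+ * so the unlocked __cds_wfcq_splice_blocking() variant is safe
+ * here; concurrent enqueuers never need a lock against it.
+ */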
+ splice_ret = __cds_wfcq_splice_blocking(&cbs_tmp_head,
+ &cbs_tmp_tail, &crdp->cbs_head, &crdp->cbs_tail);
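+ /*
+ * The blocking splice variant never returns WOULDBLOCK, and
+ * the destination queue was just initialized empty, so neither
+ * of these return codes is possible here.
+ */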
+ assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
+ assert(splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
+ if (splice_ret != CDS_WFCQ_RET_SRC_EMPTY) {
synchronize_rcu();
cbcount = 0;
- do {
- while (cbs->next == NULL &&
- &cbs->next != cbs_tail)
- poll(NULL, 0, 1);
- if (cbs == &crdp->cbs.dummy) {
- cbs = cbs->next;
- continue;
- }
- rhp = (struct rcu_head *)cbs;
- cbs = cbs->next;
+ __cds_wfcq_for_each_blocking_safe(&cbs_tmp_head,
+ &cbs_tmp_tail, cbs, cbs_tmp_n) {
+ struct rcu_head *rhp;
+
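+ /*
+ * struct rcu_head embeds its queue node as the "next"
+ * member; recover the enclosing callback from the node
+ * being iterated over.
+ */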
+ rhp = caa_container_of(cbs,
+ struct rcu_head, next);
rhp->func(rhp);
cbcount++;
- } while (cbs != NULL);
+ }
uatomic_sub(&crdp->qlen, cbcount);
}
if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
break;
rcu_thread_offline();
if (!rt) {
- if (&crdp->cbs.head
- == _CMM_LOAD_SHARED(crdp->cbs.tail)) {
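+ /*
+ * The queue is still seen empty after the futex was
+ * decremented, so block in call_rcu_wait() until
+ * wake_call_rcu_thread() resets the futex and wakes us.
+ */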
+ if (cds_wfcq_empty(&crdp->cbs_head,
+ &crdp->cbs_tail)) {
call_rcu_wait(crdp);
poll(NULL, 0, 10);
uatomic_dec(&crdp->futex);
static void call_rcu_data_init(struct call_rcu_data **crdpp,
			       unsigned long flags,
			       int cpu_affinity)
{
struct call_rcu_data *crdp;
+ int ret;
crdp = malloc(sizeof(*crdp));
- if (crdp == NULL) {
- fprintf(stderr, "Out of memory.\n");
- exit(-1);
- }
+ if (crdp == NULL)
+ urcu_die(errno);
memset(crdp, '\0', sizeof(*crdp));
- cds_wfq_init(&crdp->cbs);
+ cds_wfcq_init(&crdp->cbs_head, &crdp->cbs_tail);
crdp->qlen = 0;
crdp->futex = 0;
crdp->flags = flags;
crdp->cpu_affinity = cpu_affinity;
cmm_smp_mb(); /* Structure initialized before pointer is planted. */
*crdpp = crdp;
- if (pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp) != 0) {
- perror("pthread_create");
- exit(-1);
- }
+ ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
+ if (ret)
+ urcu_die(ret);
}
/*
 * Return the call_rcu_data structure that applies to the currently
 * running thread: the thread's own structure if it has one, otherwise
 * the structure for the CPU it is running on, otherwise the default
 * structure.
 */
static struct call_rcu_data *get_call_rcu_data(void)
{
struct call_rcu_data *crd;
- if (thread_call_rcu_data != NULL)
- return thread_call_rcu_data;
+ if (URCU_TLS(thread_call_rcu_data) != NULL)
+ return URCU_TLS(thread_call_rcu_data);
if (maxcpus > 0) {
crd = get_cpu_call_rcu_data(sched_getcpu());
		if (crd)
			return crd;
	}
	return get_default_call_rcu_data();
}

/*
 * Return a pointer to this task's call_rcu_data if there is one.
 */
struct call_rcu_data *get_thread_call_rcu_data(void)
{
- return thread_call_rcu_data;
+ return URCU_TLS(thread_call_rcu_data);
}
/*
 * Set this task's call_rcu_data structure as specified, regardless
 * of whether or not this task already had one.
 */
void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
- thread_call_rcu_data = crdp;
+ URCU_TLS(thread_call_rcu_data) = crdp;
}
/*
 * Schedule a function to be invoked after a following grace period.
 */
void call_rcu(struct rcu_head *head,
	      void (*func)(struct rcu_head *head))
{
struct call_rcu_data *crdp;
- cds_wfq_node_init(&head->next);
+ cds_wfcq_node_init(&head->next);
head->func = func;
/* Holding rcu read-side lock across use of per-cpu crdp */
rcu_read_lock();
crdp = get_call_rcu_data();
- cds_wfq_enqueue(&crdp->cbs, &head->next);
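+ /*
+ * Wait-free enqueue: safe against the worker concurrently
+ * splicing the queue, so no lock is taken on this hot path.
+ */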
+ cds_wfcq_enqueue(&crdp->cbs_head, &crdp->cbs_tail, &head->next);
uatomic_inc(&crdp->qlen);
wake_call_rcu_thread(crdp);
rcu_read_unlock();
}

/*
 * Free up the specified call_rcu_data structure, terminating the
 * associated call_rcu thread.  The caller must have previously
 * removed the call_rcu_data structure from per-thread or per-CPU
 * usage.
 *
 * The caller must wait for a grace-period to pass between return from
* set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
* previous call rcu data as argument.
+ *
+ * Note: introducing __cds_wfcq_splice_blocking() in this function fixed
+ * a list corruption bug in the 0.7.x series. The equivalent fix
+ * appeared in 0.6.8 for the stable-0.6 branch.
*/
void call_rcu_data_free(struct call_rcu_data *crdp)
{
- struct cds_wfq_node *cbs;
- struct cds_wfq_node **cbs_tail;
- struct cds_wfq_node **cbs_endprev;
-
if (crdp == NULL || crdp == default_call_rcu_data) {
return;
}
	if ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_STOP);
		wake_call_rcu_thread(crdp);
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
poll(NULL, 0, 1);
}
- if (&crdp->cbs.head != _CMM_LOAD_SHARED(crdp->cbs.tail)) {
- while ((cbs = _CMM_LOAD_SHARED(crdp->cbs.head)) == NULL)
- poll(NULL, 0, 1);
- _CMM_STORE_SHARED(crdp->cbs.head, NULL);
- cbs_tail = (struct cds_wfq_node **)
- uatomic_xchg(&crdp->cbs.tail, &crdp->cbs.head);
+ if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
/* Create default call rcu data if need be */
(void) get_default_call_rcu_data();
- cbs_endprev = (struct cds_wfq_node **)
- uatomic_xchg(&default_call_rcu_data, cbs_tail);
- *cbs_endprev = cbs;
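+ /*
+ * Requeue this thread's leftover callbacks onto the default
+ * queue so they are still invoked after a grace period rather
+ * than leaked.
+ */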
+ __cds_wfcq_splice_blocking(&default_call_rcu_data->cbs_head,
+ &default_call_rcu_data->cbs_tail,
+ &crdp->cbs_head, &crdp->cbs_tail);
uatomic_add(&default_call_rcu_data->qlen,
uatomic_read(&crdp->qlen));
wake_call_rcu_thread(default_call_rcu_data);
maxcpus_reset();
free(per_cpu_call_rcu_data);
rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
- thread_call_rcu_data = NULL;
+ URCU_TLS(thread_call_rcu_data) = NULL;
/* Dispose of all of the rest of the call_rcu_data structures. */
cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {