X-Git-Url: https://git.liburcu.org/?a=blobdiff_plain;f=src%2Furcu-call-rcu-impl.h;h=4392bc6b87ad53d290847ab246ff824fc0f244ea;hb=799d344f0b89f0c3565f54ad740e7964eb75b7d2;hp=56fe943ac8d1ab8b9b5d260efca4d9e1ed8d7728;hpb=6fd172f599e8d798e68974a786dd930d876f182e;p=urcu.git

diff --git a/src/urcu-call-rcu-impl.h b/src/urcu-call-rcu-impl.h
index 56fe943..4392bc6 100644
--- a/src/urcu-call-rcu-impl.h
+++ b/src/urcu-call-rcu-impl.h
@@ -24,7 +24,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -35,14 +34,16 @@
 #include
 
 #include "compat-getcpu.h"
-#include "urcu/wfcqueue.h"
-#include "urcu-call-rcu.h"
-#include "urcu-pointer.h"
-#include "urcu/list.h"
-#include "urcu/futex.h"
-#include "urcu/tls-compat.h"
-#include "urcu/ref.h"
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
 #include "urcu-die.h"
+#include "urcu-utils.h"
 
 #define SET_AFFINITY_CHECK_PERIOD (1U << 8) /* 256 */
 #define SET_AFFINITY_CHECK_PERIOD_MASK (SET_AFFINITY_CHECK_PERIOD - 1)
@@ -88,7 +89,7 @@ static CDS_LIST_HEAD(call_rcu_data_list);
 
 /* Link a thread using call_rcu() to its call_rcu thread. */
 
-static DEFINE_URCU_TLS_IE(struct call_rcu_data *, thread_call_rcu_data);
+static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);
 
 /*
  * Guard call_rcu thread creation and atfork handlers.
@@ -198,7 +199,7 @@ static void call_rcu_unlock(pthread_mutex_t *pmp)
  * Losing affinity can be caused by CPU hotunplug/hotplug, or by
  * cpuset(7).
  */
-#if HAVE_SCHED_SETAFFINITY
+#ifdef HAVE_SCHED_SETAFFINITY
 static
 int set_thread_cpu_affinity(struct call_rcu_data *crdp)
 {
@@ -214,11 +215,8 @@ int set_thread_cpu_affinity(struct call_rcu_data *crdp)
 
 	CPU_ZERO(&mask);
 	CPU_SET(crdp->cpu_affinity, &mask);
-#if SCHED_SETAFFINITY_ARGS == 2
-	ret = sched_setaffinity(0, &mask);
-#else
 	ret = sched_setaffinity(0, sizeof(mask), &mask);
-#endif
+
 	/*
 	 * EINVAL is fine: can be caused by hotunplugged CPUs, or by
 	 * cpuset(7). This is why we should always retry if we detect
@@ -232,7 +230,7 @@ int set_thread_cpu_affinity(struct call_rcu_data *crdp)
 }
 #else
 static
-int set_thread_cpu_affinity(struct call_rcu_data *crdp)
+int set_thread_cpu_affinity(struct call_rcu_data *crdp __attribute__((unused)))
 {
 	return 0;
 }
@@ -357,8 +355,8 @@ static void *call_rcu_thread(void *arg)
 			cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
 			splice_ret = __cds_wfcq_splice_blocking(&cbs_tmp_head,
 				&cbs_tmp_tail, &crdp->cbs_head, &crdp->cbs_tail);
-			assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
-			assert(splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
+			urcu_posix_assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
+			urcu_posix_assert(splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
 			if (splice_ret != CDS_WFCQ_RET_SRC_EMPTY) {
 				synchronize_rcu();
 				cbcount = 0;
@@ -752,9 +750,13 @@ void call_rcu_data_free(struct call_rcu_data *crdp)
 		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
 			(void) poll(NULL, 0, 1);
 	}
+	call_rcu_lock(&call_rcu_mutex);
 	if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
-		/* Create default call rcu data if need be */
+		call_rcu_unlock(&call_rcu_mutex);
+		/* Create default call rcu data if need be. */
+		/* CBs queued here will be handed to the default list. */
 		(void) get_default_call_rcu_data();
+		call_rcu_lock(&call_rcu_mutex);
 		__cds_wfcq_splice_blocking(&default_call_rcu_data->cbs_head,
 			&default_call_rcu_data->cbs_tail,
 			&crdp->cbs_head, &crdp->cbs_tail);
@@ -763,7 +765,6 @@ void call_rcu_data_free(struct call_rcu_data *crdp)
 		wake_call_rcu_thread(default_call_rcu_data);
 	}
 
-	call_rcu_lock(&call_rcu_mutex);
 	cds_list_del(&crdp->list);
 	call_rcu_unlock(&call_rcu_mutex);
 
@@ -1008,7 +1009,7 @@ end:
 	call_rcu_unlock(&call_rcu_mutex);
 }
 
-void urcu_unregister_rculfhash_atfork(struct urcu_atfork *atfork)
+void urcu_unregister_rculfhash_atfork(struct urcu_atfork *atfork __attribute__((unused)))
 {
 	call_rcu_lock(&call_rcu_mutex);
 	if (--registered_rculfhash_atfork_refcount
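
The call_rcu_data_free() hunk above now takes call_rcu_mutex before testing whether callbacks are still queued, drops it around the call to get_default_call_rcu_data() (which takes call_rcu_mutex itself), and re-takes it before splicing the remaining callbacks onto the default queue. Below is a minimal standalone sketch of that unlock-call-relock shape using plain pthreads only; the names resource_mutex, ensure_default() and destroy_resource() are hypothetical and are not liburcu API.

/*
 * Sketch of the unlock-call-relock pattern from the hunk above.
 * Compile with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t resource_mutex = PTHREAD_MUTEX_INITIALIZER;
static int default_resource_ready;

/* Acquires resource_mutex itself, so callers must not hold it. */
static void ensure_default(void)
{
	pthread_mutex_lock(&resource_mutex);
	if (!default_resource_ready)
		default_resource_ready = 1;
	pthread_mutex_unlock(&resource_mutex);
}

static void destroy_resource(int has_pending_work)
{
	pthread_mutex_lock(&resource_mutex);
	if (has_pending_work) {
		/*
		 * Drop the mutex before calling a function that takes
		 * the same mutex, to avoid self-deadlock...
		 */
		pthread_mutex_unlock(&resource_mutex);
		ensure_default();
		/* ...then re-acquire it before touching shared state. */
		pthread_mutex_lock(&resource_mutex);
	}
	printf("shared state updated under resource_mutex\n");
	pthread_mutex_unlock(&resource_mutex);
}

int main(void)
{
	destroy_resource(1);
	return 0;
}

Dropping the lock around the callee avoids self-deadlock, at the cost that other threads may run in the window, so anything examined before the unlock has to be treated as potentially stale once the mutex is re-acquired.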