Add `urcu_posix_assert()` as `assert()` replacement
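The diff below (from the urcu.git tree, liburcu) drops the direct <assert.h> include and replaces bare assert() calls with urcu_posix_assert() from the new <urcu/assert.h> header. As a rough, hypothetical sketch only (the real macro lives in <urcu/assert.h> and its exact definition may differ), an assert() replacement along these lines could be:

#include <assert.h>

#ifdef NDEBUG
/*
 * Assertions compiled out: still "use" the condition inside sizeof so
 * that variables referenced only by assertions do not trigger
 * unused-variable warnings.  sizeof does not evaluate its operand
 * here, so this adds no runtime cost.
 */
# define urcu_posix_assert(_cond)	((void) sizeof((void) (_cond), 0))
#else
/* Assertions enabled: forward to the POSIX assert(). */
# define urcu_posix_assert(_cond)	assert(_cond)
#endif

Call sites then keep the familiar assert() form, e.g. urcu_posix_assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK) in the call_rcu_thread() hunk below, while giving the library a single point to adjust assertion behaviour.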
diff --git a/src/urcu-call-rcu-impl.h b/src/urcu-call-rcu-impl.h
index bfa53f8e09f920a833559ba7b334a04bf3cfcf47..4392bc6b87ad53d290847ab246ff824fc0f244ea 100644
--- a/src/urcu-call-rcu-impl.h
+++ b/src/urcu-call-rcu-impl.h
@@ -24,7 +24,6 @@
 #include <stdio.h>
 #include <pthread.h>
 #include <signal.h>
-#include <assert.h>
 #include <stdlib.h>
 #include <stdint.h>
 #include <string.h>
 #include <sched.h>
 
 #include "compat-getcpu.h"
-#include "urcu/wfcqueue.h"
-#include "urcu-call-rcu.h"
-#include "urcu-pointer.h"
-#include "urcu/list.h"
-#include "urcu/futex.h"
-#include "urcu/tls-compat.h"
-#include "urcu/ref.h"
+#include <urcu/assert.h>
+#include <urcu/wfcqueue.h>
+#include <urcu/call-rcu.h>
+#include <urcu/pointer.h>
+#include <urcu/list.h>
+#include <urcu/futex.h>
+#include <urcu/tls-compat.h>
+#include <urcu/ref.h>
 #include "urcu-die.h"
+#include "urcu-utils.h"
 
 #define SET_AFFINITY_CHECK_PERIOD              (1U << 8)       /* 256 */
 #define SET_AFFINITY_CHECK_PERIOD_MASK         (SET_AFFINITY_CHECK_PERIOD - 1)
@@ -99,6 +100,9 @@ static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 static struct call_rcu_data *default_call_rcu_data;
 
+static struct urcu_atfork *registered_rculfhash_atfork;
+static unsigned long registered_rculfhash_atfork_refcount;
+
 /*
  * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
  * available, then we can have call_rcu threads assigned to individual
@@ -195,7 +199,7 @@ static void call_rcu_unlock(pthread_mutex_t *pmp)
  * Losing affinity can be caused by CPU hotunplug/hotplug, or by
  * cpuset(7).
  */
-#if HAVE_SCHED_SETAFFINITY
+#ifdef HAVE_SCHED_SETAFFINITY
 static
 int set_thread_cpu_affinity(struct call_rcu_data *crdp)
 {
@@ -211,11 +215,8 @@ int set_thread_cpu_affinity(struct call_rcu_data *crdp)
 
        CPU_ZERO(&mask);
        CPU_SET(crdp->cpu_affinity, &mask);
-#if SCHED_SETAFFINITY_ARGS == 2
-       ret = sched_setaffinity(0, &mask);
-#else
        ret = sched_setaffinity(0, sizeof(mask), &mask);
-#endif
+
        /*
         * EINVAL is fine: can be caused by hotunplugged CPUs, or by
         * cpuset(7). This is why we should always retry if we detect
@@ -229,7 +230,7 @@ int set_thread_cpu_affinity(struct call_rcu_data *crdp)
 }
 #else
 static
-int set_thread_cpu_affinity(struct call_rcu_data *crdp)
+int set_thread_cpu_affinity(struct call_rcu_data *crdp __attribute__((unused)))
 {
        return 0;
 }
@@ -354,8 +355,8 @@ static void *call_rcu_thread(void *arg)
                cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
                splice_ret = __cds_wfcq_splice_blocking(&cbs_tmp_head,
                        &cbs_tmp_tail, &crdp->cbs_head, &crdp->cbs_tail);
-               assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
-               assert(splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
+               urcu_posix_assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
+               urcu_posix_assert(splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
                if (splice_ret != CDS_WFCQ_RET_SRC_EMPTY) {
                        synchronize_rcu();
                        cbcount = 0;
@@ -749,9 +750,13 @@ void call_rcu_data_free(struct call_rcu_data *crdp)
                while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
                        (void) poll(NULL, 0, 1);
        }
+       call_rcu_lock(&call_rcu_mutex);
        if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
-               /* Create default call rcu data if need be */
+               call_rcu_unlock(&call_rcu_mutex);
+               /* Create default call rcu data if need be. */
+               /* CBs queued here will be handed to the default list. */
                (void) get_default_call_rcu_data();
+               call_rcu_lock(&call_rcu_mutex);
                __cds_wfcq_splice_blocking(&default_call_rcu_data->cbs_head,
                        &default_call_rcu_data->cbs_tail,
                        &crdp->cbs_head, &crdp->cbs_tail);
@@ -760,7 +765,6 @@ void call_rcu_data_free(struct call_rcu_data *crdp)
                wake_call_rcu_thread(default_call_rcu_data);
        }
 
-       call_rcu_lock(&call_rcu_mutex);
        cds_list_del(&crdp->list);
        call_rcu_unlock(&call_rcu_mutex);
 
@@ -907,9 +911,14 @@ online:
 void call_rcu_before_fork(void)
 {
        struct call_rcu_data *crdp;
+       struct urcu_atfork *atfork;
 
        call_rcu_lock(&call_rcu_mutex);
 
+       atfork = registered_rculfhash_atfork;
+       if (atfork)
+               atfork->before_fork(atfork->priv);
+
        cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
                uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSE);
                cmm_smp_mb__after_uatomic_or();
@@ -929,6 +938,7 @@ void call_rcu_before_fork(void)
 void call_rcu_after_fork_parent(void)
 {
        struct call_rcu_data *crdp;
+       struct urcu_atfork *atfork;
 
        cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
                uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
@@ -936,6 +946,9 @@ void call_rcu_after_fork_parent(void)
                while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) != 0)
                        (void) poll(NULL, 0, 1);
        }
+       atfork = registered_rculfhash_atfork;
+       if (atfork)
+               atfork->after_fork_parent(atfork->priv);
        call_rcu_unlock(&call_rcu_mutex);
 }
 
@@ -947,10 +960,15 @@ void call_rcu_after_fork_parent(void)
 void call_rcu_after_fork_child(void)
 {
        struct call_rcu_data *crdp, *next;
+       struct urcu_atfork *atfork;
 
        /* Release the mutex. */
        call_rcu_unlock(&call_rcu_mutex);
 
+       atfork = registered_rculfhash_atfork;
+       if (atfork)
+               atfork->after_fork_child(atfork->priv);
+
        /* Do nothing when call_rcu() has not been used */
        if (cds_list_empty(&call_rcu_data_list))
                return;
@@ -980,3 +998,23 @@ void call_rcu_after_fork_child(void)
                call_rcu_data_free(crdp);
        }
 }
+
+void urcu_register_rculfhash_atfork(struct urcu_atfork *atfork)
+{
+       call_rcu_lock(&call_rcu_mutex);
+       if (registered_rculfhash_atfork_refcount++)
+               goto end;
+       registered_rculfhash_atfork = atfork;
+end:
+       call_rcu_unlock(&call_rcu_mutex);
+}
+
+void urcu_unregister_rculfhash_atfork(struct urcu_atfork *atfork __attribute__((unused)))
+{
+       call_rcu_lock(&call_rcu_mutex);
+       if (--registered_rculfhash_atfork_refcount)
+               goto end;
+       registered_rculfhash_atfork = NULL;
+end:
+       call_rcu_unlock(&call_rcu_mutex);
+}
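The new urcu_register_rculfhash_atfork() / urcu_unregister_rculfhash_atfork() pair lets rculfhash hook callbacks into the call_rcu fork handlers: the first registration installs the struct urcu_atfork pointer, later registrations only bump the refcount, and the pointer is cleared again on the last unregistration. A hypothetical caller (names below are illustrative; only the struct members invoked above, before_fork/after_fork_parent/after_fork_child/priv, come from the diff, and the header is an assumption) might wire it up like this:

#include <urcu/call-rcu.h>	/* assumed location of the registration API */

/* Hypothetical callbacks; 'priv' carries caller-private state. */
static void my_before_fork(void *priv)
{
	(void) priv;	/* e.g. quiesce background work before fork() */
}

static void my_after_fork_parent(void *priv)
{
	(void) priv;	/* e.g. resume background work in the parent */
}

static void my_after_fork_child(void *priv)
{
	(void) priv;	/* e.g. re-create state invalidated by fork() */
}

static struct urcu_atfork my_atfork = {
	.before_fork		= my_before_fork,
	.after_fork_parent	= my_after_fork_parent,
	.after_fork_child	= my_after_fork_child,
	.priv			= NULL,
};

void my_module_init(void)
{
	urcu_register_rculfhash_atfork(&my_atfork);
}

void my_module_exit(void)
{
	urcu_unregister_rculfhash_atfork(&my_atfork);
}

Since only a single atfork pointer is recorded, the refcounting above assumes every caller registers the same struct urcu_atfork; that is also why the unregister argument is marked __attribute__((unused)).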