}
#endif
-/* Sleep delay in us */
-#define RCU_SLEEP_DELAY 1000
+/* Sleep delay in ms */
+#define RCU_SLEEP_DELAY_MS 10
#define INIT_NR_THREADS 8
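/*
 * Initial arena allocation: one registry chunk plus room for
 * INIT_NR_THREADS reader structures.
 */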
#define ARENA_INIT_ALLOC \
sizeof(struct registry_chunk) + INIT_NR_THREADS * sizeof(struct rcu_reader)
static
void __attribute__((constructor)) rcu_bp_init(void);
static
-void __attribute__((destructor)) rcu_bp_exit(void);
+void __attribute__((destructor)) _rcu_bp_exit(void);
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
static
void wait_for_readers(struct cds_list_head *input_readers,
struct cds_list_head *cur_snap_readers,
struct cds_list_head *qsreaders)
{
- int wait_loops = 0;
+ unsigned int wait_loops = 0;
struct rcu_reader *index, *tmp;
/*
 * Wait for each thread URCU_TLS(rcu_reader).ctr to either
 * indicate quiescence (not nested), or observe the current
 * rcu_gp.ctr value.
 */
for (;;) {
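+ /*
+  * Cap wait_loops at RCU_QS_ACTIVE_ATTEMPTS: the unsigned counter
+  * must not wrap, and once saturated every further iteration takes
+  * the sleeping path below instead of busy-waiting.
+  */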
- wait_loops++;
+ if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
+ wait_loops++;
+
cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
switch (rcu_reader_state(&index->ctr)) {
case RCU_READER_ACTIVE_CURRENT:
if (cds_list_empty(input_readers)) {
break;
} else {
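+ /*
+  * Adaptive wait: busy-wait with caa_cpu_relax() for the first
+  * RCU_QS_ACTIVE_ATTEMPTS iterations, then block. poll() with no
+  * file descriptors is a portable millisecond sleep; it replaces
+  * usleep(), which POSIX marks obsolescent.
+  */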
- if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
- usleep(RCU_SLEEP_DELAY);
+ if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
+ (void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
else
caa_cpu_relax();
}
ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
if (ret)
abort();
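+ /*
+  * Drop this thread's reference on the global state; the state is
+  * torn down when the last user exits (see _rcu_bp_exit()).
+  */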
- rcu_bp_exit();
+ _rcu_bp_exit();
}
/*
}
static
-void rcu_bp_exit(void)
+void _rcu_bp_exit(void)
{
mutex_lock(&init_lock);
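/* Tear down library-wide state only when the last reference is dropped. */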
if (!--rcu_bp_refcount) {
mutex_unlock(&init_lock);
}
+/*
+ * Keep ABI compatibility within stable versions. This has never been
+ * exposed through a header, but needs to stay in the .so until the
+ * soname is bumped.
+ */
+void rcu_bp_exit(void)
+{
+}
+
/*
 * Holding the rcu_gp_lock across fork will make sure the thread calling
 * fork() does not race with a concurrent thread executing with this same
 * lock held. This ensures that the