X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu-qsbr.c;h=553029578a210383df955fb384d6102ad807d51e;hp=c603c1df3e2643939533984d7aee79be7e0b768f;hb=1f689e13ea7e519b1afc001e9c55a7b1b60b599f;hpb=4f8e33808fdc08d3182a656d509fe42487d19b86

diff --git a/urcu-qsbr.c b/urcu-qsbr.c
index c603c1d..5530295 100644
--- a/urcu-qsbr.c
+++ b/urcu-qsbr.c
@@ -3,7 +3,7 @@
  *
  * Userspace RCU QSBR library
  *
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
  *
  * This library is free software; you can redistribute it and/or
@@ -23,54 +23,70 @@
  * IBM's contributions to this file may be relicensed under LGPLv2 or later.
  */
 
+#define _GNU_SOURCE
+#define _LGPL_SOURCE
 #include <stdio.h>
 #include <pthread.h>
 #include <signal.h>
 #include <assert.h>
 #include <stdlib.h>
+#include <stdint.h>
 #include <string.h>
 #include <errno.h>
 #include <poll.h>
 
+#include "urcu/wfqueue.h"
+#include "urcu/map/urcu-qsbr.h"
 #define BUILD_QSBR_LIB
-#include "urcu-qsbr-static.h"
+#include "urcu/static/urcu-qsbr.h"
+#include "urcu-pointer.h"
+
 /* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
+#undef _LGPL_SOURCE
 #include "urcu-qsbr.h"
+#define _LGPL_SOURCE
+
+void __attribute__((destructor)) rcu_exit(void);
 
-static pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
 
-int gp_futex;
+int32_t gp_futex;
 
 /*
  * Global grace period counter.
  */
-unsigned long urcu_gp_ctr = RCU_GP_ONLINE;
+unsigned long rcu_gp_ctr = RCU_GP_ONLINE;
+
+/*
+ * Active attempts to check for reader Q.S. before calling futex().
+ */
+#define RCU_QS_ACTIVE_ATTEMPTS 100
 
 /*
  * Written to only by each individual reader. Read by both the reader and the
  * writers.
  */
-struct urcu_reader __thread urcu_reader;
+struct rcu_reader __thread rcu_reader;
 
 #ifdef DEBUG_YIELD
 unsigned int yield_active;
 unsigned int __thread rand_yield;
 #endif
 
-static LIST_HEAD(registry);
+static CDS_LIST_HEAD(registry);
 
-static void internal_urcu_lock(void)
+static void mutex_lock(pthread_mutex_t *mutex)
 {
 	int ret;
 
 #ifndef DISTRUST_SIGNALS_EXTREME
-	ret = pthread_mutex_lock(&urcu_mutex);
+	ret = pthread_mutex_lock(mutex);
 	if (ret) {
 		perror("Error in pthread mutex lock");
 		exit(-1);
 	}
 #else /* #ifndef DISTRUST_SIGNALS_EXTREME */
-	while ((ret = pthread_mutex_trylock(&urcu_mutex)) != 0) {
+	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
 		if (ret != EBUSY && ret != EINTR) {
 			printf("ret = %d, errno = %d\n", ret, errno);
 			perror("Error in pthread mutex lock");
@@ -81,11 +97,11 @@ static void internal_urcu_lock(void)
 #endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
 }
 
-static void internal_urcu_unlock(void)
+static void mutex_unlock(pthread_mutex_t *mutex)
 {
 	int ret;
 
-	ret = pthread_mutex_unlock(&urcu_mutex);
+	ret = pthread_mutex_unlock(mutex);
 	if (ret) {
 		perror("Error in pthread mutex unlock");
 		exit(-1);
@@ -95,48 +111,89 @@ static void internal_urcu_unlock(void)
 /*
  * synchronize_rcu() waiting. Single thread.
  */
-static void wait_gp(struct urcu_reader *index)
+static void wait_gp(void)
 {
-	uatomic_dec(&gp_futex);
-	smp_mb(); /* Write futex before read reader_gp */
-	if (!rcu_gp_ongoing(&index->ctr)) {
-		/* Read reader_gp before write futex */
-		smp_mb();
-		/* Callbacks are queued, don't wait. */
-		uatomic_set(&gp_futex, 0);
-	} else {
-		/* Read reader_gp before read futex */
-		smp_rmb();
-		if (uatomic_read(&gp_futex) == -1)
-			futex(&gp_futex, FUTEX_WAIT, -1,
-			      NULL, NULL, 0);
-	}
+	/* Read reader_gp before read futex */
+	cmm_smp_rmb();
+	if (uatomic_read(&gp_futex) == -1)
+		futex_noasync(&gp_futex, FUTEX_WAIT, -1,
+			      NULL, NULL, 0);
 }
 
-static void wait_for_quiescent_state(void)
+static void update_counter_and_wait(void)
 {
-	struct urcu_reader *index;
+	CDS_LIST_HEAD(qsreaders);
+	int wait_loops = 0;
+	struct rcu_reader *index, *tmp;
+
+#if (CAA_BITS_PER_LONG < 64)
+	/* Switch parity: 0 -> 1, 1 -> 0 */
+	CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
+#else	/* !(CAA_BITS_PER_LONG < 64) */
+	/* Increment current G.P. */
+	CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
+#endif	/* !(CAA_BITS_PER_LONG < 64) */
+
+	/*
+	 * Must commit rcu_gp_ctr update to memory before waiting for
+	 * quiescent state. Failure to do so could result in the writer
+	 * waiting forever while new readers are always accessing data
+	 * (no progress). Enforce compiler-order of store to rcu_gp_ctr
+	 * before load rcu_reader ctr.
+	 */
+	cmm_barrier();
+
+	/*
+	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
+	 * model easier to understand. It does not have a big performance impact
+	 * anyway, given this is the write-side.
+	 */
+	cmm_smp_mb();
 
-	if (list_empty(&registry))
-		return;
 	/*
 	 * Wait for each thread rcu_reader_qs_gp count to become 0.
 	 */
-	list_for_each_entry(index, &registry, head) {
-		int wait_loops = 0;
+	for (;;) {
+		wait_loops++;
+		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
+			uatomic_set(&gp_futex, -1);
+			/*
+			 * Write futex before write waiting (the other side
+			 * reads them in the opposite order).
+			 */
+			cmm_smp_wmb();
+			cds_list_for_each_entry(index, &registry, node) {
+				_CMM_STORE_SHARED(index->waiting, 1);
+			}
+			/* Write futex before read reader_gp */
+			cmm_smp_mb();
+		}
+		cds_list_for_each_entry_safe(index, tmp, &registry, node) {
+			if (!rcu_gp_ongoing(&index->ctr))
+				cds_list_move(&index->node, &qsreaders);
+		}
 
-		while (rcu_gp_ongoing(&index->ctr)) {
-			if (wait_loops++ == RCU_QS_ACTIVE_ATTEMPTS) {
-				wait_gp(index);
+		if (cds_list_empty(&registry)) {
+			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
+				/* Read reader_gp before write futex */
+				cmm_smp_mb();
+				uatomic_set(&gp_futex, 0);
+			}
+			break;
+		} else {
+			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
+				wait_gp();
 			} else {
 #ifndef HAS_INCOHERENT_CACHES
-				cpu_relax();
+				caa_cpu_relax();
 #else /* #ifndef HAS_INCOHERENT_CACHES */
-				smp_mb();
+				cmm_smp_mb();
 #endif /* #else #ifndef HAS_INCOHERENT_CACHES */
 			}
 		}
 	}
+	/* put back the reader list in the registry */
+	cds_list_splice(&qsreaders, &registry);
 }
 
 /*
@@ -144,111 +201,99 @@ static void wait_for_quiescent_state(void)
  * long-size to ensure we do not encounter an overflow bug.
  */
 
-#if (BITS_PER_LONG < 64)
-/*
- * called with urcu_mutex held.
- */
-static void switch_next_urcu_qparity(void)
-{
-	STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr ^ RCU_GP_CTR);
-}
-
+#if (CAA_BITS_PER_LONG < 64)
 void synchronize_rcu(void)
 {
 	unsigned long was_online;
 
-	was_online = urcu_reader.ctr;
+	was_online = rcu_reader.ctr;
 
 	/* All threads should read qparity before accessing data structure
-	 * where new ptr points to.
-	 */
-	/* Write new ptr before changing the qparity */
-	smp_mb();
-
-	/*
+	 * where new ptr points to.  In the "then" case, rcu_thread_offline
+	 * includes a memory barrier.
+	 *
 	 * Mark the writer thread offline to make sure we don't wait for
-	 * our own quiescent state. This allows using synchronize_rcu() in
-	 * threads registered as readers.
+	 * our own quiescent state. This allows using synchronize_rcu()
+	 * in threads registered as readers.
 	 */
 	if (was_online)
-		STORE_SHARED(urcu_reader.ctr, 0);
+		rcu_thread_offline();
+	else
+		cmm_smp_mb();
 
-	internal_urcu_lock();
+	mutex_lock(&rcu_gp_lock);
 
-	switch_next_urcu_qparity();	/* 0 -> 1 */
-
-	/*
-	 * Must commit qparity update to memory before waiting for parity
-	 * 0 quiescent state. Failure to do so could result in the writer
-	 * waiting forever while new readers are always accessing data (no
-	 * progress).
-	 * Ensured by STORE_SHARED and LOAD_SHARED.
-	 */
+	if (cds_list_empty(&registry))
+		goto out;
 
 	/*
 	 * Wait for previous parity to be empty of readers.
 	 */
-	wait_for_quiescent_state();	/* Wait readers in parity 0 */
+	update_counter_and_wait();	/* 0 -> 1, wait readers in parity 0 */
 
 	/*
 	 * Must finish waiting for quiescent state for parity 0 before
-	 * committing qparity update to memory. Failure to do so could result in
-	 * the writer waiting forever while new readers are always accessing
-	 * data (no progress).
-	 * Ensured by STORE_SHARED and LOAD_SHARED.
+	 * committing next rcu_gp_ctr update to memory. Failure to
+	 * do so could result in the writer waiting forever while new
+	 * readers are always accessing data (no progress). Enforce
+	 * compiler-order of load rcu_reader ctr before store to
+	 * rcu_gp_ctr.
 	 */
-
-	switch_next_urcu_qparity();	/* 1 -> 0 */
+	cmm_barrier();
 
 	/*
-	 * Must commit qparity update to memory before waiting for parity
-	 * 1 quiescent state. Failure to do so could result in the writer
-	 * waiting forever while new readers are always accessing data (no
-	 * progress).
-	 * Ensured by STORE_SHARED and LOAD_SHARED.
+	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
+	 * model easier to understand. It does not have a big performance impact
+	 * anyway, given this is the write-side.
 	 */
+	cmm_smp_mb();
 
 	/*
 	 * Wait for previous parity to be empty of readers.
 	 */
-	wait_for_quiescent_state();	/* Wait readers in parity 1 */
-
-	internal_urcu_unlock();
+	update_counter_and_wait();	/* 1 -> 0, wait readers in parity 1 */
+out:
+	mutex_unlock(&rcu_gp_lock);
 
 	/*
 	 * Finish waiting for reader threads before letting the old ptr be
 	 * freed.
 	 */
 	if (was_online)
-		_STORE_SHARED(urcu_reader.ctr, LOAD_SHARED(urcu_gp_ctr));
-	smp_mb();
+		rcu_thread_online();
+	else
+		cmm_smp_mb();
 }
-#else /* !(BITS_PER_LONG < 64) */
+#else /* !(CAA_BITS_PER_LONG < 64) */
 void synchronize_rcu(void)
 {
 	unsigned long was_online;
 
-	was_online = urcu_reader.ctr;
+	was_online = rcu_reader.ctr;
 
 	/*
 	 * Mark the writer thread offline to make sure we don't wait for
-	 * our own quiescent state. This allows using synchronize_rcu() in
-	 * threads registered as readers.
+	 * our own quiescent state. This allows using synchronize_rcu()
+	 * in threads registered as readers.
 	 */
-	smp_mb();
 	if (was_online)
-		STORE_SHARED(urcu_reader.ctr, 0);
+		rcu_thread_offline();
+	else
+		cmm_smp_mb();
 
-	internal_urcu_lock();
-	STORE_SHARED(urcu_gp_ctr, urcu_gp_ctr + RCU_GP_CTR);
-	wait_for_quiescent_state();
-	internal_urcu_unlock();
+	mutex_lock(&rcu_gp_lock);
+	if (cds_list_empty(&registry))
+		goto out;
+	update_counter_and_wait();
+out:
+	mutex_unlock(&rcu_gp_lock);
 
 	if (was_online)
-		_STORE_SHARED(urcu_reader.ctr, LOAD_SHARED(urcu_gp_ctr));
-	smp_mb();
+		rcu_thread_online();
+	else
+		cmm_smp_mb();
 }
-#endif /* !(BITS_PER_LONG < 64) */
+#endif /* !(CAA_BITS_PER_LONG < 64) */
 
 /*
  * library wrappers to be used by non-LGPL compatible source code.
@@ -264,38 +309,6 @@ void rcu_read_unlock(void)
 	_rcu_read_unlock();
 }
 
-void *rcu_dereference(void *p)
-{
-	return _rcu_dereference(p);
-}
-
-void *rcu_assign_pointer_sym(void **p, void *v)
-{
-	wmb();
-	return STORE_SHARED(p, v);
-}
-
-void *rcu_cmpxchg_pointer_sym(void **p, void *old, void *_new)
-{
-	wmb();
-	return uatomic_cmpxchg(p, old, _new);
-}
-
-void *rcu_xchg_pointer_sym(void **p, void *v)
-{
-	wmb();
-	return uatomic_xchg(p, v);
-}
-
-void *rcu_publish_content_sym(void **p, void *v)
-{
-	void *oldptr;
-
-	oldptr = _rcu_xchg_pointer(p, v);
-	synchronize_rcu();
-	return oldptr;
-}
-
 void rcu_quiescent_state(void)
 {
 	_rcu_quiescent_state();
@@ -313,12 +326,12 @@ void rcu_thread_online(void)
 
 void rcu_register_thread(void)
 {
-	urcu_reader.tid = pthread_self();
-	assert(urcu_reader.ctr == 0);
+	rcu_reader.tid = pthread_self();
+	assert(rcu_reader.ctr == 0);
 
-	internal_urcu_lock();
-	list_add(&urcu_reader.head, &registry);
-	internal_urcu_unlock();
+	mutex_lock(&rcu_gp_lock);
+	cds_list_add(&rcu_reader.node, &registry);
+	mutex_unlock(&rcu_gp_lock);
 	_rcu_thread_online();
 }
 
@@ -329,7 +342,19 @@ void rcu_unregister_thread(void)
 	 * with a waiting writer.
 	 */
 	_rcu_thread_offline();
-	internal_urcu_lock();
-	list_del(&urcu_reader.head);
-	internal_urcu_unlock();
+	mutex_lock(&rcu_gp_lock);
+	cds_list_del(&rcu_reader.node);
+	mutex_unlock(&rcu_gp_lock);
 }
+
+void rcu_exit(void)
+{
+	/*
+	 * Assertion disabled because call_rcu threads are now rcu
+	 * readers, and left running at exit.
+	 * assert(cds_list_empty(&registry));
+	 */
+}
+
+#include "urcu-call-rcu-impl.h"
+#include "urcu-defer-impl.h"
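
As context for the API this diff renames and remaps, here is a minimal usage
sketch of the QSBR flavor (illustration only, not part of the commit). It
assumes liburcu is installed and the program links against -lurcu-qsbr; the
names global_ptr, reader() and writer_update() are hypothetical. Note that
rcu_dereference() and rcu_xchg_pointer() now come from urcu-pointer.h, which
the patched file includes in place of the removed per-flavor wrappers.

/*
 * Hypothetical example, not part of the diff above.
 * Build (assumption): gcc example.c -lurcu-qsbr -lpthread
 */
#include <pthread.h>
#include <stdlib.h>
#include <urcu-qsbr.h>		/* QSBR flavor mapped via urcu/map/urcu-qsbr.h */

static int *global_ptr;		/* RCU-protected pointer (illustrative) */

static void *reader(void *arg)
{
	long i;

	rcu_register_thread();	/* adds this thread to the registry list */
	for (i = 0; i < 1000000; i++) {
		int *p;

		rcu_read_lock();	/* no-op in QSBR, kept for readability */
		p = rcu_dereference(global_ptr);
		if (p)
			(void) *p;	/* use the protected data */
		rcu_read_unlock();

		/*
		 * QSBR readers must periodically announce a quiescent
		 * state; otherwise update_counter_and_wait() above spins
		 * RCU_QS_ACTIVE_ATTEMPTS times, then blocks on gp_futex.
		 */
		rcu_quiescent_state();
	}
	rcu_unregister_thread();
	return NULL;
}

static void writer_update(int *new_val)
{
	int *old;

	old = rcu_xchg_pointer(&global_ptr, new_val);
	synchronize_rcu();	/* wait for all registered readers */
	free(old);		/* safe: no reader can still see old */
}

Because the patched synchronize_rcu() calls rcu_thread_offline() and
rcu_thread_online() around the grace period when the caller was online, a
registered reader thread may itself call synchronize_rcu() without
deadlocking on its own quiescent state.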