X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu-bp.c;h=1b0bd6950da04533c35bf00a5c131ac4da3e470b;hp=d841be6351bf8a30368bbe4a57e794f7dc6112af;hb=ee39cfb61d2a389db3342751762412f332a3e851;hpb=02be55611d3b1c7bf4fdfcb3a9c98f621882d417

diff --git a/urcu-bp.c b/urcu-bp.c
index d841be6..1b0bd69 100644
--- a/urcu-bp.c
+++ b/urcu-bp.c
@@ -45,7 +45,7 @@
 
 void __attribute__((destructor)) rcu_bp_exit(void);
 
-static pthread_mutex_t rcu_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
 
 #ifdef DEBUG_YIELD
 unsigned int yield_active;
@@ -78,18 +78,18 @@ static struct registry_arena registry_arena;
 
 static void rcu_gc_registry(void);
 
-static void internal_rcu_lock(void)
+static void mutex_lock(pthread_mutex_t *mutex)
 {
 	int ret;
 
 #ifndef DISTRUST_SIGNALS_EXTREME
-	ret = pthread_mutex_lock(&rcu_mutex);
+	ret = pthread_mutex_lock(mutex);
 	if (ret) {
 		perror("Error in pthread mutex lock");
 		exit(-1);
 	}
 #else /* #ifndef DISTRUST_SIGNALS_EXTREME */
-	while ((ret = pthread_mutex_trylock(&rcu_mutex)) != 0) {
+	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
 		if (ret != EBUSY && ret != EINTR) {
 			printf("ret = %d, errno = %d\n", ret, errno);
 			perror("Error in pthread mutex lock");
@@ -105,33 +105,40 @@ static void internal_rcu_lock(void)
 #endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
 }
 
-static void internal_rcu_unlock(void)
+static void mutex_unlock(pthread_mutex_t *mutex)
 {
 	int ret;
 
-	ret = pthread_mutex_unlock(&rcu_mutex);
+	ret = pthread_mutex_unlock(mutex);
 	if (ret) {
 		perror("Error in pthread mutex unlock");
 		exit(-1);
 	}
 }
 
-/*
- * called with rcu_mutex held.
- */
-static void switch_next_rcu_qparity(void)
-{
-	STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
-}
-
-void wait_for_quiescent_state(void)
+void update_counter_and_wait(void)
 {
 	LIST_HEAD(qsreaders);
 	int wait_loops = 0;
 	struct rcu_reader *index, *tmp;
 
-	if (list_empty(&registry))
-		return;
+	/* Switch parity: 0 -> 1, 1 -> 0 */
+	STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
+
+	/*
+	 * Must commit qparity update to memory before waiting for other parity
+	 * quiescent state. Failure to do so could result in the writer waiting
+	 * forever while new readers are always accessing data (no progress).
+	 * Ensured by STORE_SHARED and LOAD_SHARED.
+	 */
+
+	/*
+	 * Adding a smp_mb() which is _not_ formally required, but makes the
+	 * model easier to understand. It does not have a big performance impact
+	 * anyway, given this is the write-side.
+	 */
+	smp_mb();
+
 	/*
 	 * Wait for each thread rcu_reader.ctr count to become 0.
 	 */
@@ -165,46 +172,23 @@ void synchronize_rcu(void)
 	ret = pthread_sigmask(SIG_SETMASK, &newmask, &oldmask);
 	assert(!ret);
 
-	internal_rcu_lock();
+	mutex_lock(&rcu_gp_lock);
 
-	/* Remove old registry elements */
-	rcu_gc_registry();
+	if (list_empty(&registry))
+		goto out;
 
 	/* All threads should read qparity before accessing data structure
-	 * where new ptr points to. Must be done within internal_rcu_lock
-	 * because it iterates on reader threads.*/
+	 * where new ptr points to. */
 	/* Write new ptr before changing the qparity */
 	smp_mb();
 
-	switch_next_rcu_qparity();	/* 0 -> 1 */
-
-	/*
-	 * Must commit qparity update to memory before waiting for parity
-	 * 0 quiescent state. Failure to do so could result in the writer
-	 * waiting forever while new readers are always accessing data (no
-	 * progress).
-	 * Ensured by STORE_SHARED and LOAD_SHARED.
-	 */
-
-	/*
-	 * Adding a smp_mb() which is _not_ formally required, but makes the
-	 * model easier to understand. It does not have a big performance impact
-	 * anyway, given this is the write-side.
-	 */
-	smp_mb();
+	/* Remove old registry elements */
+	rcu_gc_registry();
 
 	/*
 	 * Wait for previous parity to be empty of readers.
 	 */
-	wait_for_quiescent_state();	/* Wait readers in parity 0 */
-
-	/*
-	 * Must finish waiting for quiescent state for parity 0 before
-	 * committing qparity update to memory. Failure to do so could result in
-	 * the writer waiting forever while new readers are always accessing
-	 * data (no progress).
-	 * Ensured by STORE_SHARED and LOAD_SHARED.
-	 */
+	update_counter_and_wait();	/* 0 -> 1, wait readers in parity 0 */
 
 	/*
 	 * Adding a smp_mb() which is _not_ formally required, but makes the
@@ -213,34 +197,18 @@ void synchronize_rcu(void)
 	 */
 	smp_mb();
 
-	switch_next_rcu_qparity();	/* 1 -> 0 */
-
 	/*
-	 * Must commit qparity update to memory before waiting for parity
-	 * 1 quiescent state. Failure to do so could result in the writer
-	 * waiting forever while new readers are always accessing data (no
-	 * progress).
-	 * Ensured by STORE_SHARED and LOAD_SHARED.
-	 */
-
-	/*
-	 * Adding a smp_mb() which is _not_ formally required, but makes the
-	 * model easier to understand. It does not have a big performance impact
-	 * anyway, given this is the write-side.
+	 * Wait for previous parity to be empty of readers.
 	 */
-	smp_mb();
+	update_counter_and_wait();	/* 1 -> 0, wait readers in parity 1 */
 
 	/*
-	 * Wait for previous parity to be empty of readers.
+	 * Finish waiting for reader threads before letting the old ptr being
+	 * freed.
 	 */
-	wait_for_quiescent_state();	/* Wait readers in parity 1 */
-
-	/* Finish waiting for reader threads before letting the old ptr being
-	 * freed. Must be done within internal_rcu_lock because it iterates on
-	 * reader threads. */
 	smp_mb();
-
-	internal_rcu_unlock();
+out:
+	mutex_unlock(&rcu_gp_lock);
 	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
 	assert(!ret);
 }
@@ -355,9 +323,9 @@ void rcu_bp_register(void)
 	if (rcu_reader)
 		goto end;
 
-	internal_rcu_lock();
+	mutex_lock(&rcu_gp_lock);
 	add_thread();
-	internal_rcu_unlock();
+	mutex_unlock(&rcu_gp_lock);
 end:
 	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
 	assert(!ret);
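
Taken together, the + lines above yield a write side that reads roughly as follows. This is a simplified restatement assembled from the hunks shown, not additional code from the patch: the signal-mask handling and the body of the reader-wait loop fall outside the hunks and are elided here as comments.

/*
 * Simplified write-side view after this patch (assembled from the + lines
 * above; elided parts are marked with "...").
 */
void update_counter_and_wait(void)
{
	/* Switch parity: 0 -> 1, 1 -> 0 */
	STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);

	/* Commit the parity flip before scanning the reader registry. */
	smp_mb();

	/* ... wait for each registered rcu_reader.ctr to quiesce ... */
}

void synchronize_rcu(void)
{
	/* ... block signals, as in the context lines above ... */

	mutex_lock(&rcu_gp_lock);

	if (list_empty(&registry))
		goto out;

	/* Write new ptr before changing the qparity. */
	smp_mb();

	/* Remove old registry elements. */
	rcu_gc_registry();

	update_counter_and_wait();	/* 0 -> 1, wait readers in parity 0 */
	smp_mb();
	update_counter_and_wait();	/* 1 -> 0, wait readers in parity 1 */

	/* Finish waiting for readers before the old ptr can be freed. */
	smp_mb();
out:
	mutex_unlock(&rcu_gp_lock);

	/* ... restore the signal mask ... */
}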
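
For readers unfamiliar with the library, a minimal (hypothetical) caller is sketched below to show what the reworked grace-period path has to guarantee: readers never block, so the two update_counter_and_wait() phases are all that stand between rcu_assign_pointer() of a new version and free() of the old one. The struct config type, the function names, the assumption that updaters serialize among themselves, and the urcu-bp.h / -lurcu-bp build details are illustrative assumptions, not part of this patch.

/* Hypothetical liburcu-bp usage sketch (not part of the patch). */
#include <stdlib.h>
#include <urcu-bp.h>	/* assumed public header; link with -lurcu-bp */

struct config {
	int value;
};

static struct config *shared_config;	/* RCU-protected pointer */

/* Reader: urcu-bp registers the thread lazily, no explicit registration. */
int read_value(void)
{
	struct config *c;
	int v;

	rcu_read_lock();
	c = rcu_dereference(shared_config);
	v = c ? c->value : -1;
	rcu_read_unlock();
	return v;
}

/* Writer: callers are assumed to serialize updates with their own lock. */
void update_value(int value)
{
	struct config *new_config, *old_config;

	new_config = malloc(sizeof(*new_config));
	if (!new_config)
		return;
	new_config->value = value;

	old_config = shared_config;
	rcu_assign_pointer(shared_config, new_config);

	synchronize_rcu();	/* both parity phases complete here */
	free(old_config);
}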