X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu-bp.c;h=b457d2bc8f6e49fee5b55565107219aaf02d6ee9;hp=d841be6351bf8a30368bbe4a57e794f7dc6112af;hb=06f22bdbb0c4c4d5db42a2e2dc35818aa61415be;hpb=02be55611d3b1c7bf4fdfcb3a9c98f621882d417

diff --git a/urcu-bp.c b/urcu-bp.c
index d841be6..b457d2b 100644
--- a/urcu-bp.c
+++ b/urcu-bp.c
@@ -3,7 +3,7 @@
  *
  * Userspace RCU library, "bulletproof" version.
  *
- * Copyright (c) 2009 Mathieu Desnoyers
+ * Copyright (c) 2009 Mathieu Desnoyers
  * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
  *
  * This library is free software; you can redistribute it and/or
@@ -45,7 +45,7 @@
 
 void __attribute__((destructor)) rcu_bp_exit(void);
 
-static pthread_mutex_t rcu_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
 
 #ifdef DEBUG_YIELD
 unsigned int yield_active;
@@ -78,68 +78,75 @@ static struct registry_arena registry_arena;
 
 static void rcu_gc_registry(void);
 
-static void internal_rcu_lock(void)
+static void mutex_lock(pthread_mutex_t *mutex)
 {
 	int ret;
 
 #ifndef DISTRUST_SIGNALS_EXTREME
-	ret = pthread_mutex_lock(&rcu_mutex);
+	ret = pthread_mutex_lock(mutex);
 	if (ret) {
 		perror("Error in pthread mutex lock");
 		exit(-1);
 	}
 #else /* #ifndef DISTRUST_SIGNALS_EXTREME */
-	while ((ret = pthread_mutex_trylock(&rcu_mutex)) != 0) {
+	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
 		if (ret != EBUSY && ret != EINTR) {
 			printf("ret = %d, errno = %d\n", ret, errno);
 			perror("Error in pthread mutex lock");
 			exit(-1);
 		}
 		if (rcu_reader.need_mb) {
-			smp_mb();
+			cmm_smp_mb();
 			rcu_reader.need_mb = 0;
-			smp_mb();
+			cmm_smp_mb();
 		}
 		poll(NULL,0,10);
 	}
 #endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
 }
 
-static void internal_rcu_unlock(void)
+static void mutex_unlock(pthread_mutex_t *mutex)
 {
 	int ret;
 
-	ret = pthread_mutex_unlock(&rcu_mutex);
+	ret = pthread_mutex_unlock(mutex);
 	if (ret) {
 		perror("Error in pthread mutex unlock");
 		exit(-1);
 	}
 }
 
-/*
- * called with rcu_mutex held.
- */
-static void switch_next_rcu_qparity(void)
-{
-	STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
-}
-
-void wait_for_quiescent_state(void)
+void update_counter_and_wait(void)
 {
 	LIST_HEAD(qsreaders);
 	int wait_loops = 0;
 	struct rcu_reader *index, *tmp;
 
-	if (list_empty(&registry))
-		return;
+	/* Switch parity: 0 -> 1, 1 -> 0 */
+	CAA_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
+
+	/*
+	 * Must commit qparity update to memory before waiting for other parity
+	 * quiescent state. Failure to do so could result in the writer waiting
+	 * forever while new readers are always accessing data (no progress).
+	 * Ensured by CAA_STORE_SHARED and CAA_LOAD_SHARED.
+	 */
+
+	/*
+	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
+	 * model easier to understand. It does not have a big performance impact
+	 * anyway, given this is the write-side.
+	 */
+	cmm_smp_mb();
+
 	/*
 	 * Wait for each thread rcu_reader.ctr count to become 0.
 	 */
 	for (;;) {
 		wait_loops++;
-		list_for_each_entry_safe(index, tmp, &registry, head) {
+		list_for_each_entry_safe(index, tmp, &registry, node) {
 			if (!rcu_old_gp_ongoing(&index->ctr))
-				list_move(&index->head, &qsreaders);
+				list_move(&index->node, &qsreaders);
 		}
 
 		if (list_empty(&registry)) {
@@ -148,7 +155,7 @@ void wait_for_quiescent_state(void)
 			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
 				usleep(RCU_SLEEP_DELAY);
 			else
-				cpu_relax();
+				caa_cpu_relax();
 		}
 	}
 	/* put back the reader list in the registry */
@@ -165,82 +172,43 @@ void synchronize_rcu(void)
 	ret = pthread_sigmask(SIG_SETMASK, &newmask, &oldmask);
 	assert(!ret);
 
-	internal_rcu_lock();
+	mutex_lock(&rcu_gp_lock);
 
-	/* Remove old registry elements */
-	rcu_gc_registry();
+	if (list_empty(&registry))
+		goto out;
 
 	/* All threads should read qparity before accessing data structure
-	 * where new ptr points to. Must be done within internal_rcu_lock
-	 * because it iterates on reader threads.*/
+	 * where new ptr points to. */
 	/* Write new ptr before changing the qparity */
-	smp_mb();
-
-	switch_next_rcu_qparity();	/* 0 -> 1 */
-
-	/*
-	 * Must commit qparity update to memory before waiting for parity
-	 * 0 quiescent state. Failure to do so could result in the writer
-	 * waiting forever while new readers are always accessing data (no
-	 * progress).
-	 * Ensured by STORE_SHARED and LOAD_SHARED.
-	 */
+	cmm_smp_mb();
 
-	/*
-	 * Adding a smp_mb() which is _not_ formally required, but makes the
-	 * model easier to understand. It does not have a big performance impact
-	 * anyway, given this is the write-side.
-	 */
-	smp_mb();
+	/* Remove old registry elements */
+	rcu_gc_registry();
 
 	/*
 	 * Wait for previous parity to be empty of readers.
 	 */
-	wait_for_quiescent_state();	/* Wait readers in parity 0 */
-
-	/*
-	 * Must finish waiting for quiescent state for parity 0 before
-	 * committing qparity update to memory. Failure to do so could result in
-	 * the writer waiting forever while new readers are always accessing
-	 * data (no progress).
-	 * Ensured by STORE_SHARED and LOAD_SHARED.
-	 */
+	update_counter_and_wait();	/* 0 -> 1, wait readers in parity 0 */
 
 	/*
-	 * Adding a smp_mb() which is _not_ formally required, but makes the
+	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
 	 * model easier to understand. It does not have a big performance impact
 	 * anyway, given this is the write-side.
 	 */
-	smp_mb();
-
-	switch_next_rcu_qparity();	/* 1 -> 0 */
+	cmm_smp_mb();
 
 	/*
-	 * Must commit qparity update to memory before waiting for parity
-	 * 1 quiescent state. Failure to do so could result in the writer
-	 * waiting forever while new readers are always accessing data (no
-	 * progress).
-	 * Ensured by STORE_SHARED and LOAD_SHARED.
-	 */
-
-	/*
-	 * Adding a smp_mb() which is _not_ formally required, but makes the
-	 * model easier to understand. It does not have a big performance impact
-	 * anyway, given this is the write-side.
+	 * Wait for previous parity to be empty of readers.
 	 */
-	smp_mb();
+	update_counter_and_wait();	/* 1 -> 0, wait readers in parity 1 */
 
 	/*
-	 * Wait for previous parity to be empty of readers.
+	 * Finish waiting for reader threads before letting the old ptr being
+	 * freed.
 	 */
-	wait_for_quiescent_state();	/* Wait readers in parity 1 */
-
-	/* Finish waiting for reader threads before letting the old ptr being
-	 * freed. Must be done within internal_rcu_lock because it iterates on
-	 * reader threads. */
-	smp_mb();
-
-	internal_rcu_unlock();
+	cmm_smp_mb();
+out:
+	mutex_unlock(&rcu_gp_lock);
 	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
 	assert(!ret);
 }
@@ -311,7 +279,7 @@ static void add_thread(void)
 	/* Add to registry */
 	rcu_reader_reg->tid = pthread_self();
 	assert(rcu_reader_reg->ctr == 0);
-	list_add(&rcu_reader_reg->head, &registry);
+	list_add(&rcu_reader_reg->node, &registry);
 	rcu_reader = rcu_reader_reg;
 }
 
@@ -331,7 +299,8 @@ static void rcu_gc_registry(void)
 		ret = pthread_kill(tid, 0);
 		assert(ret != EINVAL);
 		if (ret == ESRCH) {
-			list_del(&rcu_reader_reg->head);
+			list_del(&rcu_reader_reg->node);
+			rcu_reader_reg->ctr = 0;
 			rcu_reader_reg->alloc = 0;
 			registry_arena.used -= sizeof(struct rcu_reader);
 		}
@@ -355,9 +324,9 @@ void rcu_bp_register(void)
 	if (rcu_reader)
 		goto end;
 
-	internal_rcu_lock();
+	mutex_lock(&rcu_gp_lock);
 	add_thread();
-	internal_rcu_unlock();
+	mutex_unlock(&rcu_gp_lock);
 end:
 	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
 	assert(!ret);
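
For context, the functions touched by this diff form the write-side of the liburcu "bulletproof" flavor: synchronize_rcu() flips the global grace-period parity and waits twice (update_counter_and_wait(), under rcu_gp_lock), so every reader that entered its read-side critical section before a pointer swap has left it before old memory is reclaimed. The sketch below is a minimal usage example, assuming the urcu-bp public API of this era: <urcu-bp.h>, rcu_read_lock(), rcu_read_unlock() and synchronize_rcu(), with rcu_dereference()/rcu_assign_pointer() visible through that header (they come from urcu-pointer.h in some versions), linked with -lurcu-bp. The struct cfg type and the read_value()/update_value() helpers are hypothetical, not part of the library.

#include <stdlib.h>
#include <urcu-bp.h>

struct cfg {
	int value;
};

static struct cfg *global_cfg;

/* Reader: with urcu-bp, no explicit thread registration is needed. */
int read_value(void)
{
	struct cfg *c;
	int v = -1;

	rcu_read_lock();
	c = rcu_dereference(global_cfg);
	if (c)
		v = c->value;
	rcu_read_unlock();
	return v;
}

/*
 * Updater: publish a new version, then wait for a grace period
 * (synchronize_rcu(), shown in the diff above) before freeing the old
 * one. A single updater is assumed; concurrent updaters would need
 * their own mutex around this sequence.
 */
void update_value(int value)
{
	struct cfg *new_cfg, *old_cfg;

	new_cfg = malloc(sizeof(*new_cfg));
	if (!new_cfg)
		return;
	new_cfg->value = value;
	old_cfg = global_cfg;
	rcu_assign_pointer(global_cfg, new_cfg);
	synchronize_rcu();	/* wait for all pre-existing readers */
	free(old_cfg);
}

Note that the bulletproof flavor registers reader threads lazily from the read-side itself (rcu_bp_register() in the diff) rather than requiring an explicit rcu_register_thread() call, which is why rcu_gc_registry() must garbage-collect registry entries of exited threads under rcu_gp_lock before each grace period.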