X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=urcu.c;h=e9df01bc7b56cdb25b6e225e6ba46d2182ab28be;hb=2819ef641bb6e2f7aa60c28f3a8f5ad252aa7d00;hp=33e35e1d36a722cb2de2f74e8f3a15c8327f3687;hpb=6b702fa4ea05b4f6d37000d5cd26d19f0d0807e0;p=userspace-rcu.git

diff --git a/urcu.c b/urcu.c
index 33e35e1..e9df01b 100644
--- a/urcu.c
+++ b/urcu.c
@@ -26,6 +26,7 @@
 #define _BSD_SOURCE
 #define _GNU_SOURCE
 #define _LGPL_SOURCE
+#define _DEFAULT_SOURCE
 #include
 #include
 #include
@@ -81,7 +82,21 @@ void __attribute__((constructor)) rcu_init(void);
 void __attribute__((destructor)) rcu_exit(void);
 #endif

+/*
+ * rcu_gp_lock ensures mutual exclusion between threads calling
+ * synchronize_rcu().
+ */
 static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
+/*
+ * rcu_registry_lock ensures mutual exclusion between threads
+ * registering and unregistering themselves to/from the registry, and
+ * with threads reading that registry from synchronize_rcu(). However,
+ * this lock is not held all the way through the completion of waiting
+ * for the grace period. It is sporadically released between iterations
+ * on the registry.
+ * rcu_registry_lock may nest inside rcu_gp_lock.
+ */
+static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;

 int32_t gp_futex;

@@ -92,7 +107,6 @@ int32_t gp_futex;
  * Written to only by writer with mutex taken. Read by both writer and readers.
  */
 unsigned long rcu_gp_ctr = RCU_GP_COUNT;
-
 /*
  * Written to only by each individual reader. Read by both the reader and the
  * writers.
@@ -205,16 +219,46 @@ static void smp_mb_master(int group)

 /*
  * synchronize_rcu() waiting. Single thread.
+ * Always called with rcu_registry_lock held. Releases this lock and
+ * grabs it again. Holds the lock when it returns.
  */
 static void wait_gp(void)
 {
-	/* Read reader_gp before read futex */
+	/*
+	 * Read reader_gp before read futex. smp_mb_master() needs to
+	 * be called with the rcu registry lock held in RCU_SIGNAL
+	 * flavor.
+	 */
 	smp_mb_master(RCU_MB_GROUP);
-	if (uatomic_read(&gp_futex) == -1)
-		futex_async(&gp_futex, FUTEX_WAIT, -1,
-			NULL, NULL, 0);
+	/* Temporarily unlock the registry lock. */
+	mutex_unlock(&rcu_registry_lock);
+	if (uatomic_read(&gp_futex) != -1)
+		goto end;
+	while (futex_async(&gp_futex, FUTEX_WAIT, -1,
+			NULL, NULL, 0)) {
+		switch (errno) {
+		case EWOULDBLOCK:
+			/* Value already changed. */
+			goto end;
+		case EINTR:
+			/* Retry if interrupted by signal. */
+			break;	/* Get out of switch. */
+		default:
+			/* Unexpected error. */
+			urcu_die(errno);
+		}
+	}
+end:
+	/*
+	 * Re-lock the registry lock before the next loop.
+	 */
+	mutex_lock(&rcu_registry_lock);
 }

+/*
+ * Always called with rcu_registry_lock held. Releases this lock between
+ * iterations and grabs it again. Holds the lock when it returns.
+ */
 void update_counter_and_wait(void)
 {
 	CDS_LIST_HEAD(qsreaders);
@@ -247,12 +291,12 @@ void update_counter_and_wait(void)
 	 * Wait for each thread URCU_TLS(rcu_reader).ctr count to become 0.
 	 */
 	for (;;) {
+		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
+			wait_loops++;
 		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
 			uatomic_dec(&gp_futex);
 			/* Write futex before read reader_gp */
 			smp_mb_master(RCU_MB_GROUP);
-		} else {
-			wait_loops++;
 		}

 		cds_list_for_each_entry_safe(index, tmp, &registry, node) {
@@ -269,10 +313,19 @@ void update_counter_and_wait(void)
 			}
 			break;
 		} else {
-			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
+			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
+				/* wait_gp unlocks/locks registry lock. */
 				wait_gp();
-			else
+			} else {
+				/* Temporarily unlock the registry lock. */
+				mutex_unlock(&rcu_registry_lock);
 				caa_cpu_relax();
+				/*
+				 * Re-lock the registry lock before the
+				 * next loop.
+				 */
+				mutex_lock(&rcu_registry_lock);
+			}
 		}
 #else /* #ifndef HAS_INCOHERENT_CACHES */
 		/*
@@ -293,10 +346,18 @@ void update_counter_and_wait(void)
 				wait_gp_loops = 0;
 			}
 			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
+				/* wait_gp unlocks/locks registry lock. */
 				wait_gp();
 				wait_gp_loops++;
 			} else {
+				/* Temporarily unlock the registry lock. */
+				mutex_unlock(&rcu_registry_lock);
 				caa_cpu_relax();
+				/*
+				 * Re-lock the registry lock before the
+				 * next loop.
+				 */
+				mutex_lock(&rcu_registry_lock);
 			}
 		}
 #endif /* #else #ifndef HAS_INCOHERENT_CACHES */
@@ -308,18 +369,23 @@ void update_counter_and_wait(void)
 void synchronize_rcu(void)
 {
 	mutex_lock(&rcu_gp_lock);
+	mutex_lock(&rcu_registry_lock);

 	if (cds_list_empty(&registry))
 		goto out;

-	/* All threads should read qparity before accessing data structure
-	 * where new ptr points to. Must be done within rcu_gp_lock because it
-	 * iterates on reader threads.*/
+	/*
+	 * All threads should read qparity before accessing data structure
+	 * where new ptr points to. Must be done within rcu_registry_lock
+	 * because it iterates on reader threads.
+	 */
 	/* Write new ptr before changing the qparity */
 	smp_mb_master(RCU_MB_GROUP);

 	/*
 	 * Wait for previous parity to be empty of readers.
+	 * update_counter_and_wait() can release and grab again
+	 * rcu_registry_lock internally.
 	 */
 	update_counter_and_wait();	/* 0 -> 1, wait readers in parity 0 */

@@ -341,14 +407,19 @@ void synchronize_rcu(void)

 	/*
 	 * Wait for previous parity to be empty of readers.
+	 * update_counter_and_wait() can release and grab again
+	 * rcu_registry_lock internally.
 	 */
 	update_counter_and_wait();	/* 1 -> 0, wait readers in parity 1 */

-	/* Finish waiting for reader threads before letting the old ptr being
-	 * freed. Must be done within rcu_gp_lock because it iterates on reader
-	 * threads. */
+	/*
+	 * Finish waiting for reader threads before letting the old ptr
+	 * be freed. Must be done within rcu_registry_lock because it
+	 * iterates on reader threads.
+	 */
 	smp_mb_master(RCU_MB_GROUP);
 out:
+	mutex_unlock(&rcu_registry_lock);
 	mutex_unlock(&rcu_gp_lock);
 }

@@ -372,17 +443,17 @@ void rcu_register_thread(void)
 	assert(URCU_TLS(rcu_reader).need_mb == 0);
 	assert(!(URCU_TLS(rcu_reader).ctr & RCU_GP_CTR_NEST_MASK));

-	mutex_lock(&rcu_gp_lock);
+	mutex_lock(&rcu_registry_lock);
 	rcu_init();	/* In case gcc does not support constructor attribute */
 	cds_list_add(&URCU_TLS(rcu_reader).node, &registry);
-	mutex_unlock(&rcu_gp_lock);
+	mutex_unlock(&rcu_registry_lock);
 }

 void rcu_unregister_thread(void)
 {
-	mutex_lock(&rcu_gp_lock);
+	mutex_lock(&rcu_registry_lock);
 	cds_list_del(&URCU_TLS(rcu_reader).node);
-	mutex_unlock(&rcu_gp_lock);
+	mutex_unlock(&rcu_registry_lock);
 }

 #ifdef RCU_MEMBARRIER
@@ -413,9 +484,9 @@ static void sigrcu_handler(int signo, siginfo_t *siginfo, void *context)
  * rcu_init constructor. Called when the library is linked, but also when
  * reader threads are calling rcu_register_thread().
  * Should only be called by a single thread at a given time. This is ensured by
- * holing the rcu_gp_lock from rcu_register_thread() or by running at library
- * load time, which should not be executed by multiple threads nor concurrently
- * with rcu_register_thread() anyway.
+ * holding the rcu_registry_lock from rcu_register_thread() or by running
+ * at library load time, which should not be executed by multiple
+ * threads nor concurrently with rcu_register_thread() anyway.
  */
 void rcu_init(void)
 {
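
Below is a minimal usage sketch, separate from the patch above, of the paths the new rcu_registry_lock serializes: rcu_register_thread() and rcu_unregister_thread() now take only rcu_registry_lock, while synchronize_rcu() takes rcu_gp_lock and then rcu_registry_lock before iterating on the reader registry. It assumes the public <urcu.h> API built from this file (membarrier flavor, linked with -lurcu); the names cfg, global_cfg, reader_thread and update_cfg are made up for illustration, and a single updater thread is assumed.

#define RCU_MEMBARRIER		/* assumption: default membarrier flavor, link with -lurcu */
#include <pthread.h>
#include <stdlib.h>
#include <urcu.h>

struct cfg {
	int value;
};

static struct cfg *global_cfg;	/* RCU-protected pointer */

static void *reader_thread(void *arg)
{
	/* Joins the registry; serialized by rcu_registry_lock. */
	rcu_register_thread();

	for (int i = 0; i < 1000; i++) {
		rcu_read_lock();
		struct cfg *c = rcu_dereference(global_cfg);
		if (c)
			(void) c->value;	/* read-side critical section */
		rcu_read_unlock();
	}

	/* Leaves the registry; also serialized by rcu_registry_lock. */
	rcu_unregister_thread();
	return NULL;
}

/* Single updater assumed; concurrent updaters would need their own lock. */
static void update_cfg(int value)
{
	struct cfg *new_cfg = malloc(sizeof(*new_cfg));
	struct cfg *old_cfg = global_cfg;

	if (!new_cfg)
		return;
	new_cfg->value = value;
	rcu_assign_pointer(global_cfg, new_cfg);

	/*
	 * Takes rcu_gp_lock, then rcu_registry_lock, and waits for all
	 * registered readers; the old copy can be freed afterwards.
	 */
	synchronize_rcu();
	free(old_cfg);
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, reader_thread, NULL);
	update_cfg(42);
	pthread_join(tid, NULL);
	free(global_cfg);
	return 0;
}

With the RCU_SIGNAL flavor the same sketch would define RCU_SIGNAL and link against -lurcu-signal instead; the reader/updater code is unchanged, only the smp_mb_master() mechanism inside urcu.c differs.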