X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu-qsbr.c;h=06e81c7bbbbe47ad5eb4b0b40bf81be55d8bb9b7;hp=25074d09063a60ae5c94a17b18cd9d241b1e2fb9;hb=541d828d3101283ccdb1e25fa5a885e1d1743c1a;hpb=7a5a38f52b8814d0188b74a91cb635bc205df96c

diff --git a/urcu-qsbr.c b/urcu-qsbr.c
index 25074d0..06e81c7 100644
--- a/urcu-qsbr.c
+++ b/urcu-qsbr.c
@@ -3,7 +3,7 @@
  *
  * Userspace RCU QSBR library
  *
- * Copyright (c) 2009 Mathieu Desnoyers
+ * Copyright (c) 2009 Mathieu Desnoyers
  * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
  *
  * This library is free software; you can redistribute it and/or
@@ -23,31 +23,45 @@
  * IBM's contributions to this file may be relicensed under LGPLv2 or later.
  */
 
+#define _GNU_SOURCE
+#define _LGPL_SOURCE
 #include <stdio.h>
 #include <pthread.h>
 #include <signal.h>
 #include <assert.h>
 #include <stdlib.h>
+#include <stdint.h>
 #include <string.h>
 #include <errno.h>
 #include <poll.h>
 
+#include "urcu/wfqueue.h"
+#include "urcu/map/urcu-qsbr.h"
 #define BUILD_QSBR_LIB
-#include "urcu-qsbr-static.h"
+#include "urcu/static/urcu-qsbr.h"
+#include "urcu-pointer.h"
+
 /* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
+#undef _LGPL_SOURCE
 #include "urcu-qsbr.h"
+#define _LGPL_SOURCE
 
 void __attribute__((destructor)) rcu_exit(void);
 
 static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
 
-int gp_futex;
+int32_t gp_futex;
 
 /*
  * Global grace period counter.
  */
 unsigned long rcu_gp_ctr = RCU_GP_ONLINE;
 
+/*
+ * Active attempts to check for reader Q.S. before calling futex().
+ */
+#define RCU_QS_ACTIVE_ATTEMPTS 100
+
 /*
  * Written to only by each individual reader. Read by both the reader and the
  * writers.
@@ -59,7 +73,7 @@ unsigned int yield_active;
 unsigned int __thread rand_yield;
 #endif
 
-static LIST_HEAD(registry);
+static CDS_LIST_HEAD(registry);
 
 static void mutex_lock(pthread_mutex_t *mutex)
 {
@@ -100,7 +114,7 @@ static void mutex_unlock(pthread_mutex_t *mutex)
 static void wait_gp(void)
 {
 	/* Read reader_gp before read futex */
-	smp_rmb();
+	cmm_smp_rmb();
 	if (uatomic_read(&gp_futex) == -1)
 		futex_noasync(&gp_futex, FUTEX_WAIT, -1,
 		      NULL, NULL, 0);
@@ -108,62 +122,78 @@ static void wait_gp(void)
 
 static void update_counter_and_wait(void)
 {
-	LIST_HEAD(qsreaders);
+	CDS_LIST_HEAD(qsreaders);
 	int wait_loops = 0;
 	struct rcu_reader *index, *tmp;
 
-#if (BITS_PER_LONG < 64)
+#if (CAA_BITS_PER_LONG < 64)
 	/* Switch parity: 0 -> 1, 1 -> 0 */
-	STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
-#else	/* !(BITS_PER_LONG < 64) */
+	CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
+#else	/* !(CAA_BITS_PER_LONG < 64) */
 	/* Increment current G.P. */
-	STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
-#endif	/* !(BITS_PER_LONG < 64) */
+	CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
+#endif	/* !(CAA_BITS_PER_LONG < 64) */
 
 	/*
-	 * Enforce compiler-order of store to rcu_gp_ctr before before
-	 * load rcu_reader ctr.
-	 * This ensures synchronize_rcu() cannot be starved by readers.
+	 * Must commit rcu_gp_ctr update to memory before waiting for
+	 * quiescent state. Failure to do so could result in the writer
+	 * waiting forever while new readers are always accessing data
+	 * (no progress). Enforce compiler-order of store to rcu_gp_ctr
+	 * before load rcu_reader ctr.
+	 */
+	cmm_barrier();
+
+	/*
+	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
+	 * model easier to understand. It does not have a big performance impact
+	 * anyway, given this is the write-side.
 	 */
-	barrier();
+	cmm_smp_mb();
 
 	/*
	 * Wait for each thread rcu_reader_qs_gp count to become 0.
 	 */
 	for (;;) {
 		wait_loops++;
-		if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
-			uatomic_dec(&gp_futex);
+		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
+			uatomic_set(&gp_futex, -1);
+			/*
+			 * Write futex before write waiting (the other side
+			 * reads them in the opposite order).
+			 */
+			cmm_smp_wmb();
+			cds_list_for_each_entry(index, &registry, node) {
+				_CMM_STORE_SHARED(index->waiting, 1);
+			}
 			/* Write futex before read reader_gp */
-			smp_mb();
+			cmm_smp_mb();
 		}
-
-		list_for_each_entry_safe(index, tmp, &registry, head) {
+		cds_list_for_each_entry_safe(index, tmp, &registry, node) {
 			if (!rcu_gp_ongoing(&index->ctr))
-				list_move(&index->head, &qsreaders);
+				cds_list_move(&index->node, &qsreaders);
 		}
 
-		if (list_empty(&registry)) {
-			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+		if (cds_list_empty(&registry)) {
+			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
 				/* Read reader_gp before write futex */
-				smp_mb();
+				cmm_smp_mb();
 				uatomic_set(&gp_futex, 0);
 			}
 			break;
 		} else {
-			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
 				wait_gp();
 			} else {
 #ifndef HAS_INCOHERENT_CACHES
-				cpu_relax();
+				caa_cpu_relax();
 #else /* #ifndef HAS_INCOHERENT_CACHES */
-				smp_mb();
+				cmm_smp_mb();
 #endif /* #else #ifndef HAS_INCOHERENT_CACHES */
 			}
 		}
 	}
 	/* put back the reader list in the registry */
-	list_splice(&qsreaders, &registry);
+	cds_list_splice(&qsreaders, &registry);
 }
 
 /*
@@ -171,7 +201,7 @@ static void update_counter_and_wait(void)
  * long-size to ensure we do not encounter an overflow bug.
  */
 
-#if (BITS_PER_LONG < 64)
+#if (CAA_BITS_PER_LONG < 64)
 void synchronize_rcu(void)
 {
 	unsigned long was_online;
@@ -179,22 +209,21 @@ void synchronize_rcu(void)
 	was_online = rcu_reader.ctr;
 
 	/* All threads should read qparity before accessing data structure
-	 * where new ptr points to.
-	 */
-	/* Write new ptr before changing the qparity */
-	smp_mb();
-
-	/*
+	 * where new ptr points to. In the "then" case, rcu_thread_offline
+	 * includes a memory barrier.
+	 *
 	 * Mark the writer thread offline to make sure we don't wait for
-	 * our own quiescent state. This allows using synchronize_rcu() in
-	 * threads registered as readers.
+	 * our own quiescent state. This allows using synchronize_rcu()
+	 * in threads registered as readers.
 	 */
 	if (was_online)
-		STORE_SHARED(rcu_reader.ctr, 0);
+		rcu_thread_offline();
+	else
+		cmm_smp_mb();
 
 	mutex_lock(&rcu_gp_lock);
 
-	if (list_empty(&registry))
+	if (cds_list_empty(&registry))
 		goto out;
 
 	/*
@@ -204,18 +233,20 @@ void synchronize_rcu(void)
 
 	/*
 	 * Must finish waiting for quiescent state for parity 0 before
-	 * committing qparity update to memory. Failure to do so could result in
-	 * the writer waiting forever while new readers are always accessing
-	 * data (no progress).
-	 * Ensured by STORE_SHARED and LOAD_SHARED.
+	 * committing next rcu_gp_ctr update to memory. Failure to
+	 * do so could result in the writer waiting forever while new
+	 * readers are always accessing data (no progress). Enforce
+	 * compiler-order of load rcu_reader ctr before store to
+	 * rcu_gp_ctr.
 	 */
+	cmm_barrier();
 
 	/*
-	 * Adding a smp_mb() which is _not_ formally required, but makes the
+	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
 	 * model easier to understand. It does not have a big performance impact
 	 * anyway, given this is the write-side.
 	 */
-	smp_mb();
+	cmm_smp_mb();
 
 	/*
 	 * Wait for previous parity to be empty of readers.
@@ -229,10 +260,11 @@ out:
	 * freed.
 	 */
 	if (was_online)
-		_STORE_SHARED(rcu_reader.ctr, LOAD_SHARED(rcu_gp_ctr));
-	smp_mb();
+		rcu_thread_online();
+	else
+		cmm_smp_mb();
 }
-#else /* !(BITS_PER_LONG < 64) */
+#else /* !(CAA_BITS_PER_LONG < 64) */
 void synchronize_rcu(void)
 {
 	unsigned long was_online;
@@ -241,25 +273,27 @@ void synchronize_rcu(void)
 
 	/*
 	 * Mark the writer thread offline to make sure we don't wait for
-	 * our own quiescent state. This allows using synchronize_rcu() in
-	 * threads registered as readers.
+	 * our own quiescent state. This allows using synchronize_rcu()
+	 * in threads registered as readers.
 	 */
-	smp_mb();
 	if (was_online)
-		STORE_SHARED(rcu_reader.ctr, 0);
+		rcu_thread_offline();
+	else
+		cmm_smp_mb();
 
 	mutex_lock(&rcu_gp_lock);
-	if (list_empty(&registry))
+	if (cds_list_empty(&registry))
 		goto out;
 	update_counter_and_wait();
 out:
 	mutex_unlock(&rcu_gp_lock);
 
 	if (was_online)
-		_STORE_SHARED(rcu_reader.ctr, LOAD_SHARED(rcu_gp_ctr));
-	smp_mb();
+		rcu_thread_online();
+	else
+		cmm_smp_mb();
 }
-#endif /* !(BITS_PER_LONG < 64) */
+#endif /* !(CAA_BITS_PER_LONG < 64) */
 
 /*
  * library wrappers to be used by non-LGPL compatible source code.
@@ -296,7 +330,7 @@ void rcu_register_thread(void)
 	assert(rcu_reader.ctr == 0);
 
 	mutex_lock(&rcu_gp_lock);
-	list_add(&rcu_reader.head, &registry);
+	cds_list_add(&rcu_reader.node, &registry);
 	mutex_unlock(&rcu_gp_lock);
 	_rcu_thread_online();
 }
@@ -309,11 +343,20 @@ void rcu_unregister_thread(void)
 	 */
 	_rcu_thread_offline();
 	mutex_lock(&rcu_gp_lock);
-	list_del(&rcu_reader.head);
+	cds_list_del(&rcu_reader.node);
 	mutex_unlock(&rcu_gp_lock);
 }
 
 void rcu_exit(void)
 {
-	assert(list_empty(&registry));
+	/*
+	 * Assertion disabled because call_rcu threads are now rcu
+	 * readers, and left running at exit.
+	 * assert(cds_list_empty(&registry));
+	 */
 }
+
+DEFINE_RCU_FLAVOR()
+
+#include "urcu-call-rcu-impl.h"
+#include "urcu-defer-impl.h"
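
In the QSBR flavor patched above, rcu_read_lock()/rcu_read_unlock() compile to nothing; progress of update_counter_and_wait() instead depends on every registered thread periodically announcing a quiescent state with rcu_quiescent_state(), or leaving the active set with rcu_thread_offline(). The sketch below shows that calling pattern against the public urcu-qsbr.h API. It is illustrative only: struct cfg, shared_cfg, the loop bound and the thread layout are hypothetical application code, not part of liburcu.

/* Illustrative QSBR usage sketch; not part of the patch. */
#include <pthread.h>
#include <stdlib.h>
#include <urcu-qsbr.h>		/* QSBR flavor public API */
#include <urcu-pointer.h>	/* rcu_dereference()/rcu_assign_pointer() wrappers */

struct cfg {			/* hypothetical application data */
	int value;
};

static struct cfg *shared_cfg;

static void *reader(void *arg)
{
	rcu_register_thread();	/* adds this thread to the reader registry */

	for (int i = 0; i < 100000; i++) {
		struct cfg *c;

		rcu_read_lock();	/* no-op in QSBR, kept for documentation */
		c = rcu_dereference(shared_cfg);
		if (c)
			(void) c->value;	/* valid until the next quiescent state */
		rcu_read_unlock();

		/*
		 * Announce a quiescent state: no RCU-protected pointer is
		 * held across this point, so update_counter_and_wait() can
		 * move this thread off the registry scan and finish.
		 */
		rcu_quiescent_state();
	}

	rcu_unregister_thread();
	return NULL;
}

static void update(int v)
{
	struct cfg *newc = malloc(sizeof(*newc));
	struct cfg *oldc;

	if (!newc)
		return;
	newc->value = v;
	oldc = shared_cfg;			/* single updater assumed */
	rcu_assign_pointer(shared_cfg, newc);

	/*
	 * With this patch, synchronize_rcu() takes a registered caller
	 * offline with rcu_thread_offline()/rcu_thread_online() around
	 * the grace period (and only issues cmm_smp_mb() otherwise), so
	 * it can also be called from reader threads.
	 */
	synchronize_rcu();
	free(oldc);				/* no reader can still hold oldc */
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, reader, NULL);
	update(42);
	pthread_join(tid, NULL);
	free(shared_cfg);
	return 0;
}

Such a program is typically built against the QSBR flavor only, e.g. linked with -lurcu-qsbr, so that the unprefixed rcu_* symbols resolve to the implementation patched in this file.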
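
The rewritten wait loop spins up to RCU_QS_ACTIVE_ATTEMPTS iterations, then arms gp_futex with uatomic_set(&gp_futex, -1), flags every registered reader through index->waiting, and parks in wait_gp() on FUTEX_WAIT until a reader wakes it. The reader-side wake-up lives in urcu/static/urcu-qsbr.h and is not shown in this diff. The following is a self-contained toy model of that arm/park/wake handshake only, using C11 atomics and the Linux futex syscall; reader_busy and the delays are invented for the illustration, the seq_cst defaults stand in for the explicit cmm_smp_mb()/cmm_smp_wmb() ordering in the patch, and none of this is liburcu code.

/* Toy model of the adaptive spin-then-futex wait; not liburcu code. */
#define _GNU_SOURCE
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

#define RCU_QS_ACTIVE_ATTEMPTS	100	/* same constant the patch adds */

static atomic_int gp_futex;		/* 0 = disarmed, -1 = waiter parked */
static atomic_int reader_busy = 1;	/* stands in for the registry scan */

static long sys_futex(atomic_int *uaddr, int op, int val)
{
	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
}

/* Grace-period side: spin first, then park on the futex. */
static void wait_for_reader(void)
{
	int wait_loops = 0;

	for (;;) {
		wait_loops++;
		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
			atomic_store(&gp_futex, -1);		/* arm */
		if (!atomic_load(&reader_busy)) {
			atomic_store(&gp_futex, 0);		/* disarm */
			return;
		}
		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
			/* Sleeps only while gp_futex is still -1. */
			sys_futex(&gp_futex, FUTEX_WAIT, -1);
		/* else: busy-wait, caa_cpu_relax() in the real code */
	}
}

/* Reader side: announce quiescence, wake the waiter if it is armed. */
static void reader_quiescent(void)
{
	atomic_store(&reader_busy, 0);
	if (atomic_exchange(&gp_futex, 0) == -1)
		sys_futex(&gp_futex, FUTEX_WAKE, 1);
}

static void *reader(void *arg)
{
	usleep(1000);			/* simulate read-side work */
	reader_quiescent();
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, reader, NULL);
	wait_for_reader();		/* returns once the reader is quiescent */
	pthread_join(tid, NULL);
	puts("grace period complete");
	return 0;
}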
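
The trailing additions, DEFINE_RCU_FLAVOR() plus the inclusion of urcu-call-rcu-impl.h and urcu-defer-impl.h, emit the per-flavor descriptor and compile the call_rcu()/defer_rcu() worker machinery into this library. They are also why the assert(cds_list_empty(&registry)) in rcu_exit() is demoted to a comment: the call_rcu worker threads are registered readers that are still running when the destructor fires. Below is a hedged sketch of deferred reclamation through call_rcu(); struct cfg and shared_cfg are hypothetical, and depending on the liburcu version the call_rcu declarations may already be pulled in by urcu-qsbr.h rather than needing the explicit include shown.

/* Deferred-reclamation sketch using call_rcu(); illustrative only. */
#include <stdlib.h>
#include <urcu-qsbr.h>
#include <urcu-pointer.h>
#include <urcu-call-rcu.h>	/* struct rcu_head, call_rcu(); version dependent */
#include <urcu/compiler.h>	/* caa_container_of() */

struct cfg {			/* hypothetical application data */
	int value;
	struct rcu_head rcu_head;
};

static struct cfg *shared_cfg;

static void free_cfg(struct rcu_head *head)
{
	/* Runs in a call_rcu worker thread after a grace period. */
	free(caa_container_of(head, struct cfg, rcu_head));
}

static void update_deferred(int v)
{
	struct cfg *newc = malloc(sizeof(*newc));
	struct cfg *oldc;

	if (!newc)
		return;
	newc->value = v;
	oldc = shared_cfg;			/* single updater assumed */
	rcu_assign_pointer(shared_cfg, newc);

	/*
	 * No synchronize_rcu() here: the worker thread built from
	 * urcu-call-rcu-impl.h waits for the grace period and then
	 * invokes free_cfg(). That worker stays registered until
	 * process exit, hence the relaxed rcu_exit() above.
	 */
	if (oldc)
		call_rcu(&oldc->rcu_head, free_cfg);
}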