X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu-qsbr.c;h=553029578a210383df955fb384d6102ad807d51e;hp=6ba03642f852763057781dcf14bf2ff4a01dc5b9;hb=1f689e13ea7e519b1afc001e9c55a7b1b60b599f;hpb=3395d46cc4ab83f9c23ecc5410122111cce5905d

diff --git a/urcu-qsbr.c b/urcu-qsbr.c
index 6ba0364..5530295 100644
--- a/urcu-qsbr.c
+++ b/urcu-qsbr.c
@@ -1,9 +1,9 @@
 /*
- * urcu.c
+ * urcu-qsbr.c
  *
- * Userspace RCU library
+ * Userspace RCU QSBR library
  *
- * Copyright (c) 2009 Mathieu Desnoyers
+ * Copyright (c) 2009 Mathieu Desnoyers
  * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
  *
  * This library is free software; you can redistribute it and/or
@@ -23,142 +23,277 @@
  * IBM's contributions to this file may be relicensed under LGPLv2 or later.
  */
 
+#define _GNU_SOURCE
+#define _LGPL_SOURCE
 #include <stdio.h>
 #include <pthread.h>
 #include <signal.h>
 #include <assert.h>
 #include <stdlib.h>
+#include <stdint.h>
 #include <string.h>
 #include <errno.h>
 #include <poll.h>
 
-#include "urcu-qsbr.h"
+#include "urcu/wfqueue.h"
+#include "urcu/map/urcu-qsbr.h"
+#define BUILD_QSBR_LIB
+#include "urcu/static/urcu-qsbr.h"
+#include "urcu-pointer.h"
+
 /* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
-//#include "urcu.h"
+#undef _LGPL_SOURCE
+#include "urcu-qsbr.h"
+#define _LGPL_SOURCE
+
+void __attribute__((destructor)) rcu_exit(void);
+
+static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
 
-pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
+int32_t gp_futex;
 
 /*
  * Global grace period counter.
  */
-long urcu_gp_ctr = 0;
+unsigned long rcu_gp_ctr = RCU_GP_ONLINE;
+
+/*
+ * Active attempts to check for reader Q.S. before calling futex().
+ */
+#define RCU_QS_ACTIVE_ATTEMPTS 100
 
 /*
  * Written to only by each individual reader. Read by both the reader and the
  * writers.
  */
-long __thread rcu_reader_qs_gp;
-
-/* Thread IDs of registered readers */
-#define INIT_NUM_THREADS 4
-
-struct reader_registry {
-	pthread_t tid;
-	long *rcu_reader_qs_gp;
-	char *need_mb;
-};
+struct rcu_reader __thread rcu_reader;
 
 #ifdef DEBUG_YIELD
 unsigned int yield_active;
 unsigned int __thread rand_yield;
 #endif
 
-static struct reader_registry *registry;
-static char __thread need_mb;
-static int num_readers, alloc_readers;
+static CDS_LIST_HEAD(registry);
 
-void internal_urcu_lock(void)
+static void mutex_lock(pthread_mutex_t *mutex)
 {
 	int ret;
 
 #ifndef DISTRUST_SIGNALS_EXTREME
-	ret = pthread_mutex_lock(&urcu_mutex);
+	ret = pthread_mutex_lock(mutex);
 	if (ret) {
 		perror("Error in pthread mutex lock");
 		exit(-1);
 	}
 #else /* #ifndef DISTRUST_SIGNALS_EXTREME */
-	while ((ret = pthread_mutex_trylock(&urcu_mutex)) != 0) {
+	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
 		if (ret != EBUSY && ret != EINTR) {
 			printf("ret = %d, errno = %d\n", ret, errno);
 			perror("Error in pthread mutex lock");
 			exit(-1);
 		}
-		if (need_mb) {
-			smp_mb();
-			need_mb = 0;
-			smp_mb();
-		}
 		poll(NULL,0,10);
 	}
 #endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
 }
 
-void internal_urcu_unlock(void)
+static void mutex_unlock(pthread_mutex_t *mutex)
 {
 	int ret;
 
-	ret = pthread_mutex_unlock(&urcu_mutex);
+	ret = pthread_mutex_unlock(mutex);
 	if (ret) {
 		perror("Error in pthread mutex unlock");
 		exit(-1);
 	}
 }
 
-#ifdef HAS_INCOHERENT_CACHES
-static void force_mb_single_thread(struct reader_registry *index)
+/*
+ * synchronize_rcu() waiting. Single thread.
+ */
+static void wait_gp(void)
 {
-	smp_mb();
+	/* Read reader_gp before read futex */
+	cmm_smp_rmb();
+	if (uatomic_read(&gp_futex) == -1)
+		futex_noasync(&gp_futex, FUTEX_WAIT, -1,
+			      NULL, NULL, 0);
 }
-#endif /* #ifdef HAS_INCOHERENT_CACHES */
 
-static void force_mb_all_threads(void)
+static void update_counter_and_wait(void)
 {
-	smp_mb();
-}
+	CDS_LIST_HEAD(qsreaders);
+	int wait_loops = 0;
+	struct rcu_reader *index, *tmp;
+
+#if (CAA_BITS_PER_LONG < 64)
+	/* Switch parity: 0 -> 1, 1 -> 0 */
+	CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
+#else	/* !(CAA_BITS_PER_LONG < 64) */
+	/* Increment current G.P. */
+	CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
+#endif	/* !(CAA_BITS_PER_LONG < 64) */
 
-void wait_for_quiescent_state(void)
-{
-	struct reader_registry *index;
+	/*
+	 * Must commit rcu_gp_ctr update to memory before waiting for
+	 * quiescent state. Failure to do so could result in the writer
+	 * waiting forever while new readers are always accessing data
+	 * (no progress). Enforce compiler-order of store to rcu_gp_ctr
+	 * before load rcu_reader ctr.
+	 */
+	cmm_barrier();
+
+	/*
+	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
+	 * model easier to understand. It does not have a big performance impact
+	 * anyway, given this is the write-side.
+	 */
+	cmm_smp_mb();
 
-	if (!registry)
-		return;
 	/*
 	 * Wait for each thread rcu_reader_qs_gp count to become 0.
 	 */
-	for (index = registry; index < registry + num_readers; index++) {
+	for (;;) {
+		wait_loops++;
+		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
+			uatomic_set(&gp_futex, -1);
+			/*
+			 * Write futex before write waiting (the other side
+			 * reads them in the opposite order).
+			 */
+			cmm_smp_wmb();
+			cds_list_for_each_entry(index, &registry, node) {
+				_CMM_STORE_SHARED(index->waiting, 1);
+			}
+			/* Write futex before read reader_gp */
+			cmm_smp_mb();
+		}
+		cds_list_for_each_entry_safe(index, tmp, &registry, node) {
+			if (!rcu_gp_ongoing(&index->ctr))
+				cds_list_move(&index->node, &qsreaders);
+		}
+
+		if (cds_list_empty(&registry)) {
+			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
+				/* Read reader_gp before write futex */
+				cmm_smp_mb();
+				uatomic_set(&gp_futex, 0);
+			}
+			break;
+		} else {
+			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
+				wait_gp();
+			} else {
 #ifndef HAS_INCOHERENT_CACHES
-		while (rcu_gp_ongoing(index->rcu_reader_qs_gp) &&
-		       (*index->rcu_reader_qs_gp - urcu_gp_ctr < 0))
-			cpu_relax();
+				caa_cpu_relax();
 #else /* #ifndef HAS_INCOHERENT_CACHES */
-		int wait_loops = 0;
-		/*
-		 * BUSY-LOOP. Force the reader thread to commit its
-		 * rcu_reader_qs_gp update to memory if we wait for too long.
-		 */
-		while (rcu_gp_ongoing(index->rcu_reader_qs_gp) &&
-		       (*index->rcu_reader_qs_gp - urcu_gp_ctr < 0)) {
-			if (wait_loops++ == KICK_READER_LOOPS) {
-				force_mb_single_thread(index);
-				wait_loops = 0;
-			} else {
-				cpu_relax();
+				cmm_smp_mb();
+#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
 			}
 		}
-#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
 	}
+	/* put back the reader list in the registry */
+	cds_list_splice(&qsreaders, &registry);
 }
 
+/*
+ * Using a two-subphases algorithm for architectures with smaller than 64-bit
+ * long-size to ensure we do not encounter an overflow bug.
+ */
+
+#if (CAA_BITS_PER_LONG < 64)
 void synchronize_rcu(void)
 {
-	internal_urcu_lock();
-	force_mb_all_threads();
-	urcu_gp_ctr += 2;
-	wait_for_quiescent_state();
-	force_mb_all_threads();
-	internal_urcu_unlock();
+	unsigned long was_online;
+
+	was_online = rcu_reader.ctr;
+
+	/* All threads should read qparity before accessing data structure
+	 * where new ptr points to. In the "then" case, rcu_thread_offline
+	 * includes a memory barrier.
+	 *
+	 * Mark the writer thread offline to make sure we don't wait for
+	 * our own quiescent state. This allows using synchronize_rcu()
+	 * in threads registered as readers.
+	 */
+	if (was_online)
+		rcu_thread_offline();
+	else
+		cmm_smp_mb();
+
+	mutex_lock(&rcu_gp_lock);
+
+	if (cds_list_empty(&registry))
+		goto out;
+
+	/*
+	 * Wait for previous parity to be empty of readers.
+	 */
+	update_counter_and_wait();	/* 0 -> 1, wait readers in parity 0 */
+
+	/*
+	 * Must finish waiting for quiescent state for parity 0 before
+	 * committing next rcu_gp_ctr update to memory. Failure to
+	 * do so could result in the writer waiting forever while new
+	 * readers are always accessing data (no progress). Enforce
+	 * compiler-order of load rcu_reader ctr before store to
+	 * rcu_gp_ctr.
+	 */
+	cmm_barrier();
+
+	/*
+	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
+	 * model easier to understand. It does not have a big performance impact
+	 * anyway, given this is the write-side.
+	 */
+	cmm_smp_mb();
+
+	/*
+	 * Wait for previous parity to be empty of readers.
+	 */
+	update_counter_and_wait();	/* 1 -> 0, wait readers in parity 1 */
+out:
+	mutex_unlock(&rcu_gp_lock);
+
+	/*
+	 * Finish waiting for reader threads before letting the old ptr be freed.
+	 */
+	if (was_online)
+		rcu_thread_online();
+	else
+		cmm_smp_mb();
+}
+#else /* !(CAA_BITS_PER_LONG < 64) */
+void synchronize_rcu(void)
+{
+	unsigned long was_online;
+
+	was_online = rcu_reader.ctr;
+
+	/*
+	 * Mark the writer thread offline to make sure we don't wait for
+	 * our own quiescent state. This allows using synchronize_rcu()
+	 * in threads registered as readers.
+	 */
+	if (was_online)
+		rcu_thread_offline();
+	else
+		cmm_smp_mb();
+
+	mutex_lock(&rcu_gp_lock);
+	if (cds_list_empty(&registry))
+		goto out;
+	update_counter_and_wait();
+out:
+	mutex_unlock(&rcu_gp_lock);
+
+	if (was_online)
+		rcu_thread_online();
+	else
+		cmm_smp_mb();
 }
+#endif /* !(CAA_BITS_PER_LONG < 64) */
 
 /*
  * library wrappers to be used by non-LGPL compatible source code.
@@ -174,91 +309,52 @@ void rcu_read_unlock(void)
 	_rcu_read_unlock();
 }
 
-void *rcu_dereference(void *p)
+void rcu_quiescent_state(void)
 {
-	return _rcu_dereference(p);
+	_rcu_quiescent_state();
 }
 
-void *rcu_assign_pointer_sym(void **p, void *v)
+void rcu_thread_offline(void)
 {
-	wmb();
-	return STORE_SHARED(p, v);
+	_rcu_thread_offline();
 }
 
-void *rcu_xchg_pointer_sym(void **p, void *v)
+void rcu_thread_online(void)
 {
-	wmb();
-	return xchg(p, v);
+	_rcu_thread_online();
 }
 
-void *rcu_publish_content_sym(void **p, void *v)
-{
-	void *oldptr;
-
-	oldptr = _rcu_xchg_pointer(p, v);
-	synchronize_rcu();
-	return oldptr;
-}
-
-static void rcu_add_reader(pthread_t id)
+void rcu_register_thread(void)
 {
-	struct reader_registry *oldarray;
+	rcu_reader.tid = pthread_self();
+	assert(rcu_reader.ctr == 0);
 
-	if (!registry) {
-		alloc_readers = INIT_NUM_THREADS;
-		num_readers = 0;
-		registry =
-			malloc(sizeof(struct reader_registry) * alloc_readers);
-	}
-	if (alloc_readers < num_readers + 1) {
-		oldarray = registry;
-		registry = malloc(sizeof(struct reader_registry)
-				* (alloc_readers << 1));
-		memcpy(registry, oldarray,
-			sizeof(struct reader_registry) * alloc_readers);
-		alloc_readers <<= 1;
-		free(oldarray);
-	}
-	registry[num_readers].tid = id;
-	/* reference to the TLS of _this_ reader thread. */
-	registry[num_readers].rcu_reader_qs_gp = &rcu_reader_qs_gp;
-	registry[num_readers].need_mb = &need_mb;
-	num_readers++;
+	mutex_lock(&rcu_gp_lock);
+	cds_list_add(&rcu_reader.node, &registry);
+	mutex_unlock(&rcu_gp_lock);
+	_rcu_thread_online();
 }
 
-/*
- * Never shrink (implementation limitation).
- * This is O(nb threads). Eventually use a hash table.
- */
-static void rcu_remove_reader(pthread_t id)
+void rcu_unregister_thread(void)
 {
-	struct reader_registry *index;
-
-	assert(registry != NULL);
-	for (index = registry; index < registry + num_readers; index++) {
-		if (pthread_equal(index->tid, id)) {
-			memcpy(index, &registry[num_readers - 1],
-				sizeof(struct reader_registry));
-			registry[num_readers - 1].tid = 0;
-			registry[num_readers - 1].rcu_reader_qs_gp = NULL;
-			num_readers--;
-			return;
-		}
-	}
-	/* Hrm not found, forgot to register ? */
-	assert(0);
+	/*
+	 * We have to make the thread offline, otherwise we end up deadlocking
+	 * with a waiting writer.
+	 */
+	_rcu_thread_offline();
+	mutex_lock(&rcu_gp_lock);
+	cds_list_del(&rcu_reader.node);
+	mutex_unlock(&rcu_gp_lock);
 }
 
-void rcu_register_thread(void)
+void rcu_exit(void)
 {
-	internal_urcu_lock();
-	rcu_add_reader(pthread_self());
-	internal_urcu_unlock();
+	/*
+	 * Assertion disabled because call_rcu threads are now rcu
+	 * readers, and left running at exit.
+	 * assert(cds_list_empty(&registry));
+	 */
}
 
-void rcu_unregister_thread(void)
-{
-	internal_urcu_lock();
-	rcu_remove_reader(pthread_self());
-	internal_urcu_unlock();
-}
+#include "urcu-call-rcu-impl.h"
+#include "urcu-defer-impl.h"
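
For readers unfamiliar with the QSBR flavour, here is a minimal usage sketch (not part of the patch above) of how the symbols this file exports are typically driven from application code: every reader thread registers itself, periodically announces a quiescent state, and unregisters before exiting, while an updater publishes a new pointer and calls synchronize_rcu() before freeing the old one. The struct mydata type, global_ptr variable and the reader_thread/update functions are illustrative names only; the liburcu-qsbr calls are the ones wrapped in the diff.

/* Illustrative sketch only; compile with something like: gcc example.c -lurcu-qsbr -lpthread */
#include <pthread.h>
#include <stdlib.h>
#include <urcu-qsbr.h>		/* QSBR flavour public header */

struct mydata {
	int value;
};

static struct mydata *global_ptr;	/* RCU-protected pointer (example only) */

static void *reader_thread(void *arg)
{
	rcu_register_thread();			/* add this thread to the reader registry */

	for (int i = 0; i < 1000; i++) {
		struct mydata *p;

		rcu_read_lock();		/* compiles to nothing for QSBR, kept for API symmetry */
		p = rcu_dereference(global_ptr);
		if (p)
			(void) p->value;	/* read-side access */
		rcu_read_unlock();

		/* Outside all read-side critical sections: announce a quiescent state. */
		rcu_quiescent_state();
	}

	rcu_unregister_thread();		/* goes offline, then leaves the registry */
	return NULL;
}

static void update(void)
{
	struct mydata *new, *old;

	new = malloc(sizeof(*new));
	new->value = 42;
	old = rcu_xchg_pointer(&global_ptr, new);	/* publish the new version */
	synchronize_rcu();	/* wait for every registered reader to pass a quiescent state */
	free(old);		/* no reader can still reference the old version */
}

Because the QSBR read-side primitives generate no code, synchronize_rcu() can only make progress if every registered thread either calls rcu_quiescent_state() regularly or brackets long blocking phases with rcu_thread_offline()/rcu_thread_online(); otherwise the writer ends up parked in wait_gp() above.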