diff --git a/urcu-qsbr.c b/urcu-qsbr.c
index 48bc62e..d1f5d91 100644
--- a/urcu-qsbr.c
+++ b/urcu-qsbr.c
@@ -1,7 +1,7 @@
 /*
- * urcu.c
+ * urcu-qsbr.c
  *
- * Userspace RCU library
+ * Userspace RCU QSBR library
  *
  * Copyright (c) 2009 Mathieu Desnoyers
  * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
@@ -32,9 +32,9 @@
 #include <errno.h>
 #include <poll.h>
 
-#include "urcu-static.h"
+#include "urcu-qsbr-static.h"
 /* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
-#include "urcu.h"
+#include "urcu-qsbr.h"
 
 pthread_mutex_t urcu_mutex = PTHREAD_MUTEX_INITIALIZER;
 
@@ -47,15 +47,14 @@ long urcu_gp_ctr = 0;
  * Written to only by each individual reader. Read by both the reader and the
  * writers.
  */
-long __thread urcu_active_readers;
+long __thread rcu_reader_qs_gp;
 
 /* Thread IDs of registered readers */
 #define INIT_NUM_THREADS 4
 
 struct reader_registry {
 	pthread_t tid;
-	long *urcu_active_readers;
-	char *need_mb;
+	long *rcu_reader_qs_gp;
 };
 
 #ifdef DEBUG_YIELD
@@ -64,10 +63,9 @@ unsigned int __thread rand_yield;
 #endif
 
 static struct reader_registry *registry;
-static char __thread need_mb;
 static int num_readers, alloc_readers;
 
-void internal_urcu_lock(void)
+static void internal_urcu_lock(void)
 {
 	int ret;
 
@@ -84,17 +82,12 @@ void internal_urcu_lock(void)
 			perror("Error in pthread mutex lock");
 			exit(-1);
 		}
-		if (need_mb) {
-			smp_mb();
-			need_mb = 0;
-			smp_mb();
-		}
 		poll(NULL,0,10);
 	}
 #endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
 }
 
-void internal_urcu_unlock(void)
+static void internal_urcu_unlock(void)
 {
 	int ret;
 
@@ -117,26 +110,28 @@ static void force_mb_all_threads(void)
 	smp_mb();
 }
 
-void wait_for_quiescent_state(void)
+static void wait_for_quiescent_state(void)
 {
 	struct reader_registry *index;
 
 	if (!registry)
 		return;
 	/*
-	 * Wait for each thread urcu_active_readers count to become 0.
+	 * Wait for each thread rcu_reader_qs_gp to go offline or catch up.
 	 */
 	for (index = registry; index < registry + num_readers; index++) {
 #ifndef HAS_INCOHERENT_CACHES
-		while (rcu_old_gp_ongoing(index->urcu_active_readers))
+		while (rcu_gp_ongoing(index->rcu_reader_qs_gp) &&
+		       (*index->rcu_reader_qs_gp - urcu_gp_ctr < 0))
 			cpu_relax();
 #else /* #ifndef HAS_INCOHERENT_CACHES */
 		int wait_loops = 0;
 		/*
 		 * BUSY-LOOP. Force the reader thread to commit its
-		 * urcu_active_readers update to memory if we wait for too long.
+		 * rcu_reader_qs_gp update to memory if we wait for too long.
 		 */
-		while (rcu_old_gp_ongoing(index->urcu_active_readers)) {
+		while (rcu_gp_ongoing(index->rcu_reader_qs_gp) &&
+		       (*index->rcu_reader_qs_gp - urcu_gp_ctr < 0)) {
 			if (wait_loops++ == KICK_READER_LOOPS) {
 				force_mb_single_thread(index);
 				wait_loops = 0;
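The rewritten wait condition above is the core of the QSBR flavour: a registered reader only blocks a grace period while it is online (rcu_gp_ongoing()) and its private counter still lags the global urcu_gp_ctr; the signed subtraction keeps the comparison valid across counter wraparound. The following standalone sketch (not part of the patch) models that test; reader_blocks_gp() is a hypothetical name, and the odd-counter-means-online convention is inferred from the "was_online = rcu_reader_qs_gp & 1" test in the synchronize_rcu() hunk below.

#include <stdio.h>

static long urcu_gp_ctr;	/* global grace-period counter, always even */

/* An odd per-reader counter value means the reader is online. */
static int rcu_gp_ongoing(long *value)
{
	return *value & 1;
}

/*
 * An online reader still blocks the grace period while its counter
 * snapshot lags the global counter.  The signed subtraction keeps
 * the test correct even if the counter wraps around.
 */
static int reader_blocks_gp(long *rcu_reader_qs_gp)
{
	return rcu_gp_ongoing(rcu_reader_qs_gp) &&
	       (*rcu_reader_qs_gp - urcu_gp_ctr < 0);
}

int main(void)
{
	long reader = 0;		/* offline reader */

	urcu_gp_ctr = 4;		/* a new grace period has started */
	printf("offline reader blocks: %d\n", reader_blocks_gp(&reader)); /* 0 */
	reader = 3;			/* online, pre-GP snapshot */
	printf("stale reader blocks:   %d\n", reader_blocks_gp(&reader)); /* 1 */
	reader = urcu_gp_ctr + 1;	/* announced a quiescent state */
	printf("fresh reader blocks:   %d\n", reader_blocks_gp(&reader)); /* 0 */
	return 0;
}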
@@ -150,12 +145,27 @@
 void synchronize_rcu(void)
 {
+	int was_online;
+
+	was_online = rcu_reader_qs_gp & 1;
+
+	/*
+	 * Mark the writer thread offline to make sure we don't wait for
+	 * our own quiescent state. This allows using synchronize_rcu() in
+	 * threads registered as readers.
+	 */
+	if (was_online)
+		_rcu_thread_offline();
+
 	internal_urcu_lock();
 	force_mb_all_threads();
 	urcu_gp_ctr += 2;
 	wait_for_quiescent_state();
 	force_mb_all_threads();
 	internal_urcu_unlock();
+
+	if (was_online)
+		_rcu_thread_online();
 }
 
 /*
@@ -198,6 +208,21 @@ void *rcu_publish_content_sym(void **p, void *v)
 	return oldptr;
 }
 
+void rcu_quiescent_state(void)
+{
+	_rcu_quiescent_state();
+}
+
+void rcu_thread_offline(void)
+{
+	_rcu_thread_offline();
+}
+
+void rcu_thread_online(void)
+{
+	_rcu_thread_online();
+}
+
 static void rcu_add_reader(pthread_t id)
 {
 	struct reader_registry *oldarray;
@@ -219,8 +244,7 @@ static void rcu_add_reader(pthread_t id)
 	}
 	registry[num_readers].tid = id;
 	/* reference to the TLS of _this_ reader thread. */
-	registry[num_readers].urcu_active_readers = &urcu_active_readers;
-	registry[num_readers].need_mb = &need_mb;
+	registry[num_readers].rcu_reader_qs_gp = &rcu_reader_qs_gp;
 	num_readers++;
 }
 
@@ -238,7 +262,7 @@ static void rcu_remove_reader(pthread_t id)
 			memcpy(index, &registry[num_readers - 1],
 				sizeof(struct reader_registry));
 			registry[num_readers - 1].tid = 0;
-			registry[num_readers - 1].urcu_active_readers = NULL;
+			registry[num_readers - 1].rcu_reader_qs_gp = NULL;
 			num_readers--;
 			return;
 		}
@@ -252,10 +276,16 @@ void rcu_register_thread(void)
 	internal_urcu_lock();
 	rcu_add_reader(pthread_self());
 	internal_urcu_unlock();
+	_rcu_thread_online();
 }
 
 void rcu_unregister_thread(void)
 {
+	/*
+	 * We have to make the thread offline, otherwise we end up
+	 * deadlocking with a waiting writer.
+	 */
+	_rcu_thread_offline();
 	internal_urcu_lock();
 	rcu_remove_reader(pthread_self());
 	internal_urcu_unlock();
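Taken together, the exported wrappers give the following usage pattern: readers register, periodically announce quiescent states, and bracket blocking calls with rcu_thread_offline()/rcu_thread_online(); writers swap a pointer and call synchronize_rcu() before reclaiming. Below is a hedged sketch, assuming the urcu-qsbr.h header also provides rcu_dereference() as the other flavours do; global_ptr and both thread bodies are made up for illustration.

#include <pthread.h>
#include <poll.h>
#include <stdlib.h>
#include "urcu-qsbr.h"

static int *global_ptr;

static void *reader_thread(void *arg)
{
	int i;

	rcu_register_thread();		/* also marks this thread online */
	for (i = 0; i < 1000; i++) {
		int *p = rcu_dereference(global_ptr);

		if (p)
			(void)*p;	/* read-side use of the pointer */
		rcu_quiescent_state();	/* no RCU-protected reference held here */
	}
	rcu_thread_offline();		/* about to block: stop delaying writers */
	poll(NULL, 0, 10);
	rcu_thread_online();
	rcu_unregister_thread();	/* marks the thread offline first */
	return NULL;
}

static void *writer_thread(void *arg)
{
	int *newp, *oldp;

	newp = malloc(sizeof(*newp));
	*newp = 42;
	oldp = global_ptr;
	global_ptr = newp;	/* a real writer would publish through the
				 * library's pointer-exchange primitives,
				 * e.g. rcu_publish_content_sym() above */
	synchronize_rcu();	/* wait out all pre-existing readers */
	free(oldp);		/* safe: no reader can still hold oldp */
	return NULL;
}

int main(void)
{
	pthread_t r, w;

	pthread_create(&r, NULL, reader_thread, NULL);
	pthread_create(&w, NULL, writer_thread, NULL);
	pthread_join(r, NULL);
	pthread_join(w, NULL);
	free(global_ptr);
	return 0;
}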