X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu-bp.c;h=3b2062d6590034ad33619ab1372b137237a9fd1e;hp=eda000539b19b01ce582fe098be1d1cc0f1a3b3f;hb=541d828d3101283ccdb1e25fa5a885e1d1743c1a;hpb=6abb4bd53503c325dc94b0c0f60b44b9550b462f

diff --git a/urcu-bp.c b/urcu-bp.c
index eda0005..3b2062d 100644
--- a/urcu-bp.c
+++ b/urcu-bp.c
@@ -3,7 +3,7 @@
  *
  * Userspace RCU library, "bulletproof" version.
  *
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
  *
  * This library is free software; you can redistribute it and/or
@@ -24,6 +24,7 @@
  */

 #define _GNU_SOURCE
+#define _LGPL_SOURCE
 #include <stdio.h>
 #include <pthread.h>
 #include <signal.h>
@@ -35,14 +36,58 @@
 #include <unistd.h>
 #include <sys/mman.h>

-#include "urcu-bp-static.h"
+#include "urcu/wfqueue.h"
+#include "urcu/map/urcu-bp.h"
+#include "urcu/static/urcu-bp.h"
+#include "urcu-pointer.h"
+
 /* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
+#undef _LGPL_SOURCE
 #include "urcu-bp.h"
+#define _LGPL_SOURCE
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+#ifndef __linux__
+
+#define MREMAP_MAYMOVE	1
+#define MREMAP_FIXED	2
+
+/*
+ * mremap wrapper for non-Linux systems. Maps a RW, anonymous private mapping.
+ * This is not generic.
+ */
+void *mremap(void *old_address, size_t old_size, size_t new_size, int flags)
+{
+	void *new_address;
+
+	assert(flags & MREMAP_MAYMOVE);
+	assert(!(flags & MREMAP_FIXED));
+	new_address = mmap(old_address, new_size,
+			   PROT_READ | PROT_WRITE,
+			   MAP_ANONYMOUS | MAP_PRIVATE,
+			   -1, 0);
+	if (new_address == MAP_FAILED)
+		return MAP_FAILED;
+	if (old_address) {
+		memcpy(new_address, old_address, old_size);
+		munmap(old_address, old_size);
+	}
+	return new_address;
+}
+#endif

 /* Sleep delay in us */
 #define RCU_SLEEP_DELAY		1000
 #define ARENA_INIT_ALLOC	16

+/*
+ * Active attempts to check for reader Q.S. before calling sleep().
+ */
+#define RCU_QS_ACTIVE_ATTEMPTS 100
+
 void __attribute__((destructor)) rcu_bp_exit(void);

 static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
@@ -66,7 +111,7 @@ long rcu_gp_ctr = RCU_GP_COUNT;
  */
 struct rcu_reader __thread *rcu_reader;

-static LIST_HEAD(registry);
+static CDS_LIST_HEAD(registry);

 struct registry_arena {
 	void *p;
@@ -76,6 +121,9 @@ struct registry_arena {

 static struct registry_arena registry_arena;

+/* Saved fork signal mask, protected by rcu_gp_lock */
+static sigset_t saved_fork_signal_mask;
+
 static void rcu_gc_registry(void);

 static void mutex_lock(pthread_mutex_t *mutex)
@@ -95,11 +143,6 @@ static void mutex_lock(pthread_mutex_t *mutex)
 			perror("Error in pthread mutex lock");
 			exit(-1);
 		}
-		if (rcu_reader.need_mb) {
-			smp_mb();
-			rcu_reader.need_mb = 0;
-			smp_mb();
-		}
 		poll(NULL,0,10);
 	}
 #endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
@@ -116,43 +159,50 @@ static void mutex_unlock(pthread_mutex_t *mutex)
 	}
 }

-/*
- * called with rcu_gp_lock held.
- */
-static void switch_next_rcu_qparity(void)
+void update_counter_and_wait(void)
 {
-	STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
-}
-
-void wait_for_quiescent_state(void)
-{
-	LIST_HEAD(qsreaders);
+	CDS_LIST_HEAD(qsreaders);
 	int wait_loops = 0;
 	struct rcu_reader *index, *tmp;

-	if (list_empty(&registry))
-		return;
+	/* Switch parity: 0 -> 1, 1 -> 0 */
+	CMM_STORE_SHARED(rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR_PHASE);
+
+	/*
+	 * Must commit qparity update to memory before waiting for other parity
+	 * quiescent state. Failure to do so could result in the writer waiting
+	 * forever while new readers are always accessing data (no progress).
+	 * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
+	 */
+
+	/*
+	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
+	 * model easier to understand. It does not have a big performance impact
+	 * anyway, given this is the write-side.
+	 */
+	cmm_smp_mb();
+
 	/*
 	 * Wait for each thread rcu_reader.ctr count to become 0.
 	 */
 	for (;;) {
 		wait_loops++;
-		list_for_each_entry_safe(index, tmp, &registry, head) {
+		cds_list_for_each_entry_safe(index, tmp, &registry, node) {
 			if (!rcu_old_gp_ongoing(&index->ctr))
-				list_move(&index->head, &qsreaders);
+				cds_list_move(&index->node, &qsreaders);
 		}

-		if (list_empty(&registry)) {
+		if (cds_list_empty(&registry)) {
 			break;
 		} else {
 			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
 				usleep(RCU_SLEEP_DELAY);
 			else
-				cpu_relax();
+				caa_cpu_relax();
 		}
 	}
 	/* put back the reader list in the registry */
-	list_splice(&qsreaders, &registry);
+	cds_list_splice(&qsreaders, &registry);
 }

 void synchronize_rcu(void)
@@ -167,79 +217,40 @@ void synchronize_rcu(void)

 	mutex_lock(&rcu_gp_lock);

-	/* Remove old registry elements */
-	rcu_gc_registry();
+	if (cds_list_empty(&registry))
+		goto out;

 	/* All threads should read qparity before accessing data structure
-	 * where new ptr points to. Must be done within rcu_gp_lock because it
-	 * iterates on reader threads.*/
+	 * where new ptr points to. */
 	/* Write new ptr before changing the qparity */
-	smp_mb();
+	cmm_smp_mb();

-	switch_next_rcu_qparity();	/* 0 -> 1 */
-
-	/*
-	 * Must commit qparity update to memory before waiting for parity
-	 * 0 quiescent state. Failure to do so could result in the writer
-	 * waiting forever while new readers are always accessing data (no
-	 * progress).
-	 * Ensured by STORE_SHARED and LOAD_SHARED.
-	 */
-
-	/*
-	 * Adding a smp_mb() which is _not_ formally required, but makes the
-	 * model easier to understand. It does not have a big performance impact
-	 * anyway, given this is the write-side.
-	 */
-	smp_mb();
+	/* Remove old registry elements */
+	rcu_gc_registry();

 	/*
 	 * Wait for previous parity to be empty of readers.
 	 */
-	wait_for_quiescent_state();	/* Wait readers in parity 0 */
-
-	/*
-	 * Must finish waiting for quiescent state for parity 0 before
-	 * committing qparity update to memory. Failure to do so could result in
-	 * the writer waiting forever while new readers are always accessing
-	 * data (no progress).
-	 * Ensured by STORE_SHARED and LOAD_SHARED.
-	 */
+	update_counter_and_wait();	/* 0 -> 1, wait readers in parity 0 */

 	/*
-	 * Adding a smp_mb() which is _not_ formally required, but makes the
+	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
 	 * model easier to understand. It does not have a big performance impact
 	 * anyway, given this is the write-side.
 	 */
-	smp_mb();
-
-	switch_next_rcu_qparity();	/* 1 -> 0 */
-
-	/*
-	 * Must commit qparity update to memory before waiting for parity
-	 * 1 quiescent state. Failure to do so could result in the writer
-	 * waiting forever while new readers are always accessing data (no
-	 * progress).
-	 * Ensured by STORE_SHARED and LOAD_SHARED.
-	 */
+	cmm_smp_mb();

 	/*
-	 * Adding a smp_mb() which is _not_ formally required, but makes the
-	 * model easier to understand. It does not have a big performance impact
-	 * anyway, given this is the write-side.
+	 * Wait for previous parity to be empty of readers.
 	 */
-	smp_mb();
+	update_counter_and_wait();	/* 1 -> 0, wait readers in parity 1 */

 	/*
-	 * Wait for previous parity to be empty of readers.
+	 * Finish waiting for reader threads before letting the old ptr be
+	 * freed.
 	 */
-	wait_for_quiescent_state();	/* Wait readers in parity 1 */
-
-	/* Finish waiting for reader threads before letting the old ptr being
-	 * freed. Must be done within rcu_gp_lock because it iterates on reader
-	 * threads. */
-	smp_mb();
-
+	cmm_smp_mb();
+out:
 	mutex_unlock(&rcu_gp_lock);
 	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
 	assert(!ret);
@@ -282,7 +293,6 @@ static void resize_arena(struct registry_arena *arena, size_t len)

 	if (new_arena == arena->p)
 		return;
-	memcpy(new_arena, arena->p, arena->len);
 	bzero(new_arena + arena->len, len - arena->len);
 	arena->p = new_arena;
 }
@@ -295,7 +305,7 @@ static void add_thread(void)
 	if (registry_arena.len
 	    < registry_arena.used + sizeof(struct rcu_reader))
 		resize_arena(&registry_arena,
-			max(registry_arena.len << 1, ARENA_INIT_ALLOC));
+			caa_max(registry_arena.len << 1, ARENA_INIT_ALLOC));
 	/*
 	 * Find a free spot.
 	 */
@@ -311,7 +321,7 @@ static void add_thread(void)
 	/* Add to registry */
 	rcu_reader_reg->tid = pthread_self();
 	assert(rcu_reader_reg->ctr == 0);
-	list_add(&rcu_reader_reg->head, &registry);
+	cds_list_add(&rcu_reader_reg->node, &registry);
 	rcu_reader = rcu_reader_reg;
 }
@@ -331,7 +341,8 @@ static void rcu_gc_registry(void)
 		ret = pthread_kill(tid, 0);
 		assert(ret != EINVAL);
 		if (ret == ESRCH) {
-			list_del(&rcu_reader_reg->head);
+			cds_list_del(&rcu_reader_reg->node);
+			rcu_reader_reg->ctr = 0;
 			rcu_reader_reg->alloc = 0;
 			registry_arena.used -= sizeof(struct rcu_reader);
 		}
@@ -363,7 +374,54 @@ end:
 	assert(!ret);
 }

-void rcu_bp_exit()
+void rcu_bp_exit(void)
+{
+	if (registry_arena.p)
+		munmap(registry_arena.p, registry_arena.len);
+}
+
+/*
+ * Holding the rcu_gp_lock across fork makes sure that fork() does not race
+ * with a concurrent thread executing with this same lock held. This ensures
+ * that the registry is in a coherent state in the child.
+ */
+void rcu_bp_before_fork(void)
+{
+	sigset_t newmask, oldmask;
+	int ret;
+
+	ret = sigemptyset(&newmask);
+	assert(!ret);
+	ret = pthread_sigmask(SIG_SETMASK, &newmask, &oldmask);
+	assert(!ret);
+	mutex_lock(&rcu_gp_lock);
+	saved_fork_signal_mask = oldmask;
+}
+
+void rcu_bp_after_fork_parent(void)
+{
+	sigset_t oldmask;
+	int ret;
+
+	oldmask = saved_fork_signal_mask;
+	mutex_unlock(&rcu_gp_lock);
+	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+	assert(!ret);
+}
+
+void rcu_bp_after_fork_child(void)
 {
-	munmap(registry_arena.p, registry_arena.len);
+	sigset_t oldmask;
+	int ret;
+
+	rcu_gc_registry();
+	oldmask = saved_fork_signal_mask;
+	mutex_unlock(&rcu_gp_lock);
+	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+	assert(!ret);
 }
+
+DEFINE_RCU_FLAVOR()
+
+#include "urcu-call-rcu-impl.h"
+#include "urcu-defer-impl.h"
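
Usage note (illustrative, not part of the patch): the fork hooks added above
exist so that an application can keep the urcu-bp reader registry coherent
across fork(2). A minimal sketch, assuming the application itself is
responsible for wiring the hooks into pthread_atfork(3); the helper name
setup_rcu_bp_fork_handling is made up:

/* Sketch: register the urcu-bp fork hooks with pthread_atfork(3). */
#include <pthread.h>
#include <stdlib.h>
#include <urcu-bp.h>

static void setup_rcu_bp_fork_handling(void)
{
	/* rcu_bp_before_fork() blocks signals and takes rcu_gp_lock, so the
	 * child inherits a coherent registry; both after-fork hooks release
	 * the lock and restore the saved signal mask. */
	if (pthread_atfork(rcu_bp_before_fork,
			   rcu_bp_after_fork_parent,
			   rcu_bp_after_fork_child))
		abort();	/* pthread_atfork() fails only with ENOMEM */
}

In the child, rcu_bp_after_fork_child() additionally runs rcu_gc_registry(),
reclaiming registry slots of reader threads that no longer exist after fork().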
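
For context, a minimal reader/writer sketch of the guarantee that the two
parity flips in update_counter_and_wait() provide: once synchronize_rcu()
returns, no reader can still hold a reference obtained before the pointer was
replaced. struct cfg, global_cfg and the single-writer assumption are
hypothetical, not part of the patch:

/* Sketch: publish/retire a pointer under urcu-bp. A single writer is
 * assumed (or wrap writer_set_value() in external writer-side locking). */
#include <stdlib.h>
#include <urcu-bp.h>

struct cfg {
	int value;
};

static struct cfg *global_cfg;

static int reader_get_value(void)
{
	struct cfg *c;
	int v = -1;

	rcu_read_lock();	/* "bulletproof": registers this thread lazily */
	c = rcu_dereference(global_cfg);
	if (c)
		v = c->value;
	rcu_read_unlock();
	return v;
}

static void writer_set_value(int value)
{
	struct cfg *newc, *oldc;

	newc = malloc(sizeof(*newc));
	if (!newc)
		abort();
	newc->value = value;
	oldc = global_cfg;
	rcu_assign_pointer(global_cfg, newc);
	synchronize_rcu();	/* waits out both parities: no reader sees oldc */
	free(oldc);
}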