X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu.c;h=ae3490f7cb2cdc1f7cdb1d26f77c0ee22620e9eb;hp=759b94bdb9e9550e178caa04904e6048c7810b2c;hb=5e81fed7cc48d0e14b7478ec1152c3ca2bd01f97;hpb=71210954265a0c7591a7dc9bead07ae18b8cd12f

diff --git a/urcu.c b/urcu.c
index 759b94b..ae3490f 100644
--- a/urcu.c
+++ b/urcu.c
@@ -53,9 +53,9 @@
 /*
  * If a reader is really non-cooperative and refuses to commit its
  * rcu_active_readers count to memory (there is no barrier in the reader
- * per-se), kick it after a few loops waiting for it.
+ * per-se), kick it after 10 loops waiting for it.
  */
-#define KICK_READER_LOOPS 10000
+#define KICK_READER_LOOPS 10
 
 /*
  * Active attempts to check for reader Q.S. before calling futex().
@@ -66,7 +66,7 @@
  * RCU_MEMBARRIER is only possibly available on Linux.
  */
 #if defined(RCU_MEMBARRIER) && defined(__linux__)
-#include <syscall.h>
+#include <urcu/syscall-compat.h>
 #endif
 
 /* If the headers do not support SYS_membarrier, fall back on RCU_MB */
@@ -109,11 +109,6 @@ struct rcu_gp rcu_gp = { .ctr = RCU_GP_COUNT };
  */
 DEFINE_URCU_TLS(struct rcu_reader, rcu_reader);
 
-#ifdef DEBUG_YIELD
-unsigned int rcu_yield_active;
-DEFINE_URCU_TLS(unsigned int, rcu_rand_yield);
-#endif
-
 static CDS_LIST_HEAD(registry);
 
 /*
@@ -235,8 +230,11 @@ static void wait_for_readers(struct cds_list_head *input_readers,
 			struct cds_list_head *cur_snap_readers,
 			struct cds_list_head *qsreaders)
 {
-	int wait_loops = 0;
+	unsigned int wait_loops = 0;
 	struct rcu_reader *index, *tmp;
+#ifdef HAS_INCOHERENT_CACHES
+	unsigned int wait_gp_loops = 0;
+#endif /* HAS_INCOHERENT_CACHES */
 
 	/*
 	 * Wait for each thread URCU_TLS(rcu_reader).ctr to either
@@ -244,8 +242,9 @@ static void wait_for_readers(struct cds_list_head *input_readers,
 	 * rcu_gp.ctr value.
 	 */
 	for (;;) {
-		wait_loops++;
-		if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
+			wait_loops++;
+		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
 			uatomic_dec(&rcu_gp.futex);
 			/* Write futex before read reader_gp */
 			smp_mb_master(RCU_MB_GROUP);
@@ -276,14 +275,14 @@ static void wait_for_readers(struct cds_list_head *input_readers,
 
 #ifndef HAS_INCOHERENT_CACHES
 		if (cds_list_empty(input_readers)) {
-			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
 				/* Read reader_gp before write futex */
 				smp_mb_master(RCU_MB_GROUP);
 				uatomic_set(&rcu_gp.futex, 0);
 			}
 			break;
 		} else {
-			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
+			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
 				wait_gp();
 			else
 				caa_cpu_relax();
@@ -295,22 +294,21 @@ static void wait_for_readers(struct cds_list_head *input_readers,
 		 * for too long.
 		 */
 		if (cds_list_empty(input_readers)) {
-			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS) {
+			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
 				/* Read reader_gp before write futex */
 				smp_mb_master(RCU_MB_GROUP);
 				uatomic_set(&rcu_gp.futex, 0);
 			}
 			break;
 		} else {
-			switch (wait_loops) {
-			case RCU_QS_ACTIVE_ATTEMPTS:
-				wait_gp();
-				break; /* only escape switch */
-			case KICK_READER_LOOPS:
+			if (wait_gp_loops == KICK_READER_LOOPS) {
 				smp_mb_master(RCU_MB_GROUP);
-				wait_loops = 0;
-				break; /* only escape switch */
-			default:
+				wait_gp_loops = 0;
+			}
+			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
+				wait_gp();
+				wait_gp_loops++;
+			} else {
 				caa_cpu_relax();
 			}
 		}
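
Note on the control flow (commentary, not part of the patch): the hunks above replace the old switch-based busy-wait in wait_for_readers() with a spin counter that saturates at RCU_QS_ACTIVE_ATTEMPTS plus a separate counter of actual futex waits. The standalone sketch below mirrors that new structure so it can be compiled and traced in isolation. Everything in it is a stand-in: readers_quiescent() and the stub bodies are hypothetical, the loop folds the coherent- and incoherent-cache paths into the incoherent-cache variant (the only one that uses wait_gp_loops), and the RCU_QS_ACTIVE_ATTEMPTS value of 100 is taken from urcu.c at the time but should be treated as illustrative here.

/*
 * Standalone sketch of the reworked waiting strategy. Build with:
 *   cc -o wait_sketch wait_sketch.c && ./wait_sketch
 */
#include <stdio.h>

#define RCU_QS_ACTIVE_ATTEMPTS	100	/* spin attempts before futex waits */
#define KICK_READER_LOOPS	10	/* futex waits between reader kicks */

/* Stub: pretend the readers need 125 scan iterations to quiesce. */
static int readers_quiescent(int iter)
{
	return iter >= 125;
}

static void wait_gp(void)	{ puts("wait_gp(): sleep on futex"); }
static void smp_mb_master(void)	{ puts("smp_mb_master(): kick readers"); }
static void caa_cpu_relax(void)	{ /* cpu_relax() stub: cheap spin */ }

int main(void)
{
	unsigned int wait_loops = 0;	/* saturates instead of wrapping */
	unsigned int wait_gp_loops = 0;	/* counts wait_gp() calls only */
	int iter = 0;

	for (;;) {
		/*
		 * Saturating increment: once the counter reaches
		 * RCU_QS_ACTIVE_ATTEMPTS it stays there, so the >= tests
		 * below keep selecting the futex path on every iteration.
		 */
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;
		if (readers_quiescent(iter++))
			break;
		/*
		 * Kick non-cooperative readers after every
		 * KICK_READER_LOOPS futex waits, not every
		 * KICK_READER_LOOPS spins as the old switch did.
		 */
		if (wait_gp_loops == KICK_READER_LOOPS) {
			smp_mb_master();
			wait_gp_loops = 0;
		}
		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
			wait_gp();
			wait_gp_loops++;
		} else {
			caa_cpu_relax();
		}
	}
	printf("grace period complete after %d iterations\n", iter);
	return 0;
}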
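Two aspects of the rework are worth spelling out. First, wait_loops used to be a plain int incremented on every iteration, and on the incoherent-cache path it was reset to zero every KICK_READER_LOOPS (10000) iterations, so wait_gp() was reached only on the single iteration where the counter hit RCU_QS_ACTIVE_ATTEMPTS exactly; the rest of the cycle was pure spinning, and on the coherent-cache path the never-reset int could eventually overflow. Making the counter unsigned, saturating it, and switching the comparisons to >= keeps the writer parked in wait_gp() for as long as a reader stays in its critical section. Second, the reader kick is now paced by wait_gp_loops, a count of actual futex waits, which is why KICK_READER_LOOPS drops from 10000 to 10 and the updated comment reads "kick it after 10 loops waiting for it": a loop now means one wait_gp() call rather than one spin.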