X-Git-Url: https://git.liburcu.org/?a=blobdiff_plain;f=tests%2Ftest_urcu.c;h=1b1b94b5bfcebc77c3fec5481e528858d62ed4f9;hb=ae51baf267fc36ccc259f9287e2750c138ff80bc;hp=bbdb2d8f4ac33abe82aa23ce6a8844a2db179b89;hpb=92e8d9d64a97e2d098cefa223208b2d18118194e;p=urcu.git

diff --git a/tests/test_urcu.c b/tests/test_urcu.c
index bbdb2d8..1b1b94b 100644
--- a/tests/test_urcu.c
+++ b/tests/test_urcu.c
@@ -31,11 +31,15 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 
 #include 
+#include 
+
+#ifdef __linux__
+#include 
+#endif
 
 /* hardcoded number of CPUs */
 #define NR_CPUS 16384
@@ -151,8 +155,8 @@ static int test_duration_read(void)
         return !test_stop;
 }
 
-static unsigned long long __thread nr_writes;
-static unsigned long long __thread nr_reads;
+static DEFINE_URCU_TLS(unsigned long long, nr_writes);
+static DEFINE_URCU_TLS(unsigned long long, nr_reads);
 
 static unsigned int nr_readers;
 static unsigned int nr_writers;
@@ -183,7 +187,8 @@ void rcu_copy_mutex_unlock(void)
 /*
  * malloc/free are reusing memory areas too quickly, which does not let us
  * test races appropriately. Use a large circular array for allocations.
- * ARRAY_SIZE is larger than nr_writers, which insures we never run over our tail.
+ * ARRAY_SIZE is larger than nr_writers, and we keep the mutex across
+ * both alloc and free, which insures we never run over our tail.
  */
 #define ARRAY_SIZE (1048576 * nr_writers)
 #define ARRAY_POISON 0xDEADBEEF
@@ -195,7 +200,6 @@ static struct test_array *test_array_alloc(void)
         struct test_array *ret;
         int index;
 
-        rcu_copy_mutex_lock();
         index = array_index % ARRAY_SIZE;
         assert(test_array[index].a == ARRAY_POISON ||
                 test_array[index].a == 0);
@@ -203,7 +207,6 @@ static struct test_array *test_array_alloc(void)
         array_index++;
         if (array_index == ARRAY_SIZE)
                 array_index = 0;
-        rcu_copy_mutex_unlock();
         return ret;
 }
 
@@ -211,9 +214,7 @@ static void test_array_free(struct test_array *ptr)
 {
         if (!ptr)
                 return;
-        rcu_copy_mutex_lock();
         ptr->a = ARRAY_POISON;
-        rcu_copy_mutex_unlock();
 }
 
 void *thr_reader(void *_count)
@@ -239,11 +240,11 @@ void *thr_reader(void *_count)
                 debug_yield_read();
                 if (local_ptr)
                         assert(local_ptr->a == 8);
-                if (unlikely(rduration))
+                if (caa_unlikely(rduration))
                         loop_sleep(rduration);
                 rcu_read_unlock();
-                nr_reads++;
-                if (unlikely(!test_duration_read()))
+                URCU_TLS(nr_reads)++;
+                if (caa_unlikely(!test_duration_read()))
                         break;
         }
 
@@ -253,7 +254,7 @@ void *thr_reader(void *_count)
         rcu_register_thread();
         rcu_unregister_thread();
 
-        *count = nr_reads;
+        *count = URCU_TLS(nr_reads);
         printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
                         "reader", pthread_self(), (unsigned long)gettid());
         return ((void*)1);
@@ -276,25 +277,27 @@ void *thr_writer(void *_count)
         cmm_smp_mb();
 
         for (;;) {
+                rcu_copy_mutex_lock();
                 new = test_array_alloc();
                 new->a = 8;
                 old = rcu_xchg_pointer(&test_rcu_pointer, new);
-                if (unlikely(wduration))
+                if (caa_unlikely(wduration))
                         loop_sleep(wduration);
                 synchronize_rcu();
                 if (old)
                         old->a = 0;
                 test_array_free(old);
-                nr_writes++;
-                if (unlikely(!test_duration_write()))
+                rcu_copy_mutex_unlock();
+                URCU_TLS(nr_writes)++;
+                if (caa_unlikely(!test_duration_write()))
                         break;
-                if (unlikely(wdelay))
+                if (caa_unlikely(wdelay))
                         loop_sleep(wdelay);
         }
 
         printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
                         "writer", pthread_self(), (unsigned long)gettid());
-        *count = nr_writes;
+        *count = URCU_TLS(nr_writes);
         return ((void*)2);
 }
 
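Note on the API changes above: the diff replaces GCC-specific __thread storage with liburcu's DEFINE_URCU_TLS()/URCU_TLS() wrappers (declared in urcu/tls-compat.h) and the local unlikely() macro with caa_unlikely() (from urcu/compiler.h). The sketch below is a minimal, standalone illustration of that reader-side pattern; it is not part of the test suite, and the file name, build line and trivial shared pointer are assumptions.

/*
 * tls_sketch.c: hypothetical reader-side sketch of the wrappers the diff
 * above switches to: DEFINE_URCU_TLS()/URCU_TLS() instead of __thread,
 * caa_unlikely() instead of unlikely().
 * Assumed build line: gcc tls_sketch.c -o tls_sketch -lurcu -lpthread
 */
#include <stdio.h>
#include <pthread.h>
#include <urcu.h>               /* rcu_register_thread(), rcu_read_lock(), ... */
#include <urcu/tls-compat.h>    /* DEFINE_URCU_TLS(), URCU_TLS() */
#include <urcu/compiler.h>      /* caa_unlikely() */

/* One counter per thread, without relying on compiler-specific __thread. */
static DEFINE_URCU_TLS(unsigned long long, nr_reads);

static int *shared_ptr;         /* RCU-protected pointer (stays NULL here) */

static void *reader(void *arg)
{
        int i;

        (void) arg;
        rcu_register_thread();  /* reader threads must register with urcu */
        for (i = 0; i < 1000000; i++) {
                int *p;

                rcu_read_lock();
                p = rcu_dereference(shared_ptr);
                if (caa_unlikely(p != NULL))    /* portable branch hint */
                        (void) *p;
                rcu_read_unlock();
                URCU_TLS(nr_reads)++;           /* this thread's counter */
        }
        printf("reads done by this thread: %llu\n", URCU_TLS(nr_reads));
        rcu_unregister_thread();
        return NULL;
}

int main(void)
{
        pthread_t tid;

        pthread_create(&tid, NULL, reader, NULL);
        pthread_join(tid, NULL);
        return 0;
}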
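The other behavioural change is in the writer path: rcu_copy_mutex_lock()/rcu_copy_mutex_unlock() move out of test_array_alloc()/test_array_free() and into thr_writer(), so a single critical section now covers allocation, publication, synchronize_rcu() and reclamation. As the updated comment notes, keeping the mutex across both alloc and free is what keeps the circular allocator from running over its own tail. Below is a hedged sketch of that ordering using a plain malloc'd node instead of the test's circular array; the node type, file name and build line are assumptions.

/*
 * writer_sketch.c: sketch of the writer-side ordering established above
 * (hypothetical node type; the real test recycles slots from a circular
 * array rather than calling malloc/free).
 * Assumed build line: gcc writer_sketch.c -o writer_sketch -lurcu -lpthread
 */
#include <stdlib.h>
#include <pthread.h>
#include <urcu.h>               /* rcu_xchg_pointer(), synchronize_rcu() */

struct node {
        int a;
};

static struct node *rcu_ptr;    /* RCU-protected shared pointer */
static pthread_mutex_t alloc_mutex = PTHREAD_MUTEX_INITIALIZER;

static void writer_iteration(void)
{
        struct node *new, *old;

        /*
         * Hold the allocator lock across both the allocation of the new
         * node and the reclamation of the old one, mirroring how the test
         * now keeps rcu_copy_mutex held around test_array_alloc() and
         * test_array_free().
         */
        pthread_mutex_lock(&alloc_mutex);
        new = malloc(sizeof(*new));
        new->a = 8;
        /* Publish: concurrent readers see either the old or the new node. */
        old = rcu_xchg_pointer(&rcu_ptr, new);
        /* Wait for pre-existing readers before touching the old node. */
        synchronize_rcu();
        free(old);              /* free(NULL) is a no-op on the first call */
        pthread_mutex_unlock(&alloc_mutex);
}

int main(void)
{
        int i;

        for (i = 0; i < 3; i++)
                writer_iteration();
        free(rcu_ptr);          /* no readers exist in this single-threaded sketch */
        return 0;
}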