X-Git-Url: http://git.liburcu.org/?p=userspace-rcu.git;a=blobdiff_plain;f=tests%2Ftest_urcu_bp.c;h=30c79c17e32113a2c85ec219beb60c1b0f301528;hp=79711541ee3e1fc52387ce7c8a62653a55ac6b9b;hb=95d8822d8b4c4a3563bf51c7c718350eb2babd20;hpb=a50a7b4333a5039f1471c0b7cfbfb3d6dc5379fb diff --git a/tests/test_urcu_bp.c b/tests/test_urcu_bp.c index 7971154..30c79c1 100644 --- a/tests/test_urcu_bp.c +++ b/tests/test_urcu_bp.c @@ -3,7 +3,7 @@ * * Userspace RCU library - test program * - * Copyright February 2009 - Mathieu Desnoyers + * Copyright February 2009 - Mathieu Desnoyers * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -31,11 +31,15 @@ #include #include #include -#include -#include #include #include +#include +#include "cpuset.h" + +#ifdef __linux__ +#include +#endif /* hardcoded number of CPUs */ #define NR_CPUS 16384 @@ -83,7 +87,7 @@ static unsigned long wduration; static inline void loop_sleep(unsigned long l) { while(l-- != 0) - cpu_relax(); + caa_cpu_relax(); } static int verbose_mode; @@ -100,12 +104,6 @@ static int use_affinity = 0; pthread_mutex_t affinity_mutex = PTHREAD_MUTEX_INITIALIZER; -#ifndef HAVE_CPU_SET_T -typedef unsigned long cpu_set_t; -# define CPU_ZERO(cpuset) do { *(cpuset) = 0; } while(0) -# define CPU_SET(cpu, cpuset) do { *(cpuset) |= (1UL << (cpu)); } while(0) -#endif - static void set_affinity(void) { cpu_set_t mask; @@ -151,8 +149,8 @@ static int test_duration_read(void) return !test_stop; } -static unsigned long long __thread nr_writes; -static unsigned long long __thread nr_reads; +static DEFINE_URCU_TLS(unsigned long long, nr_writes); +static DEFINE_URCU_TLS(unsigned long long, nr_reads); static unsigned int nr_readers; static unsigned int nr_writers; @@ -183,7 +181,8 @@ void rcu_copy_mutex_unlock(void) /* * malloc/free are reusing memory areas too quickly, which does not let us * test races appropriately. Use a large circular array for allocations. - * ARRAY_SIZE is larger than nr_writers, which insures we never run over our tail. + * ARRAY_SIZE is larger than nr_writers, and we keep the mutex across + * both alloc and free, which insures we never run over our tail. 
*/ #define ARRAY_SIZE (1048576 * nr_writers) #define ARRAY_POISON 0xDEADBEEF @@ -195,7 +194,6 @@ static struct test_array *test_array_alloc(void) struct test_array *ret; int index; - rcu_copy_mutex_lock(); index = array_index % ARRAY_SIZE; assert(test_array[index].a == ARRAY_POISON || test_array[index].a == 0); @@ -203,7 +201,6 @@ static struct test_array *test_array_alloc(void) array_index++; if (array_index == ARRAY_SIZE) array_index = 0; - rcu_copy_mutex_unlock(); return ret; } @@ -211,9 +208,7 @@ static void test_array_free(struct test_array *ptr) { if (!ptr) return; - rcu_copy_mutex_lock(); ptr->a = ARRAY_POISON; - rcu_copy_mutex_unlock(); } void *thr_reader(void *_count) @@ -222,7 +217,8 @@ void *thr_reader(void *_count) struct test_array *local_ptr; printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n", - "reader", pthread_self(), (unsigned long)gettid()); + "reader", (unsigned long) pthread_self(), + (unsigned long) gettid()); set_affinity(); @@ -231,7 +227,7 @@ void *thr_reader(void *_count) while (!test_go) { } - smp_mb(); + cmm_smp_mb(); for (;;) { rcu_read_lock(); @@ -239,19 +235,20 @@ void *thr_reader(void *_count) debug_yield_read(); if (local_ptr) assert(local_ptr->a == 8); - if (unlikely(rduration)) + if (caa_unlikely(rduration)) loop_sleep(rduration); rcu_read_unlock(); - nr_reads++; - if (unlikely(!test_duration_read())) + URCU_TLS(nr_reads)++; + if (caa_unlikely(!test_duration_read())) break; } rcu_unregister_thread(); - *count = nr_reads; + *count = URCU_TLS(nr_reads); printf_verbose("thread_end %s, thread id : %lx, tid %lu\n", - "reader", pthread_self(), (unsigned long)gettid()); + "reader", (unsigned long) pthread_self(), + (unsigned long) gettid()); return ((void*)1); } @@ -262,35 +259,39 @@ void *thr_writer(void *_count) struct test_array *new, *old; printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n", - "writer", pthread_self(), (unsigned long)gettid()); + "writer", (unsigned long) pthread_self(), + (unsigned long) gettid()); set_affinity(); while (!test_go) { } - smp_mb(); + cmm_smp_mb(); for (;;) { + rcu_copy_mutex_lock(); new = test_array_alloc(); new->a = 8; old = rcu_xchg_pointer(&test_rcu_pointer, new); - if (unlikely(wduration)) + if (caa_unlikely(wduration)) loop_sleep(wduration); synchronize_rcu(); if (old) old->a = 0; test_array_free(old); - nr_writes++; - if (unlikely(!test_duration_write())) + rcu_copy_mutex_unlock(); + URCU_TLS(nr_writes)++; + if (caa_unlikely(!test_duration_write())) break; - if (unlikely(wdelay)) + if (caa_unlikely(wdelay)) loop_sleep(wdelay); } printf_verbose("thread_end %s, thread id : %lx, tid %lu\n", - "writer", pthread_self(), (unsigned long)gettid()); - *count = nr_writes; + "writer", (unsigned long) pthread_self(), + (unsigned long) gettid()); + *count = URCU_TLS(nr_writes); return ((void*)2); } @@ -394,9 +395,10 @@ int main(int argc, char **argv) printf_verbose("Writer delay : %lu loops.\n", wdelay); printf_verbose("Reader duration : %lu loops.\n", rduration); printf_verbose("thread %-6s, thread id : %lx, tid %lu\n", - "main", pthread_self(), (unsigned long)gettid()); + "main", (unsigned long) pthread_self(), + (unsigned long) gettid()); - test_array = malloc(sizeof(*test_array) * ARRAY_SIZE); + test_array = calloc(1, sizeof(*test_array) * ARRAY_SIZE); tid_reader = malloc(sizeof(*tid_reader) * nr_readers); tid_writer = malloc(sizeof(*tid_writer) * nr_writers); count_reader = malloc(sizeof(*count_reader) * nr_readers); @@ -417,7 +419,7 @@ int main(int argc, char **argv) exit(1); } - smp_mb(); + cmm_smp_mb(); 
test_go = 1;
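
The hunks above track liburcu API renames (cpu_relax -> caa_cpu_relax, smp_mb -> cmm_smp_mb, unlikely -> caa_unlikely), move the per-thread counters to the DEFINE_URCU_TLS()/URCU_TLS() compat wrappers, switch the test_array allocation to calloc() so the first-use assertion on a zeroed slot holds, and widen the rcu_copy_mutex critical section so a writer holds it across both the circular-array allocation and the poisoning of the old element. Below is a minimal, self-contained sketch of the resulting writer/reader pattern, assuming the urcu-bp flavor and <urcu/tls-compat.h>; struct elem, get_slot(), put_slot() and SLOTS are illustrative placeholders, not helpers from the patch.

/*
 * Sketch only -- not part of the patch.  Placeholder names: struct elem,
 * get_slot(), put_slot(), SLOTS.
 */
#include <pthread.h>
#include <urcu-bp.h>		/* bulletproof flavor; <urcu/urcu-bp.h> on newer liburcu */
#include <urcu/tls-compat.h>	/* DEFINE_URCU_TLS() / URCU_TLS() */

struct elem {
	int a;
};

static struct elem *rcu_ptr;	/* RCU-protected pointer */
static pthread_mutex_t alloc_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Per-thread counter through the TLS compat layer, as in the patch. */
static DEFINE_URCU_TLS(unsigned long long, nr_writes);

/* Tiny circular-array allocator; caller must hold alloc_mutex. */
#define SLOTS 16
static struct elem slots[SLOTS];
static int slot_idx;

static struct elem *get_slot(void)
{
	struct elem *e = &slots[slot_idx];

	slot_idx = (slot_idx + 1) % SLOTS;
	return e;
}

static void put_slot(struct elem *e)
{
	if (!e)
		return;
	e->a = 0xDEADBEEF;	/* poison, mirroring ARRAY_POISON in the test */
}

static void writer_iteration(void)
{
	struct elem *new, *old;

	/*
	 * Hold the mutex across both the allocation and the release of
	 * the old element, so a slow writer cannot lap the circular
	 * array and reuse a slot another writer still references.
	 */
	pthread_mutex_lock(&alloc_mutex);
	new = get_slot();
	new->a = 8;
	old = rcu_xchg_pointer(&rcu_ptr, new);	/* publish, fetch previous */
	synchronize_rcu();			/* wait out pre-existing readers */
	if (old)
		old->a = 0;
	put_slot(old);
	pthread_mutex_unlock(&alloc_mutex);

	URCU_TLS(nr_writes)++;
}

static void reader_iteration(void)
{
	struct elem *p;

	rcu_read_lock();		/* urcu-bp registers the thread lazily */
	p = rcu_dereference(rcu_ptr);
	if (p)
		(void) p->a;		/* the test asserts p->a == 8 here */
	rcu_read_unlock();
}

int main(void)
{
	writer_iteration();
	reader_iteration();
	return 0;
}

Building the sketch needs only the bp flavor library, e.g. "cc sketch.c -lurcu-bp". The point of the widened critical section is visible in writer_iteration(): because synchronize_rcu() and the slot release happen before the mutex is dropped, ARRAY_SIZE being larger than nr_writers is enough to guarantee a slot is never handed out again while an old reference can still be observed.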