Add DEBUG_YIELD, add test duration
author     Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
           Mon, 9 Feb 2009 00:01:58 +0000 (19:01 -0500)
committer  Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
           Mon, 9 Feb 2009 00:01:58 +0000 (19:01 -0500)
Add tests that call the scheduler (sched_yield(), enabled with DEBUG_YIELD),
and add a duration parameter to test_urcu.c.
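
For example, the new yield-instrumented test could be run as follows
(illustrative invocation; the binary name, duration argument and flags
come from the Makefile and test_urcu.c changes below):

    ./test_urcu_yield 10 -r -w

which runs the test for 10 seconds with sched_yield() calls enabled on
both the read side (-r) and the write side (-w).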

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Makefile
test_urcu.c
urcu.c
urcu.h

diff --git a/Makefile b/Makefile
index 1bc089f2948246770898e3a5405481321748c371..cac6b5cada5f090e46d7d32ed33e05efd17d9f61 100644
--- a/Makefile
+++ b/Makefile
@@ -6,11 +6,14 @@ LDFLAGS=-lpthread
 
 SRC_DEP=`echo $^ | sed 's/[^ ]*.h//g'`
 
-all: test_urcu test_urcu_timing test_rwlock_timing
+all: test_urcu test_urcu_timing test_rwlock_timing test_urcu_yield
 
 test_urcu: urcu.o test_urcu.c
        $(CC) ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
 
+test_urcu_yield: urcu-yield.o test_urcu.c
+       $(CC) -DDEBUG_YIELD ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
+
 test_urcu_timing: urcu.o test_urcu_timing.c
        $(CC) ${CFLAGS} $(LDFLAGS) -o $@ $(SRC_DEP)
 
@@ -20,6 +23,9 @@ test_rwlock_timing: urcu.o test_rwlock_timing.c
 urcu.o: urcu.c urcu.h
        $(CC) ${CFLAGS} $(LDFLAGS) -c -o $@ $(SRC_DEP)
 
+urcu-yield.o: urcu.c urcu.h
+       $(CC) -DDEBUG_YIELD ${CFLAGS} $(LDFLAGS) -c -o $@ $(SRC_DEP)
+
 .PHONY: clean
 
 clean:
diff --git a/test_urcu.c b/test_urcu.c
index f6be45b1e2769a9f700e4bb0cf0968046db5a2e1..93bc013b4a3a3575e598ef10eb61096871db7aa1 100644
--- a/test_urcu.c
+++ b/test_urcu.c
@@ -38,12 +38,28 @@ static inline pid_t gettid(void)
 
 struct test_array {
        int a;
-       int b;
-       char c[200];
 };
 
 static struct test_array *test_rcu_pointer;
 
+static unsigned long duration;
+static time_t start_time;
+static unsigned long __thread duration_interval;
+#define DURATION_TEST_DELAY 100
+
+/*
+ * returns 0 if test should end.
+ */
+static int test_duration(void)
+{
+       if (duration_interval++ >= DURATION_TEST_DELAY) {
+               duration_interval = 0;
+               if (time(NULL) - start_time >= duration)
+                       return 0;
+       }
+       return 1;
+}
+
 #define NR_READ 10
 #define NR_WRITE 9
 
@@ -72,77 +88,99 @@ void rcu_copy_mutex_unlock(void)
 
 void *thr_reader(void *arg)
 {
-       int qparity, i, j;
+       int qparity;
        struct test_array *local_ptr;
 
-       printf("thread %s, thread id : %lx, tid %lu\n",
+       printf("thread_begin %s, thread id : %lx, tid %lu\n",
                        "reader", pthread_self(), (unsigned long)gettid());
-       sleep(2);
 
        urcu_register_thread();
 
-       for (i = 0; i < 100000; i++) {
-               for (j = 0; j < 100000000; j++) {
-                       rcu_read_lock(&qparity);
-                       local_ptr = rcu_dereference(test_rcu_pointer);
-                       if (local_ptr) {
-                               assert(local_ptr->a == 8);
-                               assert(local_ptr->b == 12);
-                               assert(local_ptr->c[55] == 2);
-                       }
-                       rcu_read_unlock(&qparity);
-               }
+       for (;;) {
+               rcu_read_lock(&qparity);
+               local_ptr = rcu_dereference(test_rcu_pointer);
+               if (local_ptr)
+                       assert(local_ptr->a == 8);
+               rcu_read_unlock(&qparity);
+               if (!test_duration())
+                       break;
        }
 
        urcu_unregister_thread();
 
+       printf("thread_end %s, thread id : %lx, tid %lu\n",
+                       "reader", pthread_self(), (unsigned long)gettid());
        return ((void*)1);
 
 }
 
 void *thr_writer(void *arg)
 {
-       int i;
        struct test_array *new, *old;
 
-       printf("thread %s, thread id : %lx, tid %lu\n",
+       printf("thread_begin %s, thread id : %lx, tid %lu\n",
                        "writer", pthread_self(), (unsigned long)gettid());
-       sleep(2);
 
-       for (i = 0; i < 10000000; i++) {
+       for (;;) {
                new = malloc(sizeof(struct test_array));
                rcu_copy_mutex_lock();
                old = test_rcu_pointer;
-               if (old) {
+               if (old)
                        assert(old->a == 8);
-                       assert(old->b == 12);
-                       assert(old->c[55] == 2);
-               }
-               new->c[55] = 2;
-               new->b = 12;
                new->a = 8;
                old = urcu_publish_content((void **)&test_rcu_pointer, new);
                rcu_copy_mutex_unlock();
                /* can be done after unlock */
-               if (old) {
+               if (old)
                        old->a = 0;
-                       old->b = 0;
-                       old->c[55] = 0;
-               }
                free(old);
+               if (!test_duration())
+                       break;
                usleep(1);
        }
 
+       printf("thread_end %s, thread id : %lx, tid %lu\n",
+                       "writer", pthread_self(), (unsigned long)gettid());
        return ((void*)2);
 }
 
-int main()
+int main(int argc, char **argv)
 {
        int err;
        pthread_t tid_reader[NR_READ], tid_writer[NR_WRITE];
        void *tret;
        int i;
 
+       if (argc < 2) {
+               printf("Usage : %s duration (s) [-r] [-w] "
+                      "(yield reader and/or writer)\n", argv[0]);
+               return -1;
+       }
+
+       err = sscanf(argv[1], "%lu", &duration);
+       if (err != 1) {
+               printf("Usage : %s duration (s) [-r] [-w] "
+                      "(yield reader and/or writer)\n", argv[0]);
+               return -1;
+       }
+
+#ifdef DEBUG_YIELD
+       for (i = 2; i < argc; i++) {
+               if (argv[i][0] != '-')
+                       continue;
+               switch (argv[i][1]) {
+               case 'r':
+                       yield_active |= YIELD_READ;
+                       break;
+               case 'w':
+                       yield_active |= YIELD_WRITE;
+                       break;
+               }
+       }
+#endif
+
+       printf("running test for %lu seconds.\n", duration);
+       start_time = time(NULL);
        printf("thread %-6s, thread id : %lx, tid %lu\n",
                        "main", pthread_self(), (unsigned long)gettid());
 
@@ -157,8 +195,6 @@ int main()
                        exit(1);
        }
 
-       sleep(10);
-
        for (i = 0; i < NR_READ; i++) {
                err = pthread_join(tid_reader[i], &tret);
                if (err != 0)
diff --git a/urcu.c b/urcu.c
index 08fb75dd0feae13421647f7d84615bee6d88654f..4362217d49ab4020c0bd199c30410733f32bbd89 100644
--- a/urcu.c
+++ b/urcu.c
@@ -32,6 +32,10 @@ struct reader_data {
        int *urcu_active_readers;
 };
 
+#ifdef DEBUG_YIELD
+int yield_active;
+#endif
+
 static struct reader_data *reader_data;
 static int num_readers, alloc_readers;
 static int sig_done;
@@ -76,17 +80,24 @@ static void force_mb_all_threads(void)
         */
        if (!reader_data)
                return;
+       debug_yield_write();
        sig_done = 0;
+       debug_yield_write();
        mb();   /* write sig_done before sending the signals */
-       for (index = reader_data; index < reader_data + num_readers; index++)
+       debug_yield_write();
+       for (index = reader_data; index < reader_data + num_readers; index++) {
                pthread_kill(index->tid, SIGURCU);
+               debug_yield_write();
+       }
        /*
         * Wait for sighandler (and thus mb()) to execute on every thread.
         * BUSY-LOOP.
         */
        while (sig_done < num_readers)
                barrier();
+       debug_yield_write();
        mb();   /* read sig_done before ending the barrier */
+       debug_yield_write();
 }
 
 void wait_for_quiescent_state(int parity)
@@ -120,7 +131,9 @@ static void switch_qparity(void)
        /* All threads should read qparity before accessing data structure. */
        /* Write ptr before changing the qparity */
        force_mb_all_threads();
+       debug_yield_write();
        prev_parity = switch_next_urcu_qparity();
+       debug_yield_write();
 
        /*
         * Wait for previous parity to be empty of readers.
@@ -130,10 +143,15 @@ static void switch_qparity(void)
 
 void synchronize_rcu(void)
 {
+       debug_yield_write();
        internal_urcu_lock();
+       debug_yield_write();
        switch_qparity();
+       debug_yield_write();
        switch_qparity();
+       debug_yield_write();
        internal_urcu_lock();
+       debug_yield_write();
 }
 
 /*
@@ -144,7 +162,9 @@ void *urcu_publish_content(void **ptr, void *new)
 {
        void *oldptr;
 
+       debug_yield_write();
        internal_urcu_lock();
+       debug_yield_write();
        /*
         * We can publish the new pointer before we change the current qparity.
         * Readers seeing the new pointer while being in the previous qparity
@@ -156,11 +176,16 @@ void *urcu_publish_content(void **ptr, void *new)
         * when the next quiescent state window will be over.
         */
        oldptr = *ptr;
+       debug_yield_write();
        *ptr = new;
 
+       debug_yield_write();
        switch_qparity();
+       debug_yield_write();
        switch_qparity();
+       debug_yield_write();
        internal_urcu_unlock();
+       debug_yield_write();
 
        return oldptr;
 }
diff --git a/urcu.h b/urcu.h
index b6b5c7b039e359225d947afb91a29a2f25ba6a3c..2aa35977036d9432884206983c21b6bc686b9cf0 100644
--- a/urcu.h
+++ b/urcu.h
@@ -66,6 +66,35 @@ static inline void atomic_inc(int *v)
 
 #define SIGURCU SIGUSR1
 
+#ifdef DEBUG_YIELD
+#include <sched.h>
+
+#define YIELD_READ     (1 << 0)
+#define YIELD_WRITE    (1 << 1)
+
+extern int yield_active;
+
+static inline void debug_yield_read(void)
+{
+       if (yield_active & YIELD_READ)
+               sched_yield();
+}
+
+static inline void debug_yield_write(void)
+{
+       if (yield_active & YIELD_WRITE)
+               sched_yield();
+}
+#else
+static inline void debug_yield_read(void)
+{
+}
+
+static inline void debug_yield_write(void)
+{
+}
+#endif
+
 /* Global quiescent period parity */
 extern int urcu_qparity;
 
@@ -81,23 +110,30 @@ static inline int get_urcu_qparity(void)
  */
 static inline void rcu_read_lock(int *urcu_parity)
 {
+       debug_yield_read();
        *urcu_parity = get_urcu_qparity();
+       debug_yield_read();
        urcu_active_readers[*urcu_parity]++;
+       debug_yield_read();
        /*
         * Increment active readers count before accessing the pointer.
         * See force_mb_all_threads().
         */
        barrier();
+       debug_yield_read();
 }
 
 static inline void rcu_read_unlock(int *urcu_parity)
 {
+       debug_yield_read();
        barrier();
+       debug_yield_read();
        /*
         * Finish using rcu before decrementing the pointer.
         * See force_mb_all_threads().
         */
        urcu_active_readers[*urcu_parity]--;
+       debug_yield_read();
 }
 
 extern void *urcu_publish_content(void **ptr, void *new);
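
For reference, a minimal read-side sketch of how the new DEBUG_YIELD hooks
come into play (assumptions: built with -DDEBUG_YIELD and linked against
urcu-yield.o as in the Makefile above; struct test_array and
test_rcu_pointer mirror the test program and are not part of the urcu.h
API):

    #include <assert.h>
    #include "urcu.h"

    /* These mirror test_urcu.c; they are not part of urcu.h. */
    struct test_array { int a; };
    static struct test_array *test_rcu_pointer;

    /* The calling thread is assumed to have run urcu_register_thread(). */
    static void reader_once(void)
    {
            int qparity;
            struct test_array *local_ptr;

            /* Normally set once at startup (what test_urcu.c does for -r):
             * makes debug_yield_read() call sched_yield(). */
            yield_active |= YIELD_READ;

            rcu_read_lock(&qparity);        /* may yield around each step */
            local_ptr = rcu_dereference(test_rcu_pointer);
            if (local_ptr)
                    assert(local_ptr->a == 8);
            rcu_read_unlock(&qparity);      /* may yield again */
    }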