X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu.c;h=83d2fe4d6d9c367796d123fb05286f8305dfc332;hp=c55a5a2be9395ad691876e8e1b58d787729207d4;hb=9d335088c7c7eb6219e32ec9426d336f3a211a77;hpb=f69f195a06af55b7501ed2f59ed719970727ce5b

diff --git a/urcu.c b/urcu.c
index c55a5a2..83d2fe4 100644
--- a/urcu.c
+++ b/urcu.c
@@ -1,3 +1,13 @@
+/*
+ * urcu.c
+ *
+ * Userspace RCU library
+ *
+ * Copyright February 2009 - Mathieu Desnoyers
+ *
+ * Distributed under GPLv2
+ */
+
 #include
 #include
 #include
@@ -22,10 +32,36 @@ struct reader_data {
 	int *urcu_active_readers;
 };
 
+#ifdef DEBUG_YIELD
+unsigned int yield_active;
+unsigned int __thread rand_yield;
+#endif
+
 static struct reader_data *reader_data;
 static int num_readers, alloc_readers;
 static int sig_done;
 
+void internal_urcu_lock(void)
+{
+	int ret;
+	ret = pthread_mutex_lock(&urcu_mutex);
+	if (ret) {
+		perror("Error in pthread mutex lock");
+		exit(-1);
+	}
+}
+
+void internal_urcu_unlock(void)
+{
+	int ret;
+
+	ret = pthread_mutex_unlock(&urcu_mutex);
+	if (ret) {
+		perror("Error in pthread mutex unlock");
+		exit(-1);
+	}
+}
+
 /*
  * called with urcu_mutex held.
  */
@@ -45,17 +81,24 @@ static void force_mb_all_threads(void)
 	 */
	if (!reader_data)
 		return;
+	debug_yield_write();
 	sig_done = 0;
+	debug_yield_write();
 	mb();	/* write sig_done before sending the signals */
-	for (index = reader_data; index < reader_data + num_readers; index++)
+	debug_yield_write();
+	for (index = reader_data; index < reader_data + num_readers; index++) {
 		pthread_kill(index->tid, SIGURCU);
+		debug_yield_write();
+	}
 	/*
 	 * Wait for sighandler (and thus mb()) to execute on every thread.
 	 * BUSY-LOOP.
 	 */
 	while (sig_done < num_readers)
 		barrier();
+	debug_yield_write();
 	mb();	/* read sig_done before ending the barrier */
+	debug_yield_write();
 }
 
 void wait_for_quiescent_state(int parity)
@@ -70,7 +113,7 @@ void wait_for_quiescent_state(int parity)
 		/*
 		 * BUSY-LOOP.
 		 */
-		while (*index->urcu_active_readers != 0)
+		while (index->urcu_active_readers[parity] != 0)
 			barrier();
 	}
 	/*
@@ -82,20 +125,47 @@ void wait_for_quiescent_state(int parity)
 	force_mb_all_threads();
 }
 
+static void switch_qparity(void)
+{
+	int prev_parity;
+
+	/* All threads should read qparity before accessing data structure. */
+	/* Write ptr before changing the qparity */
+	force_mb_all_threads();
+	debug_yield_write();
+	prev_parity = switch_next_urcu_qparity();
+	debug_yield_write();
+
+	/*
+	 * Wait for previous parity to be empty of readers.
+	 */
+	wait_for_quiescent_state(prev_parity);
+}
+
+void synchronize_rcu(void)
+{
+	debug_yield_write();
+	internal_urcu_lock();
+	debug_yield_write();
+	switch_qparity();
+	debug_yield_write();
+	switch_qparity();
+	debug_yield_write();
+	internal_urcu_unlock();
+	debug_yield_write();
+}
+
 /*
  * Return old pointer, OK to free, no more reference exist.
+ * Called under rcu_write_lock.
  */
 void *urcu_publish_content(void **ptr, void *new)
 {
-	int ret, prev_parity;
 	void *oldptr;
 
-	ret = pthread_mutex_lock(&urcu_mutex);
-	if (ret) {
-		perror("Error in pthread mutex lock");
-		exit(-1);
-	}
-
+	debug_yield_write();
+	internal_urcu_lock();
+	debug_yield_write();
 	/*
 	 * We can publish the new pointer before we change the current qparity.
 	 * Readers seeing the new pointer while being in the previous qparity
@@ -107,25 +177,17 @@ void *urcu_publish_content(void **ptr, void *new)
 	 * when the next quiescent state window will be over.
 	 */
 	oldptr = *ptr;
+	debug_yield_write();
 	*ptr = new;
-	wmb();		/* Write ptr before changing the qparity */
-	/* All threads should read qparity before ptr */
-	force_mb_all_threads();
-	prev_parity = switch_next_urcu_qparity();
-	/*
-	 * Wait for previous parity to be empty of readers.
-	 */
-	wait_for_quiescent_state(prev_parity);
-	/*
-	 * Deleting old data is ok !
-	 */
-
-	ret = pthread_mutex_unlock(&urcu_mutex);
-	if (ret) {
-		perror("Error in pthread mutex lock");
-		exit(-1);
-	}
 
+	debug_yield_write();
+	switch_qparity();
+	debug_yield_write();
+	switch_qparity();
+	debug_yield_write();
+	internal_urcu_unlock();
+	debug_yield_write();
+
 	return oldptr;
 }
 
@@ -164,7 +226,7 @@ void urcu_remove_reader(pthread_t id)
 
 	assert(reader_data != NULL);
 	for (index = reader_data; index < reader_data + num_readers; index++) {
-		if (index->tid == id) {
+		if (pthread_equal(index->tid, id)) {
 			memcpy(index, &reader_data[num_readers - 1],
 				sizeof(struct reader_data));
 			reader_data[num_readers - 1].tid = 0;
@@ -179,44 +241,16 @@
 
 void urcu_register_thread(void)
 {
-	pthread_t self = pthread_self();
-	int ret;
-
-	ret = pthread_mutex_lock(&urcu_mutex);
-	if (ret) {
-		perror("Error in pthread mutex lock");
-		exit(-1);
-	}
-
-	urcu_add_reader(self);
-
-
-	ret = pthread_mutex_unlock(&urcu_mutex);
-	if (ret) {
-		perror("Error in pthread mutex unlock");
-		exit(-1);
-	}
+	internal_urcu_lock();
+	urcu_add_reader(pthread_self());
+	internal_urcu_unlock();
 }
 
 void urcu_unregister_thread(void)
 {
-	pthread_t self = pthread_self();
-	int ret;
-
-	ret = pthread_mutex_lock(&urcu_mutex);
-	if (ret) {
-		perror("Error in pthread mutex lock");
-		exit(-1);
-	}
-
-	urcu_remove_reader(self);
-
-	ret = pthread_mutex_unlock(&urcu_mutex);
-	if (ret) {
-		perror("Error in pthread mutex unlock");
-		exit(-1);
-	}
-
+	internal_urcu_lock();
+	urcu_remove_reader(pthread_self());
+	internal_urcu_unlock();
 }
 
 void sigurcu_handler(int signo, siginfo_t *siginfo, void *context)
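
For context, the write-side API reworked by this patch can be driven as in the minimal sketch below. It is illustrative only, not part of the commit: struct config, update_config() and the values used are hypothetical, and it assumes urcu.h at this point in the tree exports urcu_register_thread(), urcu_unregister_thread() and urcu_publish_content() as used in urcu.c above. Since urcu_publish_content() takes the internal lock itself and runs both qparity flips before returning, the old pointer it hands back can be freed immediately.

#include <stdlib.h>

#include "urcu.h"

/* Hypothetical shared data; readers would dereference 'shared' from
 * within their read-side critical sections. */
struct config {
	int value;
};

static struct config *shared;

/* Swap in a new config and reclaim the old one.  urcu_publish_content()
 * stores the new pointer, flips the qparity twice and waits for the
 * corresponding quiescent states, so the returned old pointer has no
 * remaining readers and can be freed right away. */
static void update_config(int newval)
{
	struct config *newp, *oldp;

	newp = malloc(sizeof(*newp));
	if (!newp)
		abort();
	newp->value = newval;

	oldp = urcu_publish_content((void **)&shared, newp);
	free(oldp);
}

int main(void)
{
	/* Threads that access 'shared' on the read side must register,
	 * since force_mb_all_threads() only signals registered threads
	 * with SIGURCU; shown here for completeness. */
	urcu_register_thread();

	shared = malloc(sizeof(*shared));
	if (!shared)
		abort();
	shared->value = 0;

	update_config(42);

	urcu_unregister_thread();
	free(shared);
	return 0;
}

The synchronize_rcu() introduced by this patch can equally be called on its own after an updater publishes a pointer by hand; the two switch_qparity() passes inside urcu_publish_content() amount to exactly that grace-period wait.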