X-Git-Url: http://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu-defer.c;h=412eb30477908b2a60acff9dde794d9ecfc793f8;hp=1e5d2b6d6dba842c3aaa13eae8aed8d92dd17ebc;hb=b55487c706982c8190e42545f7f74dfadad59548;hpb=786ee85b1676d2a1865b2db7120c45bafdf954d6

diff --git a/urcu-defer.c b/urcu-defer.c
index 1e5d2b6..412eb30 100644
--- a/urcu-defer.c
+++ b/urcu-defer.c
@@ -101,18 +101,31 @@ static void internal_urcu_unlock(pthread_mutex_t *mutex)
  * Must be called after Q.S. is reached.
  */
 static void rcu_defer_barrier_queue(struct defer_queue *queue,
-			unsigned long head)
+				    unsigned long head)
 {
 	unsigned long i;
+	void (*fct)(void *p);
+	void *p;
 
 	/*
 	 * Tail is only modified when lock is held.
 	 * Head is only modified by owner thread.
 	 */
-	for (i = queue->tail; i != head; i++) {
+	for (i = queue->tail; i != head;) {
 		smp_rmb();	/* read head before q[]. */
-		free(LOAD_SHARED(queue->q[i & DEFER_QUEUE_MASK]));
+		p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+		if (unlikely(DQ_IS_FCT_BIT(p))) {
+			DQ_CLEAR_FCT_BIT(p);
+			queue->last_fct_out = p;
+			p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+		} else if (unlikely(p == DQ_FCT_MARK)) {
+			p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+			queue->last_fct_out = p;
+			p = LOAD_SHARED(queue->q[i++ & DEFER_QUEUE_MASK]);
+		}
+		fct = queue->last_fct_out;
+		fct(p);
 	}
 	smp_mb();	/* push tail after having used q[] */
 	STORE_SHARED(queue->tail, i);
@@ -120,9 +133,12 @@ static void rcu_defer_barrier_queue(struct defer_queue *queue,
 
 static void _rcu_defer_barrier_thread(void)
 {
-	unsigned long head;
+	unsigned long head, num_items;
 
 	head = defer_queue.head;
+	num_items = head - defer_queue.tail;
+	if (unlikely(!num_items))
+		return;
 	synchronize_rcu();
 	rcu_defer_barrier_queue(&defer_queue, head);
 }
@@ -135,20 +151,44 @@ void rcu_defer_barrier_thread(void)
 	internal_urcu_unlock(&urcu_defer_mutex);
 }
 
+/*
+ * rcu_defer_barrier - Execute all queued rcu callbacks.
+ *
+ * Execute all RCU callbacks queued before rcu_defer_barrier() execution.
+ * All callbacks queued on the local thread prior to a rcu_defer_barrier() call
+ * are guaranteed to be executed.
+ * Callbacks queued by other threads concurrently with rcu_defer_barrier()
+ * execution are not guaranteed to be executed in the current batch (could
+ * be left for the next batch). These callbacks queued by other threads are only
+ * guaranteed to be executed if there is explicit synchronization between
+ * the thread adding to the queue and the thread issuing the defer_barrier call.
+ */
+
 void rcu_defer_barrier(void)
 {
 	struct deferer_registry *index;
+	unsigned long num_items = 0;
 
 	if (!registry)
 		return;
 
 	internal_urcu_lock(&urcu_defer_mutex);
-	for (index = registry; index < registry + num_deferers; index++)
+	for (index = registry; index < registry + num_deferers; index++) {
 		index->last_head = LOAD_SHARED(index->defer_queue->head);
+		num_items += index->last_head - index->defer_queue->tail;
+	}
+	if (likely(!num_items)) {
+		/*
+		 * We skip the grace period because there are no queued
+		 * callbacks to execute.
+		 */
+		goto end;
+	}
 	synchronize_rcu();
 	for (index = registry; index < registry + num_deferers; index++)
 		rcu_defer_barrier_queue(index->defer_queue,
 					index->last_head);
+end:
 	internal_urcu_unlock(&urcu_defer_mutex);
 }
 
@@ -168,9 +208,9 @@ void *thr_defer(void *args)
  * library wrappers to be used by non-LGPL compatible source code.
  */
 
-void rcu_defer_queue(void *p)
+void rcu_defer_queue(void (*fct)(void *p), void *p)
 {
-	_rcu_defer_queue(p);
+	_rcu_defer_queue(fct, p);
 }
 
 static void rcu_add_deferer(pthread_t id)
@@ -195,6 +235,7 @@ static void rcu_add_deferer(pthread_t id)
 	registry[num_deferers].tid = id;
 	/* reference to the TLS of _this_ deferer thread. */
 	registry[num_deferers].defer_queue = &defer_queue;
+	registry[num_deferers].last_head = 0;
 	num_deferers++;
 }
 
@@ -213,6 +254,7 @@ static void rcu_remove_deferer(pthread_t id)
 				sizeof(struct deferer_registry));
 			registry[num_deferers - 1].tid = 0;
 			registry[num_deferers - 1].defer_queue = NULL;
+			registry[num_deferers - 1].last_head = 0;
 			num_deferers--;
 			return;
 		}
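For context, a minimal caller-side sketch of the API change above: rcu_defer_queue() now takes the reclamation function as an explicit argument rather than hard-coding free(). Everything here apart from rcu_defer_queue() itself (free_node(), update(), the "shared" pointer, and the rcu_assign_pointer() publication step) is hypothetical illustration, not part of this patch, and it assumes a single updater thread already registered with the defer mechanism.

/*
 * Hypothetical usage sketch -- not part of this patch.  A single
 * writer publishes a replacement node and defers reclamation of the
 * old one through the new function-pointer variant of
 * rcu_defer_queue().
 */
#include <stdlib.h>
#include <urcu.h>
#include <urcu-defer.h>

struct node {
	int value;
};

static struct node *shared;	/* dereferenced under rcu_read_lock() */

/* Runs only after a grace period, once pre-existing readers are done. */
static void free_node(void *p)
{
	free(p);
}

static void update(int value)
{
	struct node *newp, *oldp;

	newp = malloc(sizeof(*newp));
	if (!newp)
		return;
	newp->value = value;
	oldp = shared;
	rcu_assign_pointer(shared, newp);	/* publish the new node */
	/*
	 * The old API was rcu_defer_queue(p) and always called free(p);
	 * the destructor is now passed explicitly.
	 */
	rcu_defer_queue(free_node, oldp);
}

A batch of callbacks queued this way is later flushed either by the worker thread (thr_defer()) or explicitly through rcu_defer_barrier() / rcu_defer_barrier_thread(), both of which now skip the grace period entirely when no items are queued.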