X-Git-Url: https://git.liburcu.org/?p=urcu.git;a=blobdiff_plain;f=urcu-call-rcu-impl.h;h=5cc02d97704514df5e45a7dd459744d7f79eb597;hp=c2e175b7524982ab341f73865248404e8e303542;hb=109105b7f91d58e29a2b0cfb18c325d57cf80199;hpb=fc236e5edb83c2273e251be4cd659f47491cc90c

diff --git a/urcu-call-rcu-impl.h b/urcu-call-rcu-impl.h
index c2e175b..5cc02d9 100644
--- a/urcu-call-rcu-impl.h
+++ b/urcu-call-rcu-impl.h
@@ -42,6 +42,7 @@
 #include "urcu/list.h"
 #include "urcu/futex.h"
 #include "urcu/tls-compat.h"
+#include "urcu/ref.h"
 #include "urcu-die.h"
 
 /* Data structure that identifies a call_rcu thread. */
@@ -67,6 +68,7 @@ struct call_rcu_data {
 struct call_rcu_completion {
	int barrier_count;
	int32_t futex;
+	struct urcu_ref ref;
 };
 
 struct call_rcu_completion_work {
@@ -307,7 +309,7 @@ static void *call_rcu_thread(void *arg)
 		cmm_smp_mb__before_uatomic_or();
 		uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
 		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
-			poll(NULL, 0, 1);
+			(void) poll(NULL, 0, 1);
 		uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSED);
 		cmm_smp_mb__after_uatomic_and();
 		rcu_register_thread();
@@ -339,7 +341,7 @@
 			if (cds_wfcq_empty(&crdp->cbs_head,
 					&crdp->cbs_tail)) {
 				call_rcu_wait(crdp);
-				poll(NULL, 0, 10);
+				(void) poll(NULL, 0, 10);
 				uatomic_dec(&crdp->futex);
 				/*
 				 * Decrement futex before reading
@@ -347,10 +349,10 @@
 				 */
 				cmm_smp_mb();
 			} else {
-				poll(NULL, 0, 10);
+				(void) poll(NULL, 0, 10);
 			}
 		} else {
-			poll(NULL, 0, 10);
+			(void) poll(NULL, 0, 10);
 		}
 		rcu_thread_online();
 	}
@@ -667,10 +669,10 @@ void call_rcu(struct rcu_head *head,
 	struct call_rcu_data *crdp;
 
 	/* Holding rcu read-side lock across use of per-cpu crdp */
-	rcu_read_lock();
+	_rcu_read_lock();
 	crdp = get_call_rcu_data();
 	_call_rcu(head, func, crdp);
-	rcu_read_unlock();
+	_rcu_read_unlock();
 }
 
 /*
@@ -708,7 +710,7 @@ void call_rcu_data_free(struct call_rcu_data *crdp)
 		uatomic_or(&crdp->flags, URCU_CALL_RCU_STOP);
 		wake_call_rcu_thread(crdp);
 		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
-			poll(NULL, 0, 1);
+			(void) poll(NULL, 0, 1);
 	}
 	if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
 		/* Create default call rcu data if need be */
@@ -768,6 +770,15 @@ void free_all_cpu_call_rcu_data(void)
 	free(crdp);
 }
 
+static
+void free_completion(struct urcu_ref *ref)
+{
+	struct call_rcu_completion *completion;
+
+	completion = caa_container_of(ref, struct call_rcu_completion, ref);
+	free(completion);
+}
+
 static
 void _rcu_barrier_complete(struct rcu_head *head)
 {
@@ -776,8 +787,9 @@ void _rcu_barrier_complete(struct rcu_head *head)
 
 	work = caa_container_of(head, struct call_rcu_completion_work, head);
 	completion = work->completion;
-	uatomic_dec(&completion->barrier_count);
-	call_rcu_completion_wake_up(completion);
+	if (!uatomic_sub_return(&completion->barrier_count, 1))
+		call_rcu_completion_wake_up(completion);
+	urcu_ref_put(&completion->ref, free_completion);
 	free(work);
 }
 
@@ -787,19 +799,19 @@
 void rcu_barrier(void)
 {
 	struct call_rcu_data *crdp;
-	struct call_rcu_completion completion;
+	struct call_rcu_completion *completion;
 	int count = 0;
 	int was_online;
 
 	/* Put in offline state in QSBR. */
-	was_online = rcu_read_ongoing();
+	was_online = _rcu_read_ongoing();
 	if (was_online)
 		rcu_thread_offline();
 	/*
 	 * Calling a rcu_barrier() within a RCU read-side critical
 	 * section is an error.
 	 */
-	if (rcu_read_ongoing()) {
+	if (_rcu_read_ongoing()) {
 		static int warned = 0;
 
 		if (!warned) {
@@ -809,11 +821,17 @@
 		goto online;
 	}
 
+	completion = calloc(sizeof(*completion), 1);
+	if (!completion)
+		urcu_die(errno);
+
 	call_rcu_lock(&call_rcu_mutex);
 	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
 		count++;
 
-	completion.barrier_count = count;
+	/* Referenced by rcu_barrier() and each call_rcu thread. */
+	urcu_ref_set(&completion->ref, count + 1);
+	completion->barrier_count = count;
 
 	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
 		struct call_rcu_completion_work *work;
@@ -821,20 +839,23 @@
 		work = calloc(sizeof(*work), 1);
 		if (!work)
 			urcu_die(errno);
-		work->completion = &completion;
+		work->completion = completion;
 		_call_rcu(&work->head, _rcu_barrier_complete, crdp);
 	}
 	call_rcu_unlock(&call_rcu_mutex);
 
 	/* Wait for them */
 	for (;;) {
-		uatomic_dec(&completion.futex);
+		uatomic_dec(&completion->futex);
 		/* Decrement futex before reading barrier_count */
 		cmm_smp_mb();
-		if (!uatomic_read(&completion.barrier_count))
-			break;
-		call_rcu_completion_wait(&completion);
+		if (!uatomic_read(&completion->barrier_count))
+			break;
+		call_rcu_completion_wait(completion);
 	}
+
+	urcu_ref_put(&completion->ref, free_completion);
+
 online:
 	if (was_online)
 		rcu_thread_online();
@@ -859,7 +880,7 @@ void call_rcu_before_fork(void)
 	}
 	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
 		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) == 0)
-			poll(NULL, 0, 1);
+			(void) poll(NULL, 0, 1);
 	}
 }
 
@@ -876,7 +897,7 @@ void call_rcu_after_fork_parent(void)
 		uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
 	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
 		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) != 0)
-			poll(NULL, 0, 1);
+			(void) poll(NULL, 0, 1);
 	}
 	call_rcu_unlock(&call_rcu_mutex);
 }
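
The hunks above change rcu_barrier()'s completion tracking from a stack-allocated structure to a heap-allocated, reference-counted one: the completion is created with calloc(), given count + 1 references (one per call_rcu worker thread plus one for the caller), each _rcu_barrier_complete() callback drops its reference through urcu_ref_put(), and whoever releases the last reference frees it, so a worker can no longer touch a completion that has already gone out of scope in rcu_barrier(). The sketch below shows how an application typically drives this API from the outside; it is not part of the patch, it assumes the default memb flavor (link with -lurcu), and the struct node, global_node and free_node() names are made up for illustration.

#include <stdlib.h>
#include <urcu.h>		/* memb flavor; pulls in call_rcu() and rcu_barrier() */
#include <urcu/compiler.h>	/* caa_container_of() */

struct node {
	int value;
	struct rcu_head rcu_head;
};

static struct node *global_node;

static
void free_node(struct rcu_head *head)
{
	struct node *node = caa_container_of(head, struct node, rcu_head);

	free(node);
}

int main(void)
{
	struct node *old;

	rcu_register_thread();

	global_node = calloc(1, sizeof(*global_node));
	if (!global_node)
		abort();

	/* Unpublish the node; its memory is reclaimed after a grace period. */
	old = rcu_xchg_pointer(&global_node, NULL);
	if (old)
		call_rcu(&old->rcu_head, free_node);

	/*
	 * Wait for every callback queued so far, on all call_rcu worker
	 * threads, to finish before tearing the program down. This is the
	 * call whose completion tracking the patch makes reference-counted.
	 */
	rcu_barrier();

	rcu_unregister_thread();
	return 0;
}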