X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=lib%2Fringbuffer%2Fring_buffer_frontend.c;h=22cc62fcc02e7de82122801378a6bc6c3d89d7d1;hb=0fdec6861bdb4095e7537667f10131577eb7b279;hp=77f9e8bb4f560591f025d5b2b30b07fa4e1a777d;hpb=f5ea58003633d154b84c5df14a7e1fa7fcccb15c;p=lttng-modules.git

diff --git a/lib/ringbuffer/ring_buffer_frontend.c b/lib/ringbuffer/ring_buffer_frontend.c
index 77f9e8bb..22cc62fc 100644
--- a/lib/ringbuffer/ring_buffer_frontend.c
+++ b/lib/ringbuffer/ring_buffer_frontend.c
@@ -1352,8 +1352,10 @@ void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
 /*
  * lib_ring_buffer_switch_new_end: finish switching current subbuffer
  *
- * The only remaining threads could be the ones with pending commits. They will
- * have to do the deliver themselves.
+ * Calls subbuffer_set_data_size() to set the data size of the current
+ * sub-buffer. We do not need to perform check_deliver nor commit here,
+ * since this task will be done by the "commit" of the event for which
+ * we are currently doing the space reservation.
  */
 static
 void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
@@ -1362,33 +1364,11 @@ void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
                                     u64 tsc)
 {
         const struct lib_ring_buffer_config *config = &chan->backend.config;
-        unsigned long endidx = subbuf_index(offsets->end - 1, chan);
-        unsigned long commit_count, padding_size, data_size;
+        unsigned long endidx, data_size;
 
+        endidx = subbuf_index(offsets->end - 1, chan);
         data_size = subbuf_offset(offsets->end - 1, chan) + 1;
-        padding_size = chan->backend.subbuf_size - data_size;
         subbuffer_set_data_size(config, &buf->backend, endidx, data_size);
-
-        /*
-         * Order all writes to buffer before the commit count update that will
-         * determine that the subbuffer is full.
-         */
-        if (config->ipi == RING_BUFFER_IPI_BARRIER) {
-                /*
-                 * Must write slot data before incrementing commit count. This
-                 * compiler barrier is upgraded into a smp_mb() by the IPI sent
-                 * by get_subbuf().
-                 */
-                barrier();
-        } else
-                smp_wmb();
-        v_add(config, padding_size, &buf->commit_hot[endidx].cc);
-        commit_count = v_read(config, &buf->commit_hot[endidx].cc);
-        lib_ring_buffer_check_deliver(config, buf, chan, offsets->end - 1,
-                                      commit_count, endidx);
-        lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
-                                             offsets->end, commit_count,
-                                             padding_size);
 }
 
 /*
@@ -1579,9 +1559,10 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
                                      struct lib_ring_buffer_ctx *ctx)
 {
         const struct lib_ring_buffer_config *config = &chan->backend.config;
-        unsigned long reserve_commit_diff;
+        unsigned long reserve_commit_diff, offset_cmp;
 
-        offsets->begin = v_read(config, &buf->offset);
+retry:
+        offsets->begin = offset_cmp = v_read(config, &buf->offset);
         offsets->old = offsets->begin;
         offsets->switch_new_start = 0;
         offsets->switch_new_end = 0;
@@ -1613,7 +1594,7 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
                 }
         }
         if (unlikely(offsets->switch_new_start)) {
-                unsigned long sb_index;
+                unsigned long sb_index, commit_count;
 
                 /*
                  * We are typically not filling the previous buffer completely.
@@ -1624,12 +1605,31 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
                         + config->cb.subbuffer_header_size();
                 /* Test new buffer integrity */
                 sb_index = subbuf_index(offsets->begin, chan);
+                /*
+                 * Read buf->offset before buf->commit_cold[sb_index].cc_sb.
+                 * lib_ring_buffer_check_deliver() has the matching
+                 * memory barriers required around commit_cold cc_sb
+                 * updates to ensure reserve and commit counter updates
+                 * are not seen reordered when updated by another CPU.
+                 */
+                smp_rmb();
+                commit_count = v_read(config,
+                                &buf->commit_cold[sb_index].cc_sb);
+                /* Read buf->commit_cold[sb_index].cc_sb before buf->offset. */
+                smp_rmb();
+                if (unlikely(offset_cmp != v_read(config, &buf->offset))) {
+                        /*
+                         * The reserve counter has been concurrently updated
+                         * while we read the commit counter. This means the
+                         * commit counter we read might not match buf->offset
+                         * due to concurrent update. We therefore need to retry.
+                         */
+                        goto retry;
+                }
                 reserve_commit_diff =
                   (buf_trunc(offsets->begin, chan)
                    >> chan->backend.num_subbuf_order)
-                  - ((unsigned long) v_read(config,
-                                            &buf->commit_cold[sb_index].cc_sb)
-                     & chan->commit_count_mask);
+                  - (commit_count & chan->commit_count_mask);
                 if (likely(reserve_commit_diff == 0)) {
                         /* Next subbuffer not being written to. */
                         if (unlikely(config->mode != RING_BUFFER_OVERWRITE &&
@@ -1654,9 +1654,10 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
                 } else {
                         /*
                          * Next subbuffer reserve offset does not match the
-                         * commit offset. Drop record in producer-consumer and
-                         * overwrite mode. Caused by either a writer OOPS or too
-                         * many nested writes over a reserve/commit pair.
+                         * commit offset, and this did not involve an update to
+                         * the reserve counter. Drop record in producer-consumer
+                         * and overwrite mode. Caused by either a writer OOPS or
+                         * too many nested writes over a reserve/commit pair.
                          */
                         v_inc(config, &buf->records_lost_wrap);
                         return -EIO;
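
The pair of smp_rmb() calls added in the wrap-around check implements a snapshot-and-recheck, much like a seqlock read side: sample buf->offset, read the cold commit counter, then re-read buf->offset and retry if a concurrent reservation moved it, so that reserve_commit_diff is always computed from a consistent (offset, commit count) pair. Below is a minimal user-space sketch of that read-side discipline, assuming C11 atomics and fences in place of the kernel's v_read()/smp_rmb(); the struct counters type and the consistent_diff() helper are illustrative names, not lttng-modules API.

#include <stdatomic.h>

struct counters {
        atomic_ulong reserve;           /* stands in for buf->offset */
        atomic_ulong commit_cold;       /* stands in for commit_cold[sb_index].cc_sb */
};

/*
 * Return reserve - commit computed from a consistent pair of reads.
 * If the reserve counter moved while the commit counter was being
 * sampled, the two values may belong to different points in time,
 * so retry, exactly as the goto retry above does.
 */
static unsigned long consistent_diff(struct counters *c)
{
        unsigned long reserve_cmp, commit;

        for (;;) {
                reserve_cmp = atomic_load_explicit(&c->reserve,
                                                   memory_order_relaxed);
                /* Order the reserve read before the commit_cold read. */
                atomic_thread_fence(memory_order_acquire);
                commit = atomic_load_explicit(&c->commit_cold,
                                              memory_order_relaxed);
                /* Order the commit_cold read before re-checking reserve. */
                atomic_thread_fence(memory_order_acquire);
                if (atomic_load_explicit(&c->reserve,
                                         memory_order_relaxed) == reserve_cmp)
                        return reserve_cmp - commit;
                /* A concurrent reservation raced with us: retry. */
        }
}

The retry loop only guarantees consistency of the reader's snapshot; as the added comment notes, the writer side must pair these barriers with matching ordering around its commit_cold cc_sb updates, which the patch attributes to lib_ring_buffer_check_deliver().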