From: Mathieu Desnoyers
Date: Sun, 30 Jun 2013 19:27:21 +0000 (-0400)
Subject: Cleanup: ring buffer: remove lib_ring_buffer_switch_new_end()
X-Git-Tag: v2.3.0-rc1~19
X-Git-Url: http://git.liburcu.org/?p=lttng-modules.git;a=commitdiff_plain;h=5fb66f07aae4884426f1706d0281bd242a38c2a7

Cleanup: ring buffer: remove lib_ring_buffer_switch_new_end()

lib_ring_buffer_switch_new_end() is a leftover from the days when an
event that exactly filled the current sub-buffer would automatically
trigger a switch into the next sub-buffer.

Even before the ring buffer code was moved into lttng-modules, this
behavior had been changed: an event that exactly fills a sub-buffer now
only fills the current sub-buffer, and does not switch into the next
one to populate its header. This change was made so that the periodic
timer switch, which shares the same semantics as an event exactly
filling a sub-buffer, would not create large numbers of empty
sub-buffers.

However, when that change was made, lib_ring_buffer_switch_new_end()
was not removed, although it clearly should have been. Its job is now
performed by the event "commit": lib_ring_buffer_switch_new_end() has
no effect, since padding_size is always 0.

Signed-off-by: Mathieu Desnoyers
---

diff --git a/lib/ringbuffer/frontend_api.h b/lib/ringbuffer/frontend_api.h
index c36ec3dc..13975602 100644
--- a/lib/ringbuffer/frontend_api.h
+++ b/lib/ringbuffer/frontend_api.h
@@ -125,14 +125,6 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
 	 * boundary. It's safe to write.
 	 */
 	*o_end = *o_begin + ctx->slot_size;
-
-	if (unlikely((subbuf_offset(*o_end, chan)) == 0))
-		/*
-		 * The offset_end will fall at the very beginning of the next
-		 * subbuffer.
-		 */
-		return 1;
-
 	return 0;
 }
 
diff --git a/lib/ringbuffer/ring_buffer_frontend.c b/lib/ringbuffer/ring_buffer_frontend.c
index 84341adc..1dafe8a4 100644
--- a/lib/ringbuffer/ring_buffer_frontend.c
+++ b/lib/ringbuffer/ring_buffer_frontend.c
@@ -67,8 +67,7 @@ struct switch_offsets {
 	unsigned long begin, end, old;
 	size_t pre_header_padding, size;
-	unsigned int switch_new_start:1, switch_new_end:1, switch_old_start:1,
-		     switch_old_end:1;
+	unsigned int switch_new_start:1, switch_old_start:1, switch_old_end:1;
 };
 
 #ifdef CONFIG_NO_HZ
@@ -1349,48 +1348,6 @@ void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
 			     config->cb.subbuffer_header_size());
 }
 
-/*
- * lib_ring_buffer_switch_new_end: finish switching current subbuffer
- *
- * The only remaining threads could be the ones with pending commits. They will
- * have to do the deliver themselves.
- */
-static
-void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
-				    struct channel *chan,
-				    struct switch_offsets *offsets,
-				    u64 tsc)
-{
-	const struct lib_ring_buffer_config *config = &chan->backend.config;
-	unsigned long endidx = subbuf_index(offsets->end - 1, chan);
-	unsigned long commit_count, padding_size, data_size;
-
-	data_size = subbuf_offset(offsets->end - 1, chan) + 1;
-	padding_size = chan->backend.subbuf_size - data_size;
-	subbuffer_set_data_size(config, &buf->backend, endidx, data_size);
-
-	/*
-	 * Order all writes to buffer before the commit count update that will
-	 * determine that the subbuffer is full.
-	 */
-	if (config->ipi == RING_BUFFER_IPI_BARRIER) {
-		/*
-		 * Must write slot data before incrementing commit count. This
-		 * compiler barrier is upgraded into a smp_mb() by the IPI sent
-		 * by get_subbuf().
-		 */
-		barrier();
-	} else
-		smp_wmb();
-	v_add(config, padding_size, &buf->commit_hot[endidx].cc);
-	commit_count = v_read(config, &buf->commit_hot[endidx].cc);
-	lib_ring_buffer_check_deliver(config, buf, chan, offsets->end - 1,
-				      commit_count, endidx);
-	lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
-					     offsets->end, commit_count,
-					     padding_size);
-}
-
 /*
  * Returns :
  * 0 if ok
@@ -1542,7 +1499,6 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
 	offsets->begin = v_read(config, &buf->offset);
 	offsets->old = offsets->begin;
 	offsets->switch_new_start = 0;
-	offsets->switch_new_end = 0;
 	offsets->switch_old_end = 0;
 	offsets->pre_header_padding = 0;
 
@@ -1649,14 +1605,6 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
 		 */
 	}
 	offsets->end = offsets->begin + offsets->size;
-
-	if (unlikely(subbuf_offset(offsets->end, chan) == 0)) {
-		/*
-		 * The offset_end will fall at the very beginning of the next
-		 * subbuffer.
-		 */
-		offsets->switch_new_end = 1;	/* For offsets->begin */
-	}
 	return 0;
 }
 
@@ -1727,9 +1675,6 @@ int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
 	if (unlikely(offsets.switch_new_start))
 		lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc);
 
-	if (unlikely(offsets.switch_new_end))
-		lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc);
-
 	ctx->slot_size = offsets.size;
 	ctx->pre_offset = offsets.begin;
 	ctx->buf_offset = offsets.begin + offsets.pre_header_padding;
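
The claim that padding_size is always 0 can be checked directly:
switch_new_end was only set when subbuf_offset(offsets->end, chan) == 0,
i.e. when the reserved slot ends exactly on a sub-buffer boundary, and
in that case data_size = subbuf_offset(offsets->end - 1, chan) + 1 is
the full sub-buffer size. What follows is a minimal standalone sketch
of that arithmetic, not the kernel code; it assumes only that the
sub-buffer size is a power of two, as the ring buffer requires.

#include <assert.h>

#define SUBBUF_SIZE 4096UL	/* assumed power of two, as in the ring buffer */

/* Mirrors subbuf_offset(): byte offset within the current sub-buffer. */
static unsigned long subbuf_offset(unsigned long offset)
{
	return offset & (SUBBUF_SIZE - 1);
}

int main(void)
{
	unsigned long end;

	/*
	 * Walk several sub-buffer boundaries: the only end offsets for
	 * which switch_new_end could be set (subbuf_offset(end) == 0).
	 */
	for (end = SUBBUF_SIZE; end <= 8 * SUBBUF_SIZE; end += SUBBUF_SIZE) {
		unsigned long data_size, padding_size;

		assert(subbuf_offset(end) == 0);

		/* Same computation as the removed switch_new_end path. */
		data_size = subbuf_offset(end - 1) + 1;
		padding_size = SUBBUF_SIZE - data_size;

		/* The sub-buffer is exactly full: no padding remains. */
		assert(padding_size == 0);
	}
	return 0;
}

With padding_size == 0, the v_add() in the removed function added
nothing to the commit counter, and the deliver check it performed is
already done by the event's own commit.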