X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=lib%2Fringbuffer%2Fring_buffer_frontend.c;h=77f9e8bb4f560591f025d5b2b30b07fa4e1a777d;hb=f5ea58003633d154b84c5df14a7e1fa7fcccb15c;hp=1dafe8a4162a6a24a796df28274e0f1fa7120500;hpb=5fb66f07aae4884426f1706d0281bd242a38c2a7;p=lttng-modules.git

diff --git a/lib/ringbuffer/ring_buffer_frontend.c b/lib/ringbuffer/ring_buffer_frontend.c
index 1dafe8a4..77f9e8bb 100644
--- a/lib/ringbuffer/ring_buffer_frontend.c
+++ b/lib/ringbuffer/ring_buffer_frontend.c
@@ -67,7 +67,8 @@ struct switch_offsets {
 	unsigned long begin, end, old;
 	size_t pre_header_padding, size;
-	unsigned int switch_new_start:1, switch_old_start:1, switch_old_end:1;
+	unsigned int switch_new_start:1, switch_new_end:1, switch_old_start:1,
+		     switch_old_end:1;
 };
 
 #ifdef CONFIG_NO_HZ
@@ -1348,6 +1349,48 @@ void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
 					     config->cb.subbuffer_header_size());
 }
 
+/*
+ * lib_ring_buffer_switch_new_end: finish switching current subbuffer
+ *
+ * The only remaining threads could be the ones with pending commits. They will
+ * have to do the deliver themselves.
+ */
+static
+void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
+				    struct channel *chan,
+				    struct switch_offsets *offsets,
+				    u64 tsc)
+{
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
+	unsigned long endidx = subbuf_index(offsets->end - 1, chan);
+	unsigned long commit_count, padding_size, data_size;
+
+	data_size = subbuf_offset(offsets->end - 1, chan) + 1;
+	padding_size = chan->backend.subbuf_size - data_size;
+	subbuffer_set_data_size(config, &buf->backend, endidx, data_size);
+
+	/*
+	 * Order all writes to buffer before the commit count update that will
+	 * determine that the subbuffer is full.
+	 */
+	if (config->ipi == RING_BUFFER_IPI_BARRIER) {
+		/*
+		 * Must write slot data before incrementing commit count. This
+		 * compiler barrier is upgraded into a smp_mb() by the IPI sent
+		 * by get_subbuf().
+		 */
+		barrier();
+	} else
+		smp_wmb();
+	v_add(config, padding_size, &buf->commit_hot[endidx].cc);
+	commit_count = v_read(config, &buf->commit_hot[endidx].cc);
+	lib_ring_buffer_check_deliver(config, buf, chan, offsets->end - 1,
+				      commit_count, endidx);
+	lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
+					     offsets->end, commit_count,
+					     padding_size);
+}
+
 /*
  * Returns :
  *	0 if ok
@@ -1480,6 +1523,48 @@ void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode m
 }
 EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_slow);
 
+static void remote_switch(void *info)
+{
+	struct lib_ring_buffer *buf = info;
+
+	lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
+}
+
+void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf)
+{
+	struct channel *chan = buf->backend.chan;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
+	int ret;
+
+	/*
+	 * With global synchronization we don't need to use the IPI scheme.
+	 */
+	if (config->sync == RING_BUFFER_SYNC_GLOBAL) {
+		lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
+		return;
+	}
+
+	/*
+	 * Taking lock on CPU hotplug to ensure two things: first, that the
+	 * target cpu is not taken concurrently offline while we are within
+	 * smp_call_function_single() (I don't trust that get_cpu() on the
+	 * _local_ CPU actually inhibit CPU hotplug for the _remote_ CPU (to be
+	 * confirmed)).
+	 * Secondly, if it happens that the CPU is not online, our own call to
+	 * lib_ring_buffer_switch_slow() needs to be protected from CPU hotplug
+	 * handlers, which can also perform a remote subbuffer switch.
+	 */
+	get_online_cpus();
+	ret = smp_call_function_single(buf->backend.cpu,
+				       remote_switch, buf, 1);
+	if (ret) {
+		/* Remote CPU is offline, do it ourself. */
+		lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
+	}
+	put_online_cpus();
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_remote);
+
 /*
  * Returns :
  *	0 if ok
@@ -1499,6 +1584,7 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
 	offsets->begin = v_read(config, &buf->offset);
 	offsets->old = offsets->begin;
 	offsets->switch_new_start = 0;
+	offsets->switch_new_end = 0;
 	offsets->switch_old_end = 0;
 	offsets->pre_header_padding = 0;
 
@@ -1605,6 +1691,14 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
 		 */
 	}
 	offsets->end = offsets->begin + offsets->size;
+
+	if (unlikely(subbuf_offset(offsets->end, chan) == 0)) {
+		/*
+		 * The offset_end will fall at the very beginning of the next
+		 * subbuffer.
+		 */
+		offsets->switch_new_end = 1;	/* For offsets->begin */
+	}
 	return 0;
 }
 
@@ -1675,6 +1769,9 @@ int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
 	if (unlikely(offsets.switch_new_start))
 		lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc);
 
+	if (unlikely(offsets.switch_new_end))
+		lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc);
+
 	ctx->slot_size = offsets.size;
 	ctx->pre_offset = offsets.begin;
 	ctx->buf_offset = offsets.begin + offsets.pre_header_padding;
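
Note on the IPI scheme used above: lib_ring_buffer_switch_remote() is built on the generic "run a function on a specific CPU, fall back to the local CPU if the target is offline" pattern, i.e. smp_call_function_single() bracketed by get_online_cpus()/put_online_cpus(). The stand-alone module below is a minimal sketch of that pattern only, using the same pre-4.13 hotplug API names the patch uses; the demo_* identifiers and the hard-coded target CPU are hypothetical and not part of lttng-modules.

/* demo_remote_call.c - hypothetical sketch of the remote-call pattern above */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/cpu.h>

struct demo_target {
	int cpu;			/* CPU the work should run on */
};

/* Runs in IPI context on the target CPU (or locally in the fallback path). */
static void demo_work(void *info)
{
	struct demo_target *t = info;

	/* raw_ variant: legal from both IPI and preemptible context. */
	pr_info("demo: work for cpu %d ran on cpu %d\n",
		t->cpu, raw_smp_processor_id());
}

static int __init demo_init(void)
{
	struct demo_target t = { .cpu = 1 };	/* hypothetical target */
	int ret;

	/*
	 * Hold the CPU hotplug read lock so the target cannot go offline
	 * between the online check done by smp_call_function_single() and
	 * the local fallback below, the same protection the patch takes
	 * around its remote subbuffer switch.
	 */
	get_online_cpus();
	ret = smp_call_function_single(t.cpu, demo_work, &t, 1);
	if (ret) {
		/* Target CPU is offline: do the work on the local CPU. */
		demo_work(&t);
	}
	put_online_cpus();
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Passing wait=1 makes smp_call_function_single() synchronous, so in the patch the remote lib_ring_buffer_switch_slow() has finished executing by the time lib_ring_buffer_switch_remote() returns.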