X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=lib%2Fringbuffer%2Fring_buffer_frontend.c;h=7993633c5bd4a38789d47af280e63defae824a4c;hb=4a6ea683a440dc224dc4a9628ff4f713c42f5922;hp=52afa8b0b3a7d76a59e4324b56671e50972a2bb8;hpb=9f36eaed6f91d5897924b551b44d1edd8cee00e2;p=lttng-modules.git diff --git a/lib/ringbuffer/ring_buffer_frontend.c b/lib/ringbuffer/ring_buffer_frontend.c index 52afa8b0..7993633c 100644 --- a/lib/ringbuffer/ring_buffer_frontend.c +++ b/lib/ringbuffer/ring_buffer_frontend.c @@ -48,6 +48,7 @@ #include #include #include +#include #include #include #include @@ -136,6 +137,7 @@ void lib_ring_buffer_free(struct lib_ring_buffer *buf) lib_ring_buffer_print_errors(chan, buf, buf->backend.cpu); lttng_kvfree(buf->commit_hot); lttng_kvfree(buf->commit_cold); + lttng_kvfree(buf->ts_end); lib_ring_buffer_backend_free(&buf->backend); } @@ -165,6 +167,7 @@ void lib_ring_buffer_reset(struct lib_ring_buffer *buf) v_set(config, &buf->commit_hot[i].cc, 0); v_set(config, &buf->commit_hot[i].seq, 0); v_set(config, &buf->commit_cold[i].cc_sb, 0); + buf->ts_end[i] = 0; } atomic_long_set(&buf->consumed, 0); atomic_set(&buf->record_disabled, 0); @@ -253,6 +256,17 @@ int lib_ring_buffer_create(struct lib_ring_buffer *buf, goto free_commit; } + buf->ts_end = + lttng_kvzalloc_node(ALIGN(sizeof(*buf->ts_end) + * chan->backend.num_subbuf, + 1 << INTERNODE_CACHE_SHIFT), + GFP_KERNEL | __GFP_NOWARN, + cpu_to_node(max(cpu, 0))); + if (!buf->ts_end) { + ret = -ENOMEM; + goto free_commit_cold; + } + init_waitqueue_head(&buf->read_wait); init_waitqueue_head(&buf->write_wait); raw_spin_lock_init(&buf->raw_tick_nohz_spinlock); @@ -292,6 +306,8 @@ int lib_ring_buffer_create(struct lib_ring_buffer *buf, /* Error handling */ free_init: + lttng_kvfree(buf->ts_end); +free_commit_cold: lttng_kvfree(buf->commit_cold); free_commit: lttng_kvfree(buf->commit_hot); @@ -392,7 +408,7 @@ static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf) { struct channel *chan = buf->backend.chan; const struct lib_ring_buffer_config *config = &chan->backend.config; - unsigned int flags; + unsigned int flags = 0; if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER || !chan->read_timer_interval @@ -438,7 +454,7 @@ static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf) buf->read_timer_enabled = 0; } -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) +#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) enum cpuhp_state lttng_rb_hp_prepare; enum cpuhp_state lttng_rb_hp_online; @@ -509,7 +525,7 @@ int lttng_cpuhp_rb_frontend_offline(unsigned int cpu, } EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_frontend_offline); -#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */ +#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */ #ifdef CONFIG_HOTPLUG_CPU @@ -571,7 +587,7 @@ int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb, #endif -#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */ +#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */ #if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) /* @@ -677,7 +693,7 @@ static void channel_unregister_notifiers(struct channel *chan) * concurrency. 
*/ #endif /* CONFIG_NO_HZ */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) +#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) { int ret; @@ -688,12 +704,12 @@ static void channel_unregister_notifiers(struct channel *chan) &chan->cpuhp_prepare.node); WARN_ON(ret); } -#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */ +#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */ { int cpu; #ifdef CONFIG_HOTPLUG_CPU - get_online_cpus(); + lttng_cpus_read_lock(); chan->cpu_hp_enable = 0; for_each_online_cpu(cpu) { struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, @@ -701,7 +717,7 @@ static void channel_unregister_notifiers(struct channel *chan) lib_ring_buffer_stop_switch_timer(buf); lib_ring_buffer_stop_read_timer(buf); } - put_online_cpus(); + lttng_cpus_read_unlock(); unregister_cpu_notifier(&chan->cpu_hp_notifier); #else for_each_possible_cpu(cpu) { @@ -712,7 +728,7 @@ static void channel_unregister_notifiers(struct channel *chan) } #endif } -#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */ +#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */ } else { struct lib_ring_buffer *buf = chan->backend.buf; @@ -741,14 +757,14 @@ void lib_ring_buffer_set_quiescent_channel(struct channel *chan) const struct lib_ring_buffer_config *config = &chan->backend.config; if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) { - get_online_cpus(); + lttng_cpus_read_lock(); for_each_channel_cpu(cpu, chan) { struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu); lib_ring_buffer_set_quiescent(buf); } - put_online_cpus(); + lttng_cpus_read_unlock(); } else { struct lib_ring_buffer *buf = chan->backend.buf; @@ -763,14 +779,14 @@ void lib_ring_buffer_clear_quiescent_channel(struct channel *chan) const struct lib_ring_buffer_config *config = &chan->backend.config; if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) { - get_online_cpus(); + lttng_cpus_read_lock(); for_each_channel_cpu(cpu, chan) { struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu); lib_ring_buffer_clear_quiescent(buf); } - put_online_cpus(); + lttng_cpus_read_unlock(); } else { struct lib_ring_buffer *buf = chan->backend.buf; @@ -841,7 +857,7 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config, init_waitqueue_head(&chan->hp_wait); if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) { -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) +#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) chan->cpuhp_prepare.component = LTTNG_RING_BUFFER_FRONTEND; ret = cpuhp_state_add_instance_nocalls(lttng_rb_hp_prepare, &chan->cpuhp_prepare.node); @@ -853,7 +869,7 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config, &chan->cpuhp_online.node); if (ret) goto cpuhp_online_error; -#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */ +#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */ { int cpu; /* @@ -867,7 +883,7 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config, chan->cpu_hp_notifier.priority = 6; register_cpu_notifier(&chan->cpu_hp_notifier); - get_online_cpus(); + lttng_cpus_read_lock(); for_each_online_cpu(cpu) { struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu); @@ -877,7 +893,7 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config, spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu)); } chan->cpu_hp_enable = 1; - put_online_cpus(); + lttng_cpus_read_unlock(); #else 
for_each_possible_cpu(cpu) { struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, @@ -889,7 +905,7 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config, } #endif } -#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */ +#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */ #if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) /* Only benefit from NO_HZ idle with per-cpu buffers for now. */ @@ -909,13 +925,13 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config, return chan; -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) +#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) cpuhp_online_error: ret = cpuhp_state_remove_instance_nocalls(lttng_rb_hp_prepare, &chan->cpuhp_prepare.node); WARN_ON(ret); cpuhp_prepare_error: -#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */ +#endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */ error_free_backend: channel_backend_free(&chan->backend); error: @@ -963,8 +979,6 @@ void *channel_destroy(struct channel *chan) config->cb.buffer_finalize(buf, chan->backend.priv, cpu); - if (buf->backend.allocated) - lib_ring_buffer_set_quiescent(buf); /* * Perform flush before writing to finalized. */ @@ -977,8 +991,6 @@ void *channel_destroy(struct channel *chan) if (config->cb.buffer_finalize) config->cb.buffer_finalize(buf, chan->backend.priv, -1); - if (buf->backend.allocated) - lib_ring_buffer_set_quiescent(buf); /* * Perform flush before writing to finalized. */ @@ -1063,7 +1075,7 @@ int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf, int finalized; retry: - finalized = READ_ONCE(buf->finalized); + finalized = LTTNG_READ_ONCE(buf->finalized); /* * Read finalized before counters. */ @@ -1234,7 +1246,7 @@ int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf, return -EBUSY; } retry: - finalized = READ_ONCE(buf->finalized); + finalized = LTTNG_READ_ONCE(buf->finalized); /* * Read finalized before counters. 
*/ @@ -1487,12 +1499,13 @@ void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf, cpu); } +#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS static -void lib_ring_buffer_print_errors(struct channel *chan, - struct lib_ring_buffer *buf, int cpu) +void lib_ring_buffer_print_records_count(struct channel *chan, + struct lib_ring_buffer *buf, + int cpu) { const struct lib_ring_buffer_config *config = &chan->backend.config; - void *priv = chan->backend.priv; if (!strcmp(chan->backend.name, "relay-metadata")) { printk(KERN_DEBUG "ring buffer %s: %lu records written, " @@ -1506,7 +1519,26 @@ void lib_ring_buffer_print_errors(struct channel *chan, chan->backend.name, cpu, v_read(config, &buf->records_count), v_read(config, &buf->records_overrun)); + } +} +#else +static +void lib_ring_buffer_print_records_count(struct channel *chan, + struct lib_ring_buffer *buf, + int cpu) +{ +} +#endif + +static +void lib_ring_buffer_print_errors(struct channel *chan, + struct lib_ring_buffer *buf, int cpu) +{ + const struct lib_ring_buffer_config *config = &chan->backend.config; + void *priv = chan->backend.priv; + lib_ring_buffer_print_records_count(chan, buf, cpu); + if (strcmp(chan->backend.name, "relay-metadata")) { if (v_read(config, &buf->records_lost_full) || v_read(config, &buf->records_lost_wrap) || v_read(config, &buf->records_lost_big)) @@ -1582,14 +1614,26 @@ void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf, unsigned long oldidx = subbuf_index(offsets->old - 1, chan); unsigned long commit_count, padding_size, data_size; struct commit_counters_hot *cc_hot; + u64 *ts_end; data_size = subbuf_offset(offsets->old - 1, chan) + 1; padding_size = chan->backend.subbuf_size - data_size; subbuffer_set_data_size(config, &buf->backend, oldidx, data_size); + ts_end = &buf->ts_end[oldidx]; /* - * Order all writes to buffer before the commit count update that will - * determine that the subbuffer is full. + * This is the last space reservation in that sub-buffer before + * it gets delivered. This provides exclusive access to write to + * this sub-buffer's ts_end. There are also no concurrent + * readers of that ts_end because delivery of that sub-buffer is + * postponed until the commit counter is incremented for the + * current space reservation. + */ + *ts_end = tsc; + + /* + * Order all writes to buffer and store to ts_end before the commit + * count update that will determine that the subbuffer is full. */ if (config->ipi == RING_BUFFER_IPI_BARRIER) { /* @@ -1670,10 +1714,21 @@ void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf, { const struct lib_ring_buffer_config *config = &chan->backend.config; unsigned long endidx, data_size; + u64 *ts_end; endidx = subbuf_index(offsets->end - 1, chan); data_size = subbuf_offset(offsets->end - 1, chan) + 1; subbuffer_set_data_size(config, &buf->backend, endidx, data_size); + ts_end = &buf->ts_end[endidx]; + /* + * This is the last space reservation in that sub-buffer before + * it gets delivered. This provides exclusive access to write to + * this sub-buffer's ts_end. There are also no concurrent + * readers of that ts_end because delivery of that sub-buffer is + * postponed until the commit counter is incremented for the + * current space reservation. 
+ */ + *ts_end = tsc; } /* @@ -1903,6 +1958,16 @@ void lib_ring_buffer_switch_remote_empty(struct lib_ring_buffer *buf) } EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_remote_empty); +void lib_ring_buffer_clear(struct lib_ring_buffer *buf) +{ + struct lib_ring_buffer_backend *bufb = &buf->backend; + struct channel *chan = bufb->chan; + + lib_ring_buffer_switch_remote(buf); + lib_ring_buffer_clear_reader(buf, chan); +} +EXPORT_SYMBOL_GPL(lib_ring_buffer_clear); + /* * Returns : * 0 if ok @@ -2237,14 +2302,24 @@ void lib_ring_buffer_check_deliver_slow(const struct lib_ring_buffer_config *con if (likely(v_cmpxchg(config, &buf->commit_cold[idx].cc_sb, old_commit_count, old_commit_count + 1) == old_commit_count)) { + u64 *ts_end; + /* * Start of exclusive subbuffer access. We are * guaranteed to be the last writer in this subbuffer * and any other writer trying to access this subbuffer * in this state is required to drop records. + * + * We can read the ts_end for the current sub-buffer + * which has been saved by the very last space + * reservation for the current sub-buffer. + * + * Order increment of commit counter before reading ts_end. */ + smp_mb(); + ts_end = &buf->ts_end[idx]; deliver_count_events(config, buf, idx); - config->cb.buffer_end(buf, tsc, idx, + config->cb.buffer_end(buf, *ts_end, idx, lib_ring_buffer_get_data_size(config, buf, idx));
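
Note on the core change in this range: each sub-buffer gains a ts_end slot. The last space reservation in a sub-buffer (in lib_ring_buffer_switch_old_end/new_end) stores its timestamp there, and delivery (lib_ring_buffer_check_deliver_slow) later reports that saved value to buffer_end() instead of the timestamp of whichever context happens to deliver the sub-buffer, so the reported end timestamp matches the last record actually written. The ordering contract is: store ts_end, then publish via the commit counter; on delivery, observe the full commit counter, then read ts_end. The following is a minimal standalone sketch of that contract in portable C11 atomics, not LTTng code; the names (subbuf_model, switch_end, deliver) and the <stdatomic.h> fences are assumptions made for the sketch, whereas the kernel code uses v_add/v_cmpxchg, smp_wmb() and smp_mb().

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative model of one sub-buffer (names made up for this sketch). */
struct subbuf_model {
	uint64_t ts_end;              /* plain store: single writer at switch time */
	atomic_ulong commit_count;    /* stands in for commit_cold[idx].cc_sb */
};

/*
 * Writer side: the last reservation in the sub-buffer has exclusive access
 * to ts_end, so a plain store suffices, but it must be ordered before the
 * commit-count update that marks the sub-buffer as full.
 */
static void switch_end(struct subbuf_model *sb, uint64_t tsc)
{
	sb->ts_end = tsc;
	/* Plays the role of the barrier before the commit count update. */
	atomic_thread_fence(memory_order_release);
	atomic_fetch_add_explicit(&sb->commit_count, 1, memory_order_relaxed);
}

/*
 * Delivery side: once the commit counter shows the sub-buffer is full,
 * order that observation before loading ts_end, then report ts_end
 * rather than the delivering context's own timestamp.
 */
static uint64_t deliver(struct subbuf_model *sb)
{
	while (atomic_load_explicit(&sb->commit_count, memory_order_relaxed) == 0)
		;	/* wait until the final commit is visible */
	/* Plays the role of the smp_mb() after winning the cc_sb cmpxchg. */
	atomic_thread_fence(memory_order_acquire);
	return sb->ts_end;
}

int main(void)
{
	struct subbuf_model sb = { .commit_count = 0 };

	switch_end(&sb, 42);	/* last reservation records its timestamp */
	printf("buffer_end timestamp: %llu\n",
	       (unsigned long long)deliver(&sb));
	return 0;
}

As the added comments in the patch state, no barrier is needed around the ts_end store itself for mutual exclusion (delivery is postponed until the commit counter covers the current reservation), only for ordering the store against the commit-count publication and its later read.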