X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=lib%2Fringbuffer%2Fring_buffer_frontend.c;h=bff920d16a0f6a7a40d3eb2325bb9612a5639273;hb=d8092cc6c4277293c508a6f9059cc52a1f186bde;hp=9ecabb0b20ee02ff2b6b57fda3ef4a53ef3a7aef;hpb=e4d878f6be12c2cba10aab1e41b24e8b603b200e;p=lttng-modules.git

diff --git a/lib/ringbuffer/ring_buffer_frontend.c b/lib/ringbuffer/ring_buffer_frontend.c
index 9ecabb0b..bff920d1 100644
--- a/lib/ringbuffer/ring_buffer_frontend.c
+++ b/lib/ringbuffer/ring_buffer_frontend.c
@@ -1,7 +1,22 @@
 /*
  * ring_buffer_frontend.c
  *
- * (C) Copyright 2005-2010 - Mathieu Desnoyers
+ * Copyright (C) 2005-2012 Mathieu Desnoyers
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
  *
  * Ring buffer wait-free buffer synchronization. Producer-consumer and flight
  * recorder (overwrite) modes. See thesis:
@@ -34,8 +49,6 @@
  * - splice one subbuffer worth of data to a pipe
  * - splice the data from pipe to disk/network
  * - put_subbuf
- *
- * Dual LGPL v2.1/GPL v2 license.
  */
 
 #include 
@@ -103,7 +116,7 @@ void lib_ring_buffer_free(struct lib_ring_buffer *buf)
 void lib_ring_buffer_reset(struct lib_ring_buffer *buf)
 {
 	struct channel *chan = buf->backend.chan;
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned int i;
 
 	/*
@@ -161,10 +174,9 @@ EXPORT_SYMBOL_GPL(channel_reset);
 int lib_ring_buffer_create(struct lib_ring_buffer *buf,
 			   struct channel_backend *chanb, int cpu)
 {
-	const struct lib_ring_buffer_config *config = chanb->config;
+	const struct lib_ring_buffer_config *config = &chanb->config;
 	struct channel *chan = container_of(chanb, struct channel, backend);
 	void *priv = chanb->priv;
-	unsigned int num_subbuf;
 	size_t subbuf_header_size;
 	u64 tsc;
 	int ret;
@@ -203,8 +215,8 @@ int lib_ring_buffer_create(struct lib_ring_buffer *buf,
 		goto free_commit;
 	}
 
-	num_subbuf = chan->backend.num_subbuf;
 	init_waitqueue_head(&buf->read_wait);
+	init_waitqueue_head(&buf->write_wait);
 	raw_spin_lock_init(&buf->raw_tick_nohz_spinlock);
 
 	/*
@@ -254,7 +266,7 @@ static void switch_buffer_timer(unsigned long data)
 {
 	struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
 	struct channel *chan = buf->backend.chan;
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 
 	/*
 	 * Only flush buffers periodically if readers are active.
@@ -276,7 +288,7 @@ static void switch_buffer_timer(unsigned long data)
 static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf)
 {
 	struct channel *chan = buf->backend.chan;
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 
 	if (!chan->switch_timer_interval || buf->switch_timer_enabled)
 		return;
@@ -312,7 +324,7 @@ static void read_buffer_timer(unsigned long data)
 {
 	struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
 	struct channel *chan = buf->backend.chan;
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 
 	CHAN_WARN_ON(chan, !buf->backend.allocated);
 
@@ -336,7 +348,7 @@ static void read_buffer_timer(unsigned long data)
 static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf)
 {
 	struct channel *chan = buf->backend.chan;
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 
 	if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
 	    || !chan->read_timer_interval
@@ -361,7 +373,7 @@ static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf)
 static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf)
 {
 	struct channel *chan = buf->backend.chan;
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 
 	if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
 	    || !chan->read_timer_interval
@@ -390,7 +402,7 @@ static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf)
  * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
  */
 static
-int __cpuinit lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
+int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
 					      unsigned long action,
 					      void *hcpu)
 {
@@ -398,7 +410,7 @@ int __cpuinit lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
 	struct channel *chan = container_of(nb, struct channel,
 					    cpu_hp_notifier);
 	struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 
 	if (!chan->cpu_hp_enable)
 		return NOTIFY_DONE;
@@ -410,6 +422,7 @@ int __cpuinit lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
 	case CPU_DOWN_FAILED_FROZEN:
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
+		wake_up_interruptible(&chan->hp_wait);
 		lib_ring_buffer_start_switch_timer(buf);
 		lib_ring_buffer_start_read_timer(buf);
 		return NOTIFY_OK;
@@ -437,7 +450,7 @@ int __cpuinit lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
 }
 #endif
 
-#if defined(CONFIG_NO_HZ) && !defined(CONFIG_LIB_RING_BUFFER)
+#if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
 /*
  * For per-cpu buffers, call the reader wakeups before switching the buffer, so
  * that wake-up-tracing generated events are flushed before going idle (in
@@ -452,7 +465,7 @@ static int notrace ring_buffer_tick_nohz_callback(struct notifier_block *nb,
 {
 	struct channel *chan = container_of(nb, struct channel,
 					    tick_nohz_notifier);
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	struct lib_ring_buffer *buf;
 	int cpu = smp_processor_id();
 
@@ -517,14 +530,14 @@ void notrace lib_ring_buffer_tick_nohz_restart(void)
 	atomic_notifier_call_chain(&tick_nohz_notifier, TICK_NOHZ_RESTART,
 				   NULL);
 }
-#endif /* defined(CONFIG_NO_HZ) && !defined(CONFIG_LIB_RING_BUFFER) */
+#endif /* defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) */
 
 /*
  * Holds CPU hotplug.
  */
 static void channel_unregister_notifiers(struct channel *chan)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	int cpu;
 
 	channel_iterator_unregister_notifiers(chan);
@@ -624,17 +637,19 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config,
 	chan->commit_count_mask = (~0UL >> chan->backend.num_subbuf_order);
 	chan->switch_timer_interval = usecs_to_jiffies(switch_timer_interval);
 	chan->read_timer_interval = usecs_to_jiffies(read_timer_interval);
+	kref_init(&chan->ref);
 	init_waitqueue_head(&chan->read_wait);
+	init_waitqueue_head(&chan->hp_wait);
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#ifdef CONFIG_NO_HZ
+#if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
 		/* Only benefit from NO_HZ idle with per-cpu buffers for now. */
 		chan->tick_nohz_notifier.notifier_call =
 			ring_buffer_tick_nohz_callback;
 		chan->tick_nohz_notifier.priority = ~0U;
 		atomic_notifier_chain_register(&tick_nohz_notifier,
 					       &chan->tick_nohz_notifier);
-#endif /* CONFIG_NO_HZ */
+#endif /* defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) */
 
 		/*
 		 * In case of non-hotplug cpu, if the ring-buffer is allocated
@@ -685,21 +700,28 @@ error:
 }
 EXPORT_SYMBOL_GPL(channel_create);
 
+static
+void channel_release(struct kref *kref)
+{
+	struct channel *chan = container_of(kref, struct channel, ref);
+	channel_free(chan);
+}
+
 /**
  * channel_destroy - Finalize, wait for q.s. and destroy channel.
  * @chan: channel to destroy
  *
 * Holds cpu hotplug.
- * Call "destroy" callback, finalize channels, wait for readers to release their
- * reference, then destroy ring buffer data. Note that when readers have
- * completed data consumption of finalized channels, get_subbuf() will return
- * -ENODATA. They should release their handle at that point.
- * Returns the private data pointer.
+ * Call "destroy" callback, finalize channels, and then decrement the
+ * channel reference count. Note that when readers have completed data
+ * consumption of finalized channels, get_subbuf() will return -ENODATA.
+ * They should release their handle at that point. Returns the private
+ * data pointer.
  */
 void *channel_destroy(struct channel *chan)
 {
 	int cpu;
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	void *priv;
 
 	channel_unregister_notifiers(chan);
@@ -740,14 +762,11 @@ void *channel_destroy(struct channel *chan)
 			ACCESS_ONCE(buf->finalized) = 1;
 			wake_up_interruptible(&buf->read_wait);
 		}
+	ACCESS_ONCE(chan->finalized) = 1;
+	wake_up_interruptible(&chan->hp_wait);
 	wake_up_interruptible(&chan->read_wait);
-
-	while (atomic_long_read(&chan->read_ref) > 0)
-		msleep(100);
-	/* Finish waiting for refcount before free */
-	smp_mb();
 	priv = chan->backend.priv;
-	channel_free(chan);
+	kref_put(&chan->ref, channel_release);
 	return priv;
 }
 EXPORT_SYMBOL_GPL(channel_destroy);
@@ -769,7 +788,7 @@ int lib_ring_buffer_open_read(struct lib_ring_buffer *buf)
 
 	if (!atomic_long_add_unless(&buf->active_readers, 1, 1))
 		return -EBUSY;
-	atomic_long_inc(&chan->read_ref);
+	kref_get(&chan->ref);
 	smp_mb__after_atomic_inc();
 	return 0;
 }
@@ -781,8 +800,8 @@ void lib_ring_buffer_release_read(struct lib_ring_buffer *buf)
 
 	CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
 	smp_mb__before_atomic_dec();
-	atomic_long_dec(&chan->read_ref);
 	atomic_long_dec(&buf->active_readers);
+	kref_put(&chan->ref, channel_release);
 }
 EXPORT_SYMBOL_GPL(lib_ring_buffer_release_read);
 
@@ -812,7 +831,7 @@ int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
 			     unsigned long *consumed, unsigned long *produced)
 {
 	struct channel *chan = buf->backend.chan;
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned long consumed_cur, write_offset;
 	int finalized;
 
@@ -862,6 +881,8 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_snapshot);
 
 /**
  * lib_ring_buffer_put_snapshot - move consumed counter forward
+ *
+ * Should only be called from consumer context.
  * @buf: ring buffer
  * @consumed_new: new consumed count value
  */
@@ -883,6 +904,8 @@ void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
 	while ((long) consumed - (long) consumed_new < 0)
 		consumed = atomic_long_cmpxchg(&buf->consumed, consumed,
 					       consumed_new);
+	/* Wake-up the metadata producer */
+	wake_up_interruptible(&buf->write_wait);
 }
 EXPORT_SYMBOL_GPL(lib_ring_buffer_move_consumer);
 
@@ -899,7 +922,7 @@ int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
 			       unsigned long consumed)
 {
 	struct channel *chan = buf->backend.chan;
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
 	int ret;
 	int finalized;
@@ -1045,7 +1068,7 @@ void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf)
 {
 	struct lib_ring_buffer_backend *bufb = &buf->backend;
 	struct channel *chan = bufb->chan;
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned long read_sb_bindex, consumed_idx, consumed;
 
 	CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
@@ -1104,7 +1127,7 @@ void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf,
 					    unsigned long cons_offset,
 					    int cpu)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned long cons_idx, commit_count, commit_count_sb;
 
 	cons_idx = subbuf_index(cons_offset, chan);
@@ -1130,15 +1153,9 @@ void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf,
 					 struct channel *chan,
 					 void *priv, int cpu)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned long write_offset, cons_offset;
 
-	/*
-	 * Can be called in the error path of allocation when
-	 * trans_channel_data is not yet set.
-	 */
-	if (!chan)
-		return;
 	/*
 	 * No need to order commit_count, write_offset and cons_offset reads
 	 * because we execute at teardown when no more writer nor reader
@@ -1147,7 +1164,7 @@ void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf,
 	write_offset = v_read(config, &buf->offset);
 	cons_offset = atomic_long_read(&buf->consumed);
 	if (write_offset != cons_offset)
-		printk(KERN_WARNING
+		printk(KERN_DEBUG
 		       "ring buffer %s, cpu %d: "
 		       "non-consumed data\n"
 		       "  [ %lu bytes written, %lu bytes read ]\n",
@@ -1166,27 +1183,34 @@ static
 void lib_ring_buffer_print_errors(struct channel *chan,
 				  struct lib_ring_buffer *buf, int cpu)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	void *priv = chan->backend.priv;
 
-	printk(KERN_DEBUG "ring buffer %s, cpu %d: %lu records written, "
-			  "%lu records overrun\n",
-			  chan->backend.name, cpu,
-			  v_read(config, &buf->records_count),
-			  v_read(config, &buf->records_overrun));
-
-	if (v_read(config, &buf->records_lost_full)
-	    || v_read(config, &buf->records_lost_wrap)
-	    || v_read(config, &buf->records_lost_big))
-		printk(KERN_WARNING
-		       "ring buffer %s, cpu %d: records were lost. Caused by:\n"
-		       "  [ %lu buffer full, %lu nest buffer wrap-around, "
-		       "%lu event too big ]\n",
-		       chan->backend.name, cpu,
-		       v_read(config, &buf->records_lost_full),
-		       v_read(config, &buf->records_lost_wrap),
-		       v_read(config, &buf->records_lost_big));
-
+	if (!strcmp(chan->backend.name, "relay-metadata")) {
+		printk(KERN_DEBUG "ring buffer %s: %lu records written, "
+			"%lu records overrun\n",
+			chan->backend.name,
+			v_read(config, &buf->records_count),
+			v_read(config, &buf->records_overrun));
+	} else {
+		printk(KERN_DEBUG "ring buffer %s, cpu %d: %lu records written, "
+			"%lu records overrun\n",
+			chan->backend.name, cpu,
+			v_read(config, &buf->records_count),
+			v_read(config, &buf->records_overrun));
+
+		if (v_read(config, &buf->records_lost_full)
+		    || v_read(config, &buf->records_lost_wrap)
+		    || v_read(config, &buf->records_lost_big))
+			printk(KERN_WARNING
+			       "ring buffer %s, cpu %d: records were lost. Caused by:\n"
+			       "  [ %lu buffer full, %lu nest buffer wrap-around, "
+			       "%lu event too big ]\n",
+			       chan->backend.name, cpu,
+			       v_read(config, &buf->records_lost_full),
+			       v_read(config, &buf->records_lost_wrap),
+			       v_read(config, &buf->records_lost_big));
+	}
 	lib_ring_buffer_print_buffer_errors(buf, chan, priv, cpu);
 }
 
@@ -1201,7 +1225,7 @@ void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
 				      struct switch_offsets *offsets,
 				      u64 tsc)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned long oldidx = subbuf_index(offsets->old, chan);
 	unsigned long commit_count;
 
@@ -1245,7 +1269,7 @@ void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf,
 				    struct switch_offsets *offsets,
 				    u64 tsc)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
 	unsigned long commit_count, padding_size, data_size;
 
@@ -1288,7 +1312,7 @@ void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
 				      struct switch_offsets *offsets,
 				      u64 tsc)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned long beginidx = subbuf_index(offsets->begin, chan);
 	unsigned long commit_count;
 
@@ -1330,7 +1354,7 @@ void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
 				    struct switch_offsets *offsets,
 				    u64 tsc)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned long endidx = subbuf_index(offsets->end - 1, chan);
 	unsigned long commit_count, padding_size, data_size;
 
@@ -1372,7 +1396,7 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
 				    struct switch_offsets *offsets,
 				    u64 *tsc)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned long off;
 
 	offsets->begin = v_read(config, &buf->offset);
@@ -1400,6 +1424,19 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
 	 */
 	if (mode == SWITCH_FLUSH || off > 0) {
 		if (unlikely(off == 0)) {
+			/*
+			 * A final flush that encounters an empty
+			 * sub-buffer cannot switch buffer if a
+			 * reader is located within this sub-buffer.
+			 * Anyway, the purpose of final flushing of a
+			 * sub-buffer at offset 0 is to handle the case
+			 * of entirely empty stream.
+			 */
+			if (unlikely(subbuf_trunc(offsets->begin, chan)
+				     - subbuf_trunc((unsigned long)
+					     atomic_long_read(&buf->consumed), chan)
+				     >= chan->backend.buf_size))
+				return -1;
 			/*
 			 * The client does not save any header information.
 			 * Don't switch empty subbuffer on finalize, because it
@@ -1431,7 +1468,7 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
 void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode mode)
 {
 	struct channel *chan = buf->backend.chan;
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	struct switch_offsets offsets;
 	unsigned long oldidx;
 	u64 tsc;
@@ -1482,7 +1519,9 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_slow);
 /*
  * Returns :
  * 0 if ok
- * !0 if execution must be aborted.
+ * -ENOSPC if event size is too large for packet.
+ * -ENOBUFS if there is currently not enough space in buffer for the event.
+ * -EIO if data cannot be written into the buffer for any other reason.
  */
 static
 int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
@@ -1490,7 +1529,7 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
 				     struct switch_offsets *offsets,
 				     struct lib_ring_buffer_ctx *ctx)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned long reserve_commit_diff;
 
 	offsets->begin = v_read(config, &buf->offset);
@@ -1501,18 +1540,19 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
 	offsets->pre_header_padding = 0;
 
 	ctx->tsc = config->cb.ring_buffer_clock_read(chan);
+	if ((int64_t) ctx->tsc == -EIO)
+		return -EIO;
 
 	if (last_tsc_overflow(config, buf, ctx->tsc))
-		ctx->rflags = RING_BUFFER_RFLAG_FULL_TSC;
+		ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
 
 	if (unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
 		offsets->switch_new_start = 1;		/* For offsets->begin */
 	} else {
 		offsets->size = config->cb.record_header_size(config, chan,
 							      offsets->begin,
-							      ctx->data_size,
 							      &offsets->pre_header_padding,
-							      ctx->rflags, ctx);
+							      ctx);
 		offsets->size +=
 			lib_ring_buffer_align(offsets->begin + offsets->size,
 					      ctx->largest_align)
@@ -1553,7 +1593,7 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
 		 * and we are full : record is lost.
 		 */
 		v_inc(config, &buf->records_lost_full);
-		return -1;
+		return -ENOBUFS;
 	} else {
 		/*
 		 * Next subbuffer not being written to, and we
@@ -1570,14 +1610,13 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
 		 * many nested writes over a reserve/commit pair.
 		 */
 		v_inc(config, &buf->records_lost_wrap);
-		return -1;
+		return -EIO;
 	}
 	offsets->size =
 		config->cb.record_header_size(config, chan,
 					      offsets->begin,
-					      ctx->data_size,
 					      &offsets->pre_header_padding,
-					      ctx->rflags, ctx);
+					      ctx);
 	offsets->size +=
 		lib_ring_buffer_align(offsets->begin + offsets->size,
 				      ctx->largest_align)
@@ -1589,7 +1628,7 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
 		 * complete the sub-buffer switch.
 		 */
 		v_inc(config, &buf->records_lost_big);
-		return -1;
+		return -ENOSPC;
 	} else {
 		/*
 		 * We just made a successful buffer switch and the
@@ -1618,15 +1657,17 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
 * lib_ring_buffer_reserve_slow - Atomic slot reservation in a buffer.
 * @ctx: ring buffer context.
 *
- * Return : -ENOSPC if not enough space, else returns 0.
+ * Return : -ENOBUFS if not enough space, -ENOSPC if event size too large,
+ * -EIO for other errors, else returns 0.
 * It will take care of sub-buffer switching.
 */
 int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
 {
 	struct channel *chan = ctx->chan;
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	struct lib_ring_buffer *buf;
 	struct switch_offsets offsets;
+	int ret;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
 		buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
@@ -1637,9 +1678,10 @@ int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
 	offsets.size = 0;
 
 	do {
-		if (unlikely(lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
-							      ctx)))
-			return -ENOSPC;
+		ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
+						       ctx);
+		if (unlikely(ret))
+			return ret;
 	} while (unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
 				    offsets.end)
 			  != offsets.old));
@@ -1687,3 +1729,20 @@ int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(lib_ring_buffer_reserve_slow);
+
+int __init init_lib_ring_buffer_frontend(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		spin_lock_init(&per_cpu(ring_buffer_nohz_lock, cpu));
+	return 0;
+}
+
+module_init(init_lib_ring_buffer_frontend);
+
+void __exit exit_lib_ring_buffer_frontend(void)
+{
+}
+
+module_exit(exit_lib_ring_buffer_frontend);
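
---

Note on the lifetime change above: channel_destroy() used to poll (the
msleep(100) loop) until chan->read_ref dropped to zero before calling
channel_free(). The patch replaces that with a kref shared by the channel
and its readers, so whichever side drops the last reference frees the
channel. A minimal sketch of the same pattern, with hypothetical demo_*
names standing in for the lttng types:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_channel {
	struct kref ref;	/* replaces atomic_long_t read_ref + polling */
	/* ... buffers, wait queues ... */
};

static void demo_channel_release(struct kref *kref)
{
	struct demo_channel *chan = container_of(kref, struct demo_channel, ref);

	kfree(chan);		/* stands in for channel_free() */
}

static struct demo_channel *demo_channel_create(void)
{
	struct demo_channel *chan = kzalloc(sizeof(*chan), GFP_KERNEL);

	if (chan)
		kref_init(&chan->ref);	/* refcount = 1, owned by the channel */
	return chan;
}

/* Reader open: take a reference so the channel outlives the reader. */
static void demo_open_read(struct demo_channel *chan)
{
	kref_get(&chan->ref);
}

/* Reader close: drop it; frees if channel_destroy() already ran. */
static void demo_release_read(struct demo_channel *chan)
{
	kref_put(&chan->ref, demo_channel_release);
}

/* Destroy: drop the channel's own reference; frees once no reader holds one. */
static void demo_channel_destroy(struct demo_channel *chan)
{
	kref_put(&chan->ref, demo_channel_release);
}

This is why channel_destroy() can now return immediately: freeing is
deferred to the last lib_ring_buffer_release_read() instead of being
awaited in a sleep loop.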
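Note on the reserve error codes: the slow path now distinguishes why a
record cannot be written, where it previously collapsed every failure
into -ENOSPC. A sketch of caller-side handling; demo_reserve() is
hypothetical, and only the return values documented in the hunks above
are assumed:

/* Assumes the lttng ring buffer frontend declarations are in scope. */
static int demo_reserve(struct lib_ring_buffer_ctx *ctx)
{
	int ret = lib_ring_buffer_reserve_slow(ctx);

	switch (ret) {
	case 0:			/* slot reserved: write, then commit */
		return 0;
	case -ENOBUFS:		/* buffer currently full (records_lost_full) */
	case -ENOSPC:		/* event can never fit in a sub-buffer
				   (records_lost_big) */
	case -EIO:		/* clock read error or nested-write
				   wrap-around (records_lost_wrap) */
	default:
		return ret;	/* record is lost; the counters say why */
	}
}

Callers that silently dropped the event on any failure keep working
unchanged, since every error path still returns a negative value.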
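Note on buf->write_wait (initialized in lib_ring_buffer_create() above):
it pairs a producer with the consumer. lib_ring_buffer_move_consumer()
now wakes the queue after advancing the consumed counter, so a metadata
producer that found the buffer full can sleep instead of spinning. A
hypothetical producer-side helper; demo_buffer_full() is an assumed
predicate, not something the patch provides:

#include <linux/wait.h>

/* Assumed predicate: true while no space can be reserved in buf. */
static bool demo_buffer_full(struct lib_ring_buffer *buf);

static int demo_wait_for_space(struct lib_ring_buffer *buf)
{
	/*
	 * Woken by the wake_up_interruptible(&buf->write_wait) the patch
	 * adds to lib_ring_buffer_move_consumer().
	 */
	return wait_event_interruptible(buf->write_wait,
					!demo_buffer_full(buf));
}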
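Note on the guard added to lib_ring_buffer_try_switch_slow(): it refuses
a final flush of an empty sub-buffer whenever the reader still sits in
the sub-buffer the switch would recycle, i.e. when the write position is
a full buffer ahead of the consumed position. A standalone arithmetic
sketch; the power-of-two masking in subbuf_trunc() and the sizes are
illustrative assumptions:

#include <stdio.h>

#define SUBBUF_SIZE	4096UL	/* assumed power of two */
#define NUM_SUBBUF	4UL
#define BUF_SIZE	(SUBBUF_SIZE * NUM_SUBBUF)

/* Align a monotonically increasing offset down to its sub-buffer start. */
static unsigned long subbuf_trunc(unsigned long off)
{
	return off & ~(SUBBUF_SIZE - 1);
}

int main(void)
{
	unsigned long write_off = 5 * SUBBUF_SIZE;	/* producer offset */
	unsigned long consumed  = 1 * SUBBUF_SIZE;	/* reader offset   */

	/*
	 * 5*4096 - 1*4096 == 4*4096 == BUF_SIZE: both offsets map to the
	 * same sub-buffer slot (index 1 of 4), so the flush must not
	 * switch into the reader's sub-buffer.
	 */
	if (subbuf_trunc(write_off) - subbuf_trunc(consumed) >= BUF_SIZE)
		printf("switch refused: reader still in target sub-buffer\n");
	else
		printf("switch allowed\n");
	return 0;
}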