X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=lib%2Fringbuffer%2Fring_buffer_iterator.c;h=d25db72502756b96a7bc3d65ebb33da635436023;hb=80bb26003945e96a8ade9c8788dab9b2e08cbc08;hp=24e3dbb36a39882eeae8a53011f5ebd3303271ab;hpb=b06ed645c23ac947e0f5ac01b6eba6947c53d77c;p=lttng-modules.git

diff --git a/lib/ringbuffer/ring_buffer_iterator.c b/lib/ringbuffer/ring_buffer_iterator.c
index 24e3dbb3..d25db725 100644
--- a/lib/ringbuffer/ring_buffer_iterator.c
+++ b/lib/ringbuffer/ring_buffer_iterator.c
@@ -1,4 +1,5 @@
-/*
+/* SPDX-License-Identifier: (GPL-2.0 OR LGPL-2.1)
+ *
  * ring_buffer_iterator.c
  *
  * Ring buffer and channel iterators. Get each event of a channel in order. Uses
@@ -6,27 +7,11 @@
  * complexity for the "get next event" operation.
  *
  * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; only
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author:
- *      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  */
 
-#include "../../wrapper/ringbuffer/iterator.h"
-#include "../../wrapper/file.h"
+#include <wrapper/ringbuffer/iterator.h>
+#include <wrapper/file.h>
+#include <wrapper/uaccess.h>
 #include <linux/jiffies.h>
 #include <linux/delay.h>
 #include <linux/module.h>
@@ -61,7 +46,7 @@ restart:
         switch (iter->state) {
         case ITER_GET_SUBBUF:
                 ret = lib_ring_buffer_get_next_subbuf(buf);
-                if (ret && !ACCESS_ONCE(buf->finalized)
+                if (ret && !READ_ONCE(buf->finalized)
                     && config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
                         /*
                          * Use "pull" scheme for global buffers. The reader
@@ -71,7 +56,7 @@ restart:
                          * Per-CPU buffers rather use a "push" scheme because
                          * the IPI needed to flush all CPU's buffers is too
                          * costly. In the "push" scheme, the reader waits for
-                         * the writer periodic deferrable timer to flush the
+                         * the writer periodic timer to flush the
                          * buffers (keeping track of a quiescent state
                          * timestamp). Therefore, the writer "pushes" data out
                          * of the buffers rather than letting the reader "pull"
@@ -350,6 +335,25 @@ void lib_ring_buffer_iterator_init(struct channel *chan, struct lib_ring_buffer
         list_add(&buf->iter.empty_node, &chan->iter.empty_head);
 }
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
+int lttng_cpuhp_rb_iter_online(unsigned int cpu,
+                struct lttng_cpuhp_node *node)
+{
+        struct channel *chan = container_of(node, struct channel,
+                                        cpuhp_iter_online);
+        struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+        const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+        CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
+
+        lib_ring_buffer_iterator_init(chan, buf);
+        return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_iter_online);
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
 #ifdef CONFIG_HOTPLUG_CPU
 static
 int channel_iterator_cpu_hotplug(struct notifier_block *nb,
@@ -380,13 +384,15 @@ int channel_iterator_cpu_hotplug(struct notifier_block *nb,
 }
 #endif
 
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
 int channel_iterator_init(struct channel *chan)
 {
         const struct lib_ring_buffer_config *config = &chan->backend.config;
         struct lib_ring_buffer *buf;
 
         if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-                int cpu, ret;
+                int ret;
 
                 INIT_LIST_HEAD(&chan->iter.empty_head);
                 ret = lttng_heap_init(&chan->iter.heap,
@@ -394,29 +400,43 @@ int channel_iterator_init(struct channel *chan)
                                 GFP_KERNEL, buf_is_higher);
                 if (ret)
                         return ret;
-                /*
-                 * In case of non-hotplug cpu, if the ring-buffer is allocated
-                 * in early initcall, it will not be notified of secondary cpus.
-                 * In that off case, we need to allocate for all possible cpus.
-                 */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+                chan->cpuhp_iter_online.component = LTTNG_RING_BUFFER_ITER;
+                ret = cpuhp_state_add_instance(lttng_rb_hp_online,
+                                &chan->cpuhp_iter_online.node);
+                if (ret)
+                        return ret;
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+                {
+                        int cpu;
+
+                        /*
+                         * In case of non-hotplug cpu, if the ring-buffer is allocated
+                         * in early initcall, it will not be notified of secondary cpus.
+                         * In that off case, we need to allocate for all possible cpus.
+                         */
 #ifdef CONFIG_HOTPLUG_CPU
-                chan->hp_iter_notifier.notifier_call =
-                        channel_iterator_cpu_hotplug;
-                chan->hp_iter_notifier.priority = 10;
-                register_cpu_notifier(&chan->hp_iter_notifier);
-                get_online_cpus();
-                for_each_online_cpu(cpu) {
-                        buf = per_cpu_ptr(chan->backend.buf, cpu);
-                        lib_ring_buffer_iterator_init(chan, buf);
-                }
-                chan->hp_iter_enable = 1;
-                put_online_cpus();
+                        chan->hp_iter_notifier.notifier_call =
+                                channel_iterator_cpu_hotplug;
+                        chan->hp_iter_notifier.priority = 10;
+                        register_cpu_notifier(&chan->hp_iter_notifier);
+
+                        get_online_cpus();
+                        for_each_online_cpu(cpu) {
+                                buf = per_cpu_ptr(chan->backend.buf, cpu);
+                                lib_ring_buffer_iterator_init(chan, buf);
+                        }
+                        chan->hp_iter_enable = 1;
+                        put_online_cpus();
 #else
-                for_each_possible_cpu(cpu) {
-                        buf = per_cpu_ptr(chan->backend.buf, cpu);
-                        lib_ring_buffer_iterator_init(chan, buf);
-                }
+                        for_each_possible_cpu(cpu) {
+                                buf = per_cpu_ptr(chan->backend.buf, cpu);
+                                lib_ring_buffer_iterator_init(chan, buf);
+                        }
 #endif
+                }
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
         } else {
                 buf = channel_get_ring_buffer(config, chan, 0);
                 lib_ring_buffer_iterator_init(chan, buf);
@@ -429,8 +449,18 @@ void channel_iterator_unregister_notifiers(struct channel *chan)
         const struct lib_ring_buffer_config *config = &chan->backend.config;
 
         if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+                {
+                        int ret;
+
+                        ret = cpuhp_state_remove_instance(lttng_rb_hp_online,
+                                        &chan->cpuhp_iter_online.node);
+                        WARN_ON(ret);
+                }
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
                 chan->hp_iter_enable = 0;
                 unregister_cpu_notifier(&chan->hp_iter_notifier);
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
         }
 }
 
@@ -576,7 +606,7 @@ ssize_t channel_ring_buffer_file_read(struct file *filp,
         ssize_t len;
 
         might_sleep();
-        if (!access_ok(VERIFY_WRITE, user_buf, count))
+        if (!lttng_access_ok(VERIFY_WRITE, user_buf, count))
                 return -EFAULT;
 
         /* Finish copy of previous record */
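
The hunks above replace the old register_cpu_notifier()-based hotplug handling with the multi-instance CPU hotplug state machine available since Linux 4.10: lttng_cpuhp_rb_iter_online() becomes the per-instance "online" callback, and each per-CPU channel attaches itself to the dynamic state lttng_rb_hp_online (allocated elsewhere in the module) via cpuhp_state_add_instance(). A rough, self-contained sketch of that kernel API follows; the example_* names are placeholders for illustration only, not LTTng symbols, and the callback body is deliberately empty.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cpuhotplug.h>

struct example_instance {
        struct hlist_node node;         /* linked into the cpuhp state */
        /* per-object state would go here */
};

static enum cpuhp_state example_hp_online;      /* dynamically allocated state */
static struct example_instance example_obj;

/* Runs once per registered instance whenever a CPU comes online. */
static int example_cpu_online(unsigned int cpu, struct hlist_node *node)
{
        struct example_instance *inst =
                container_of(node, struct example_instance, node);

        (void)inst;     /* per-CPU, per-instance setup would go here */
        return 0;
}

static int __init example_init(void)
{
        int ret;

        /* Reserve a dynamically numbered "online" state for this module. */
        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "example:online",
                                      example_cpu_online, NULL);
        if (ret < 0)
                return ret;
        example_hp_online = ret;

        /* Attach one instance; the callback fires for every CPU already up. */
        ret = cpuhp_state_add_instance(example_hp_online, &example_obj.node);
        if (ret) {
                cpuhp_remove_multi_state(example_hp_online);
                return ret;
        }
        return 0;
}

static void __exit example_exit(void)
{
        WARN_ON(cpuhp_state_remove_instance(example_hp_online,
                                            &example_obj.node));
        cpuhp_remove_multi_state(example_hp_online);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Because cpuhp_state_add_instance() invokes the online callback on every CPU that is already up at registration time, the 4.10+ path in channel_iterator_init() no longer needs the explicit get_online_cpus()/for_each_online_cpu() loop kept in the pre-4.10 branch.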