X-Git-Url: http://git.liburcu.org/?p=lttng-modules.git;a=blobdiff_plain;f=lib%2Fringbuffer%2Fring_buffer_iterator.c;h=15d7c75c15862b8ec4f47771dd98dc1eb8291c30;hp=ff9b8061e7b30bd34cfc87232274b74c4aac2e2e;hb=2459130397d7e7eecc44a5f06a39d65c78257eef;hpb=41affe319e5f4c049ba0ae427f485d8cdfb65488

diff --git a/lib/ringbuffer/ring_buffer_iterator.c b/lib/ringbuffer/ring_buffer_iterator.c
index ff9b8061..15d7c75c 100644
--- a/lib/ringbuffer/ring_buffer_iterator.c
+++ b/lib/ringbuffer/ring_buffer_iterator.c
@@ -1,19 +1,17 @@
-/*
- * ring_buffer_iterator.c
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
  *
- * (C) Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * ring_buffer_iterator.c
  *
  * Ring buffer and channel iterators. Get each event of a channel in order. Uses
  * a prio heap for per-cpu buffers, giving a O(log(NR_CPUS)) algorithmic
  * complexity for the "get next event" operation.
  *
- * Author:
- *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Dual LGPL v2.1/GPL v2 license.
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  */
 
-#include "../../wrapper/ringbuffer/iterator.h"
+#include <wrapper/ringbuffer/iterator.h>
+#include <wrapper/file.h>
+#include <wrapper/uaccess.h>
 #include <linux/jiffies.h>
 #include <linux/delay.h>
 #include <linux/module.h>
@@ -40,7 +38,7 @@
 ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
 					struct lib_ring_buffer *buf)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	struct lib_ring_buffer_iter *iter = &buf->iter;
 	int ret;
 
@@ -48,7 +46,7 @@ restart:
 	switch (iter->state) {
 	case ITER_GET_SUBBUF:
 		ret = lib_ring_buffer_get_next_subbuf(buf);
-		if (ret && !ACCESS_ONCE(buf->finalized)
+		if (ret && !READ_ONCE(buf->finalized)
 		    && config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
 			/*
 			 * Use "pull" scheme for global buffers. The reader
@@ -58,7 +56,7 @@ restart:
 			 * Per-CPU buffers rather use a "push" scheme because
 			 * the IPI needed to flush all CPU's buffers is too
 			 * costly. In the "push" scheme, the reader waits for
-			 * the writer periodic deferrable timer to flush the
+			 * the writer periodic timer to flush the
 			 * buffers (keeping track of a quiescent state
 			 * timestamp). Therefore, the writer "pushes" data out
 			 * of the buffers rather than letting the reader "pull"
@@ -120,7 +118,7 @@ static
 void lib_ring_buffer_get_empty_buf_records(const struct lib_ring_buffer_config *config,
 					   struct channel *chan)
 {
-	struct ptr_heap *heap = &chan->iter.heap;
+	struct lttng_ptr_heap *heap = &chan->iter.heap;
 	struct lib_ring_buffer *buf, *tmp;
 	ssize_t len;
 
@@ -151,11 +149,11 @@ void lib_ring_buffer_get_empty_buf_records(const struct lib_ring_buffer_config *
 		default:
 			/*
 			 * Insert buffer into the heap, remove from empty buffer
-			 * list. The heap should never overflow.
+			 * list.
 			 */
 			CHAN_WARN_ON(chan, len < 0);
 			list_del(&buf->iter.empty_node);
-			CHAN_WARN_ON(chan, heap_insert(heap, buf) != NULL);
+			CHAN_WARN_ON(chan, lttng_heap_insert(heap, buf));
 		}
 	}
 }
@@ -225,9 +223,9 @@ void lib_ring_buffer_wait_for_qs(const struct lib_ring_buffer_config *config,
 ssize_t channel_get_next_record(struct channel *chan,
 				struct lib_ring_buffer **ret_buf)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	struct lib_ring_buffer *buf;
-	struct ptr_heap *heap;
+	struct lttng_ptr_heap *heap;
 	ssize_t len;
 
 	if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
@@ -240,7 +238,7 @@ ssize_t channel_get_next_record(struct channel *chan,
 	/*
	 * get next record for topmost buffer.
	 */
-	buf = heap_maximum(heap);
+	buf = lttng_heap_maximum(heap);
 	if (buf) {
 		len = lib_ring_buffer_get_next_record(chan, buf);
 		/*
@@ -252,7 +250,7 @@ ssize_t channel_get_next_record(struct channel *chan,
 			buf->iter.timestamp = 0;
 			list_add(&buf->iter.empty_node, &chan->iter.empty_head);
 			/* Remove topmost buffer from the heap */
-			CHAN_WARN_ON(chan, heap_remove(heap) != buf);
+			CHAN_WARN_ON(chan, lttng_heap_remove(heap) != buf);
 			break;
 		case -ENODATA:
 			/*
@@ -260,7 +258,7 @@ ssize_t channel_get_next_record(struct channel *chan,
 			 * don't add to list of empty buffer, because it has no
 			 * more data to provide, ever.
 			 */
-			CHAN_WARN_ON(chan, heap_remove(heap) != buf);
+			CHAN_WARN_ON(chan, lttng_heap_remove(heap) != buf);
 			break;
 		case -EBUSY:
 			CHAN_WARN_ON(chan, 1);
@@ -269,15 +267,15 @@ ssize_t channel_get_next_record(struct channel *chan,
 			/*
 			 * Reinsert buffer into the heap. Note that heap can be
 			 * partially empty, so we need to use
-			 * heap_replace_max().
+			 * lttng_heap_replace_max().
 			 */
 			CHAN_WARN_ON(chan, len < 0);
-			CHAN_WARN_ON(chan, heap_replace_max(heap, buf) != buf);
+			CHAN_WARN_ON(chan, lttng_heap_replace_max(heap, buf) != buf);
 			break;
 		}
 	}
 
-	buf = heap_maximum(heap);
+	buf = lttng_heap_maximum(heap);
 	if (!buf || buf->iter.timestamp > chan->iter.last_qs) {
 		/*
 		 * Deal with buffers previously showing no data.
@@ -287,7 +285,7 @@ ssize_t channel_get_next_record(struct channel *chan,
 		lib_ring_buffer_wait_for_qs(config, chan);
 	}
 
-	*ret_buf = buf = heap_maximum(heap);
+	*ret_buf = buf = lttng_heap_maximum(heap);
 	if (buf) {
 		/*
 		 * If this warning triggers, you probably need to check your
@@ -333,13 +331,32 @@ void lib_ring_buffer_iterator_init(struct channel *chan, struct lib_ring_buffer
 	}
 
 	/* Add to list of buffers without any current record */
-	if (chan->backend.config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+	if (chan->backend.config.alloc == RING_BUFFER_ALLOC_PER_CPU)
 		list_add(&buf->iter.empty_node, &chan->iter.empty_head);
 }
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
+int lttng_cpuhp_rb_iter_online(unsigned int cpu,
+		struct lttng_cpuhp_node *node)
+{
+	struct channel *chan = container_of(node, struct channel,
+					    cpuhp_iter_online);
+	struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
+
+	CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
+
+	lib_ring_buffer_iterator_init(chan, buf);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_iter_online);
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
 #ifdef CONFIG_HOTPLUG_CPU
 static
-int __cpuinit channel_iterator_cpu_hotplug(struct notifier_block *nb,
+int channel_iterator_cpu_hotplug(struct notifier_block *nb,
 					   unsigned long action,
 					   void *hcpu)
 {
@@ -347,7 +364,7 @@ int __cpuinit channel_iterator_cpu_hotplug(struct notifier_block *nb,
 	struct channel *chan = container_of(nb, struct channel,
 					    hp_iter_notifier);
 	struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 
 	if (!chan->hp_iter_enable)
 		return NOTIFY_DONE;
@@ -367,43 +384,59 @@ int __cpuinit channel_iterator_cpu_hotplug(struct notifier_block *nb,
 	}
 #endif
 
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
 int channel_iterator_init(struct channel *chan)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	struct lib_ring_buffer *buf;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-		int cpu, ret;
+		int ret;
 
 		INIT_LIST_HEAD(&chan->iter.empty_head);
-		ret = heap_init(&chan->iter.heap,
+		ret = lttng_heap_init(&chan->iter.heap,
 				num_possible_cpus(),
 				GFP_KERNEL, buf_is_higher);
 		if (ret)
 			return ret;
-		/*
-		 * In case of non-hotplug cpu, if the ring-buffer is allocated
-		 * in early initcall, it will not be notified of secondary cpus.
-		 * In that off case, we need to allocate for all possible cpus.
-		 */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+		chan->cpuhp_iter_online.component = LTTNG_RING_BUFFER_ITER;
+		ret = cpuhp_state_add_instance(lttng_rb_hp_online,
+			&chan->cpuhp_iter_online.node);
+		if (ret)
+			return ret;
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+		{
+			int cpu;
+
+			/*
+			 * In case of non-hotplug cpu, if the ring-buffer is allocated
+			 * in early initcall, it will not be notified of secondary cpus.
+			 * In that off case, we need to allocate for all possible cpus.
+			 */
 #ifdef CONFIG_HOTPLUG_CPU
-		chan->hp_iter_notifier.notifier_call =
-			channel_iterator_cpu_hotplug;
-		chan->hp_iter_notifier.priority = 10;
-		register_cpu_notifier(&chan->hp_iter_notifier);
-		get_online_cpus();
-		for_each_online_cpu(cpu) {
-			buf = per_cpu_ptr(chan->backend.buf, cpu);
-			lib_ring_buffer_iterator_init(chan, buf);
-		}
-		chan->hp_iter_enable = 1;
-		put_online_cpus();
+			chan->hp_iter_notifier.notifier_call =
+				channel_iterator_cpu_hotplug;
+			chan->hp_iter_notifier.priority = 10;
+			register_cpu_notifier(&chan->hp_iter_notifier);
+
+			get_online_cpus();
+			for_each_online_cpu(cpu) {
+				buf = per_cpu_ptr(chan->backend.buf, cpu);
+				lib_ring_buffer_iterator_init(chan, buf);
+			}
+			chan->hp_iter_enable = 1;
+			put_online_cpus();
 #else
-		for_each_possible_cpu(cpu) {
-			buf = per_cpu_ptr(chan->backend.buf, cpu);
-			lib_ring_buffer_iterator_init(chan, buf);
-		}
+			for_each_possible_cpu(cpu) {
+				buf = per_cpu_ptr(chan->backend.buf, cpu);
+				lib_ring_buffer_iterator_init(chan, buf);
+			}
 #endif
+		}
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
 	} else {
 		buf = channel_get_ring_buffer(config, chan, 0);
 		lib_ring_buffer_iterator_init(chan, buf);
@@ -413,26 +446,36 @@ int channel_iterator_init(struct channel *chan)
 
 void channel_iterator_unregister_notifiers(struct channel *chan)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+		{
+			int ret;
+
+			ret = cpuhp_state_remove_instance(lttng_rb_hp_online,
+				&chan->cpuhp_iter_online.node);
+			WARN_ON(ret);
+		}
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
 		chan->hp_iter_enable = 0;
 		unregister_cpu_notifier(&chan->hp_iter_notifier);
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
 	}
 }
 
 void channel_iterator_free(struct channel *chan)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-		heap_free(&chan->iter.heap);
+		lttng_heap_free(&chan->iter.heap);
 }
 
 int lib_ring_buffer_iterator_open(struct lib_ring_buffer *buf)
 {
 	struct channel *chan = buf->backend.chan;
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 
 	CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
 	return lib_ring_buffer_open_read(buf);
 }
@@ -451,7 +494,7 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_iterator_release);
 
 int channel_iterator_open(struct channel *chan)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	struct lib_ring_buffer *buf;
 	int ret = 0, cpu;
 
@@ -484,7 +527,7 @@ EXPORT_SYMBOL_GPL(channel_iterator_open);
 
 void channel_iterator_release(struct channel *chan)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	struct lib_ring_buffer *buf;
 	int cpu;
 
@@ -514,7 +557,7 @@ void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf)
 		lib_ring_buffer_put_next_subbuf(buf);
 	buf->iter.state = ITER_GET_SUBBUF;
 	/* Remove from heap (if present). */
-	if (heap_cherrypick(&chan->iter.heap, buf))
+	if (lttng_heap_cherrypick(&chan->iter.heap, buf))
 		list_add(&buf->iter.empty_node, &chan->iter.empty_head);
 	buf->iter.timestamp = 0;
 	buf->iter.header_len = 0;
@@ -527,12 +570,12 @@ void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf)
 
 void channel_iterator_reset(struct channel *chan)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	struct lib_ring_buffer *buf;
 	int cpu;
 
 	/* Empty heap, put into empty_head */
-	while ((buf = heap_remove(&chan->iter.heap)) != NULL)
+	while ((buf = lttng_heap_remove(&chan->iter.heap)) != NULL)
 		list_add(&buf->iter.empty_node, &chan->iter.empty_head);
 
 	for_each_channel_cpu(cpu, chan) {
@@ -558,12 +601,12 @@ ssize_t channel_ring_buffer_file_read(struct file *filp,
 					struct lib_ring_buffer *buf,
 					int fusionmerge)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	size_t read_count = 0, read_offset;
 	ssize_t len;
 
 	might_sleep();
-	if (!access_ok(VERIFY_WRITE, user_buf, count))
+	if (!lttng_access_ok(VERIFY_WRITE, user_buf, count))
 		return -EFAULT;
 
 	/* Finish copy of previous record */
@@ -573,7 +616,7 @@ ssize_t channel_ring_buffer_file_read(struct file *filp,
 		read_offset = *ppos;
 		if (config->alloc == RING_BUFFER_ALLOC_PER_CPU
 		    && fusionmerge)
-			buf = heap_maximum(&chan->iter.heap);
+			buf = lttng_heap_maximum(&chan->iter.heap);
 		CHAN_WARN_ON(chan, !buf);
 		goto skip_get_next;
 	}
@@ -679,7 +722,7 @@ ssize_t lib_ring_buffer_file_read(struct file *filp,
 			size_t count,
 			loff_t *ppos)
 {
-	struct inode *inode = filp->f_dentry->d_inode;
+	struct inode *inode = filp->lttng_f_dentry->d_inode;
 	struct lib_ring_buffer *buf = inode->i_private;
 	struct channel *chan = buf->backend.chan;
 
@@ -704,9 +747,9 @@ ssize_t channel_file_read(struct file *filp,
 			size_t count,
 			loff_t *ppos)
 {
-	struct inode *inode = filp->f_dentry->d_inode;
+	struct inode *inode = filp->lttng_f_dentry->d_inode;
 	struct channel *chan = inode->i_private;
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lib_ring_buffer_config *config = &chan->backend.config;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
 		return channel_ring_buffer_file_read(filp, user_buf, count,
@@ -780,17 +823,19 @@ int channel_file_release(struct inode *inode, struct file *file)
 }
 
 const struct file_operations channel_payload_file_operations = {
+	.owner = THIS_MODULE,
 	.open = channel_file_open,
 	.release = channel_file_release,
 	.read = channel_file_read,
-	.llseek = lib_ring_buffer_no_llseek,
+	.llseek = vfs_lib_ring_buffer_no_llseek,
 };
 EXPORT_SYMBOL_GPL(channel_payload_file_operations);
 
 const struct file_operations lib_ring_buffer_payload_file_operations = {
+	.owner = THIS_MODULE,
 	.open = lib_ring_buffer_file_open,
 	.release = lib_ring_buffer_file_release,
 	.read = lib_ring_buffer_file_read,
-	.llseek = lib_ring_buffer_no_llseek,
+	.llseek = vfs_lib_ring_buffer_no_llseek,
 };
 EXPORT_SYMBOL_GPL(lib_ring_buffer_payload_file_operations);
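
For context: the patch above renames the heap API but leaves the iterator's central mechanism untouched. Each per-CPU buffer hands out records in timestamp order, and channel_get_next_record() merges those streams through a priority heap keyed on each buffer's next-record timestamp, so the "get next event" operation stays O(log(NR_CPUS)): the topmost buffer is consumed, then sifted back into place via lttng_heap_replace_max(), or dropped when it returns -ENODATA. The standalone userspace sketch below illustrates only that merge pattern and is not lttng-modules code: struct stream, get_next_record() and the fixed-size array heap are hypothetical stand-ins for lib_ring_buffer, the real record accessors and the lttng_ptr_heap API, and the empty-buffer list and quiescent-state handling are deliberately omitted.

	/*
	 * merge_sketch.c: illustrative only -- NOT part of lttng-modules.
	 * Merges N per-"CPU" record streams in timestamp order through a
	 * priority heap, the way channel_get_next_record() does above.
	 */
	#include <stdio.h>
	#include <stdint.h>

	#define MAX_STREAMS	8

	struct stream {
		const uint64_t *ts;	/* this stream's timestamps, sorted */
		size_t len, pos;
	};

	/* Oldest next record on top: plays the role of buf_is_higher(). */
	static int stream_is_higher(const struct stream *a, const struct stream *b)
	{
		return a->ts[a->pos] < b->ts[b->pos];
	}

	struct heap {
		struct stream *node[MAX_STREAMS];
		size_t len;
	};

	static void swap_nodes(struct heap *h, size_t i, size_t j)
	{
		struct stream *tmp = h->node[i];

		h->node[i] = h->node[j];
		h->node[j] = tmp;
	}

	static void sift_down(struct heap *h, size_t i)
	{
		for (;;) {
			size_t l = 2 * i + 1, r = l + 1, top = i;

			if (l < h->len && stream_is_higher(h->node[l], h->node[top]))
				top = l;
			if (r < h->len && stream_is_higher(h->node[r], h->node[top]))
				top = r;
			if (top == i)
				return;
			swap_nodes(h, i, top);
			i = top;
		}
	}

	static void heap_insert(struct heap *h, struct stream *s)
	{
		size_t i = h->len++;

		h->node[i] = s;		/* sift up towards the root */
		while (i && stream_is_higher(h->node[i], h->node[(i - 1) / 2])) {
			swap_nodes(h, i, (i - 1) / 2);
			i = (i - 1) / 2;
		}
	}

	/*
	 * Counterpart of channel_get_next_record(): O(log n) per record.
	 * Returns 0 and stores the next timestamp, or -1 once every stream
	 * is exhausted (the -ENODATA case above, where a buffer leaves the
	 * heap for good).
	 */
	static int get_next_record(struct heap *h, uint64_t *ts)
	{
		struct stream *top;

		if (!h->len)
			return -1;
		top = h->node[0];
		*ts = top->ts[top->pos++];
		if (top->pos == top->len)
			h->node[0] = h->node[--h->len];	/* drop exhausted stream */
		/* Re-establish heap order, as lttng_heap_replace_max() does. */
		sift_down(h, 0);
		return 0;
	}

	int main(void)
	{
		static const uint64_t cpu0[] = { 1, 5, 9 };
		static const uint64_t cpu1[] = { 2, 3, 8 };
		static const uint64_t cpu2[] = { 4, 6, 7 };
		struct stream s[] = {
			{ cpu0, 3, 0 }, { cpu1, 3, 0 }, { cpu2, 3, 0 },
		};
		struct heap h = { .len = 0 };
		uint64_t ts;
		size_t i;

		for (i = 0; i < 3; i++)
			heap_insert(&h, &s[i]);
		while (!get_next_record(&h, &ts))
			printf("%llu ", (unsigned long long)ts);
		printf("\n");	/* prints: 1 2 3 4 5 6 7 8 9 */
		return 0;
	}

The replace-in-place step mirrors the design choice visible in the hunks above: after one record is consumed, the topmost stream is sifted back down in a single O(log n) pass rather than being popped and reinserted, which would cost two heap operations per event.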