Move wrapper/ringbuffer to include/ringbuffer
diff --git a/lib/ringbuffer/ring_buffer_iterator.c b/lib/ringbuffer/ring_buffer_iterator.c
index 1321b5f965a95bfefd0e0a5ada6ad6fa4fa759ea..96f4c3b2da5cf60230550b248db7fa1f5ee52e61 100644
@@ -1,19 +1,17 @@
-/*
- * ring_buffer_iterator.c
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
  *
- * (C) Copyright 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * ring_buffer_iterator.c
  *
  * Ring buffer and channel iterators. Get each event of a channel in order. Uses
  * a prio heap for per-cpu buffers, giving a O(log(NR_CPUS)) algorithmic
  * complexity for the "get next event" operation.
  *
- * Author:
- *     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Dual LGPL v2.1/GPL v2 license.
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  */
 
-#include "../../wrapper/ringbuffer/iterator.h"
+#include <include/ringbuffer/iterator.h>
+#include <linux/file.h>
+#include <linux/uaccess.h>
 #include <linux/jiffies.h>
 #include <linux/delay.h>
 #include <linux/module.h>
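
The new `#include <include/ringbuffer/iterator.h>` form is unusual in that the path starts at the top of the lttng-modules tree rather than at the kernel's include/ directory. Presumably the module's Kbuild adds the source tree root to the header search path; the flag itself is outside this diff, so the sketch below is an assumption:

    /* Assumes the lttng-modules Kbuild passes something along the
     * lines of `ccflags-y += -I$(TOP_LTTNG_MODULES_DIR)` (assumption;
     * the exact flag is not shown in this commit), so header paths
     * resolve relative to the top of the lttng-modules tree.
     */
    #include <include/ringbuffer/iterator.h>
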
@@ -40,7 +38,7 @@
 ssize_t lib_ring_buffer_get_next_record(struct channel *chan,
                                        struct lib_ring_buffer *buf)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
        struct lib_ring_buffer_iter *iter = &buf->iter;
        int ret;
 
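
Every `chan->backend.config` to `&chan->backend.config` change in this patch follows from `struct channel_backend` now embedding the config by value instead of holding a pointer to it. A simplified pair of layouts (illustrative stand-ins, not the real lttng definitions) shows why the extra `&` appears:

    /* Only the member kind changes between the two versions. */
    struct lib_ring_buffer_config {
            int alloc;
    };

    struct channel_backend_old {
            const struct lib_ring_buffer_config *config;    /* was a pointer */
    };

    struct channel_backend_new {
            struct lib_ring_buffer_config config;           /* now embedded */
    };

    /* old:  const struct lib_ring_buffer_config *c = chan->backend.config;
     * new:  const struct lib_ring_buffer_config *c = &chan->backend.config;
     */
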
@@ -48,7 +46,7 @@ restart:
        switch (iter->state) {
        case ITER_GET_SUBBUF:
                ret = lib_ring_buffer_get_next_subbuf(buf);
-               if (ret && !ACCESS_ONCE(buf->finalized)
+               if (ret && !READ_ONCE(buf->finalized)
                    && config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
                        /*
                         * Use "pull" scheme for global buffers. The reader
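
ACCESS_ONCE() was removed from mainline in v4.15 because its implementation, a cast through a volatile-qualified lvalue, silently failed on non-scalar types; READ_ONCE()/WRITE_ONCE() are the replacements and also document the direction of the access. A minimal sketch of the idiom used on the `finalized` flag here:

    #include <linux/compiler.h>

    /* READ_ONCE() forces exactly one, non-torn load and keeps the
     * compiler from hoisting or caching the read across a retry loop,
     * which matters when another CPU can set the flag concurrently.
     */
    static inline int buffer_is_finalized(const int *finalized)
    {
            return READ_ONCE(*finalized);
    }
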
@@ -58,7 +56,7 @@ restart:
                         * Per-CPU buffers rather use a "push" scheme because
                         * the IPI needed to flush all CPU's buffers is too
                         * costly. In the "push" scheme, the reader waits for
-                        * the writer periodic deferrable timer to flush the
+                        * the writer periodic timer to flush the
                         * buffers (keeping track of a quiescent state
                         * timestamp). Therefore, the writer "pushes" data out
                         * of the buffers rather than letting the reader "pull"
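
The comment edit drops the word "deferrable": a deferrable timer is allowed to sleep through an idle CPU's ticks, which would delay the quiescent-state flush the "push" scheme depends on, so describing the writer's flush timer as plain periodic is more accurate. A hypothetical sketch of such a periodic flush timer, using the v4.14+ timer API (names and the flush hook are assumptions, not lttng code):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    /* The writer re-arms a periodic timer that flushes its own buffer,
     * so the reader never needs to send an IPI. No TIMER_DEFERRABLE
     * flag: the flush must still fire while the CPU is otherwise idle.
     */
    struct push_flush {
            struct timer_list timer;
            unsigned long interval;         /* flush period, in jiffies */
    };

    static void push_flush_cb(struct timer_list *t)
    {
            struct push_flush *pf = from_timer(pf, t, timer);

            /* flush_buffer(pf) would go here -- assumed flush hook */
            mod_timer(&pf->timer, jiffies + pf->interval);
    }

    static void push_flush_start(struct push_flush *pf, unsigned long interval)
    {
            pf->interval = interval;
            timer_setup(&pf->timer, push_flush_cb, 0);
            mod_timer(&pf->timer, jiffies + interval);
    }
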
@@ -225,7 +223,7 @@ void lib_ring_buffer_wait_for_qs(const struct lib_ring_buffer_config *config,
 ssize_t channel_get_next_record(struct channel *chan,
                                struct lib_ring_buffer **ret_buf)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
        struct lib_ring_buffer *buf;
        struct lttng_ptr_heap *heap;
        ssize_t len;
@@ -333,47 +331,32 @@ void lib_ring_buffer_iterator_init(struct channel *chan, struct lib_ring_buffer
        }
 
        /* Add to list of buffers without any current record */
-       if (chan->backend.config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+       if (chan->backend.config.alloc == RING_BUFFER_ALLOC_PER_CPU)
                list_add(&buf->iter.empty_node, &chan->iter.empty_head);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-static
-int __cpuinit channel_iterator_cpu_hotplug(struct notifier_block *nb,
-                                          unsigned long action,
-                                          void *hcpu)
+int lttng_cpuhp_rb_iter_online(unsigned int cpu,
+               struct lttng_cpuhp_node *node)
 {
-       unsigned int cpu = (unsigned long)hcpu;
-       struct channel *chan = container_of(nb, struct channel,
-                                           hp_iter_notifier);
+       struct channel *chan = container_of(node, struct channel,
+                                           cpuhp_iter_online);
        struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
-       const struct lib_ring_buffer_config *config = chan->backend.config;
-
-       if (!chan->hp_iter_enable)
-               return NOTIFY_DONE;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
 
        CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
 
-       switch (action) {
-       case CPU_DOWN_FAILED:
-       case CPU_DOWN_FAILED_FROZEN:
-       case CPU_ONLINE:
-       case CPU_ONLINE_FROZEN:
-               lib_ring_buffer_iterator_init(chan, buf);
-               return NOTIFY_OK;
-       default:
-               return NOTIFY_DONE;
-       }
+       lib_ring_buffer_iterator_init(chan, buf);
+       return 0;
 }
-#endif
+EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_iter_online);
 
 int channel_iterator_init(struct channel *chan)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
        struct lib_ring_buffer *buf;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-               int cpu, ret;
+               int ret;
 
                INIT_LIST_HEAD(&chan->iter.empty_head);
                ret = lttng_heap_init(&chan->iter.heap,
@@ -381,29 +364,12 @@ int channel_iterator_init(struct channel *chan)
                                GFP_KERNEL, buf_is_higher);
                if (ret)
                        return ret;
-               /*
-                * In case of non-hotplug cpu, if the ring-buffer is allocated
-                * in early initcall, it will not be notified of secondary cpus.
-                * In that off case, we need to allocate for all possible cpus.
-                */
-#ifdef CONFIG_HOTPLUG_CPU
-               chan->hp_iter_notifier.notifier_call =
-                       channel_iterator_cpu_hotplug;
-               chan->hp_iter_notifier.priority = 10;
-               register_cpu_notifier(&chan->hp_iter_notifier);
-               get_online_cpus();
-               for_each_online_cpu(cpu) {
-                       buf = per_cpu_ptr(chan->backend.buf, cpu);
-                       lib_ring_buffer_iterator_init(chan, buf);
-               }
-               chan->hp_iter_enable = 1;
-               put_online_cpus();
-#else
-               for_each_possible_cpu(cpu) {
-                       buf = per_cpu_ptr(chan->backend.buf, cpu);
-                       lib_ring_buffer_iterator_init(chan, buf);
-               }
-#endif
+
+               chan->cpuhp_iter_online.component = LTTNG_RING_BUFFER_ITER;
+               ret = cpuhp_state_add_instance(lttng_rb_hp_online,
+                       &chan->cpuhp_iter_online.node);
+               if (ret)
+                       return ret;
        } else {
                buf = channel_get_ring_buffer(config, chan, 0);
                lib_ring_buffer_iterator_init(chan, buf);
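
The notifier-to-cpuhp conversion in the two hunks above follows the multi-instance CPU hotplug state machine pattern: one state is set up once with per-(cpu, instance) callbacks, then each channel adds itself as an instance via `cpuhp_state_add_instance()`. This also replaces the old CPU_ONLINE/CPU_DOWN_FAILED case analysis, since the online callback is invoked for exactly those transitions, which is why the switch statement disappears. A sketch of how a state like `lttng_rb_hp_online` is typically obtained (the diff shows dispatch by `node->component`; everything else below is illustrative):

    #include <linux/cpuhotplug.h>

    enum cpuhp_state lttng_rb_hp_online;

    static int rb_cpu_online(unsigned int cpu, struct hlist_node *node)
    {
            /* container_of(node, ...) recovers the owning channel, then
             * per-CPU init runs exactly once per (cpu, instance) pair.
             */
            return 0;
    }

    static int __init rb_hp_init(void)
    {
            int ret;

            ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                            "lib/ringbuffer:online",
                            rb_cpu_online, NULL);   /* NULL: no teardown hook */
            if (ret < 0)
                    return ret;
            lttng_rb_hp_online = ret;       /* dynamic state number */
            return 0;
    }
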
@@ -413,17 +379,22 @@ int channel_iterator_init(struct channel *chan)
 
 void channel_iterator_unregister_notifiers(struct channel *chan)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-               chan->hp_iter_enable = 0;
-               unregister_cpu_notifier(&chan->hp_iter_notifier);
+               {
+                       int ret;
+
+                       ret = cpuhp_state_remove_instance(lttng_rb_hp_online,
+                               &chan->cpuhp_iter_online.node);
+                       WARN_ON(ret);
+               }
        }
 }
 
 void channel_iterator_free(struct channel *chan)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
                lttng_heap_free(&chan->iter.heap);
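
Teardown mirrors setup: every instance added with `cpuhp_state_add_instance()` must be removed before the channel memory is freed, or a later CPU-online event would invoke the callback on freed memory. This pairing replaces the old `hp_iter_enable` flag plus `unregister_cpu_notifier()`. A hedged sketch (function names are assumptions):

    #include <linux/cpuhotplug.h>

    extern enum cpuhp_state lttng_rb_hp_online;     /* dynamic state from setup */

    static int channel_hp_attach(struct hlist_node *node)
    {
            return cpuhp_state_add_instance(lttng_rb_hp_online, node);
    }

    static void channel_hp_detach(struct hlist_node *node)
    {
            int ret = cpuhp_state_remove_instance(lttng_rb_hp_online, node);

            WARN_ON(ret);   /* removal fails only on API misuse */
    }
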
@@ -432,7 +403,7 @@ void channel_iterator_free(struct channel *chan)
 int lib_ring_buffer_iterator_open(struct lib_ring_buffer *buf)
 {
        struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
        CHAN_WARN_ON(chan, config->output != RING_BUFFER_ITERATOR);
        return lib_ring_buffer_open_read(buf);
 }
@@ -451,7 +422,7 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_iterator_release);
 
 int channel_iterator_open(struct channel *chan)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
        struct lib_ring_buffer *buf;
        int ret = 0, cpu;
 
@@ -484,7 +455,7 @@ EXPORT_SYMBOL_GPL(channel_iterator_open);
 
 void channel_iterator_release(struct channel *chan)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
        struct lib_ring_buffer *buf;
        int cpu;
 
@@ -527,7 +498,7 @@ void lib_ring_buffer_iterator_reset(struct lib_ring_buffer *buf)
 
 void channel_iterator_reset(struct channel *chan)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
        struct lib_ring_buffer *buf;
        int cpu;
 
@@ -558,12 +529,12 @@ ssize_t channel_ring_buffer_file_read(struct file *filp,
                                      struct lib_ring_buffer *buf,
                                      int fusionmerge)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
        size_t read_count = 0, read_offset;
        ssize_t len;
 
        might_sleep();
-       if (!access_ok(VERIFY_WRITE, user_buf, count))
+       if (!access_ok(user_buf, count))
                return -EFAULT;
 
        /* Finish copy of previous record */
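
access_ok() lost its first argument in Linux 5.0 (commit 96d4f267e40f); the VERIFY_READ vs VERIFY_WRITE distinction had not affected the check on any remaining architecture for some time. Out-of-tree code that must build on both sides of that release typically hides the difference behind a wrapper, along these lines (wrapper name is hypothetical):

    #include <linux/version.h>
    #include <linux/uaccess.h>

    #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0))
    #define compat_access_ok(addr, size)    access_ok(addr, size)
    #else
    #define compat_access_ok(addr, size)    access_ok(VERIFY_WRITE, addr, size)
    #endif
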
@@ -679,7 +650,7 @@ ssize_t lib_ring_buffer_file_read(struct file *filp,
                                  size_t count,
                                  loff_t *ppos)
 {
-       struct inode *inode = filp->f_dentry->d_inode;
+       struct inode *inode = filp->f_path.dentry->d_inode;
        struct lib_ring_buffer *buf = inode->i_private;
        struct channel *chan = buf->backend.chan;
 
@@ -704,9 +675,9 @@ ssize_t channel_file_read(struct file *filp,
                          size_t count,
                          loff_t *ppos)
 {
-       struct inode *inode = filp->f_dentry->d_inode;
+       struct inode *inode = filp->f_path.dentry->d_inode;
        struct channel *chan = inode->i_private;
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lib_ring_buffer_config *config = &chan->backend.config;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
                return channel_ring_buffer_file_read(filp, user_buf, count,
@@ -784,7 +755,7 @@ const struct file_operations channel_payload_file_operations = {
        .open = channel_file_open,
        .release = channel_file_release,
        .read = channel_file_read,
-       .llseek = lib_ring_buffer_no_llseek,
+       .llseek = vfs_lib_ring_buffer_no_llseek,
 };
 EXPORT_SYMBOL_GPL(channel_payload_file_operations);
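
The llseek handler is renamed with a vfs_ prefix, consistent with lttng's other VFS-facing wrappers; its expected behaviour is the standard "not seekable" idiom. A sketch of what such a handler looks like (the body is an assumption; the real definition lives elsewhere in the tree):

    #include <linux/fs.h>

    static loff_t no_llseek_example(struct file *file, loff_t offset, int whence)
    {
            return -ESPIPE;         /* ring buffer reads are strictly sequential */
    }
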
 
@@ -793,6 +764,6 @@ const struct file_operations lib_ring_buffer_payload_file_operations = {
        .open = lib_ring_buffer_file_open,
        .release = lib_ring_buffer_file_release,
        .read = lib_ring_buffer_file_read,
-       .llseek = lib_ring_buffer_no_llseek,
+       .llseek = vfs_lib_ring_buffer_no_llseek,
 };
 EXPORT_SYMBOL_GPL(lib_ring_buffer_payload_file_operations);