-/*
+/* SPDX-License-Identifier: (GPL-2.0 OR LGPL-2.1)
+ *
* ring_buffer_backend.c
*
* Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; only
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/stddef.h>
if (unlikely(!pages))
goto pages_error;
- bufb->array = kmalloc_node(ALIGN(sizeof(*bufb->array)
+ bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
* num_subbuf_alloc,
1 << INTERNODE_CACHE_SHIFT),
GFP_KERNEL | __GFP_NOWARN,
/* Allocate backend pages array elements */
for (i = 0; i < num_subbuf_alloc; i++) {
bufb->array[i] =
- kzalloc_node(ALIGN(
+ lttng_kvzalloc_node(ALIGN(
sizeof(struct lib_ring_buffer_backend_pages) +
sizeof(struct lib_ring_buffer_backend_page)
* num_pages_per_subbuf,
}
/* Allocate write-side subbuffer table */
- bufb->buf_wsb = kzalloc_node(ALIGN(
+ bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
sizeof(struct lib_ring_buffer_backend_subbuffer)
* num_subbuf,
1 << INTERNODE_CACHE_SHIFT),
bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
/* Allocate subbuffer packet counter table */
- bufb->buf_cnt = kzalloc_node(ALIGN(
+ bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
sizeof(struct lib_ring_buffer_backend_counts)
* num_subbuf,
1 << INTERNODE_CACHE_SHIFT),
return 0;
free_wsb:
- kfree(bufb->buf_wsb);
+ lttng_kvfree(bufb->buf_wsb);
free_array:
for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
- kfree(bufb->array[i]);
+ lttng_kvfree(bufb->array[i]);
depopulate:
/* Free all allocated pages */
for (i = 0; (i < num_pages && pages[i]); i++)
__free_page(pages[i]);
- kfree(bufb->array);
+ lttng_kvfree(bufb->array);
array_error:
vfree(pages);
pages_error:
if (chanb->extra_reader_sb)
num_subbuf_alloc++;
- kfree(bufb->buf_wsb);
- kfree(bufb->buf_cnt);
+ lttng_kvfree(bufb->buf_wsb);
+ lttng_kvfree(bufb->buf_cnt);
for (i = 0; i < num_subbuf_alloc; i++) {
for (j = 0; j < bufb->num_pages_per_subbuf; j++)
__free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
- kfree(bufb->array[i]);
+ lttng_kvfree(bufb->array[i]);
}
- kfree(bufb->array);
+ lttng_kvfree(bufb->array);
bufb->allocated = 0;
}
chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+
+/*
+ * CPU hotplug PREPARE-stage callback (kernels >= 4.10 state machine):
+ * allocate and create the per-cpu ring buffer for @cpu before it comes
+ * online, using the channel backend embedded in @node.
+ *
+ * No need to implement a "dead" callback to do a buffer switch here,
+ * because it will happen when tracing is stopped, or will be done by
+ * switch timer CPU DEAD callback.
+ * We don't free buffers when CPUs go away, because it would make trace
+ * data vanish, which is unwanted.
+ *
+ * Returns 0 on success, or the negative error returned by
+ * lib_ring_buffer_create() when buffer creation fails.
+ */
+int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
+ struct lttng_cpuhp_node *node)
+{
+ struct channel_backend *chanb = container_of(node,
+ struct channel_backend, cpuhp_prepare);
+ const struct lib_ring_buffer_config *config = &chanb->config;
+ struct lib_ring_buffer *buf;
+ int ret;
+
+ /* Hotplug-driven allocation only makes sense for per-cpu buffers. */
+ CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
+
+ buf = per_cpu_ptr(chanb->buf, cpu);
+ ret = lib_ring_buffer_create(buf, chanb, cpu);
+ if (ret) {
+ printk(KERN_ERR
+ "ring_buffer_cpu_hp_callback: cpu %d "
+ "buffer creation failed\n", cpu);
+ return ret;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
#ifdef CONFIG_HOTPLUG_CPU
+
/**
* lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
* @nb: notifier block
}
return NOTIFY_OK;
}
+
#endif
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
/**
* channel_backend_init - initialize a channel backend
* @chanb: channel backend
if (!chanb->buf)
goto free_cpumask;
- /*
- * In case of non-hotplug cpu, if the ring-buffer is allocated
- * in early initcall, it will not be notified of secondary cpus.
- * In that off case, we need to allocate for all possible cpus.
- */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+ chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
+ ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
+ &chanb->cpuhp_prepare.node);
+ if (ret)
+ goto free_bufs;
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+
+ {
+ /*
+ * In case of non-hotplug cpu, if the ring-buffer is allocated
+ * in early initcall, it will not be notified of secondary cpus.
+ * In that off case, we need to allocate for all possible cpus.
+ */
#ifdef CONFIG_HOTPLUG_CPU
- /*
- * buf->backend.allocated test takes care of concurrent CPU
- * hotplug.
- * Priority higher than frontend, so we create the ring buffer
- * before we start the timer.
- */
- chanb->cpu_hp_notifier.notifier_call =
- lib_ring_buffer_cpu_hp_callback;
- chanb->cpu_hp_notifier.priority = 5;
- register_hotcpu_notifier(&chanb->cpu_hp_notifier);
-
- get_online_cpus();
- for_each_online_cpu(i) {
- ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
- chanb, i);
- if (ret)
- goto free_bufs; /* cpu hotplug locked */
- }
- put_online_cpus();
+ /*
+ * buf->backend.allocated test takes care of concurrent CPU
+ * hotplug.
+ * Priority higher than frontend, so we create the ring buffer
+ * before we start the timer.
+ */
+ chanb->cpu_hp_notifier.notifier_call =
+ lib_ring_buffer_cpu_hp_callback;
+ chanb->cpu_hp_notifier.priority = 5;
+ register_hotcpu_notifier(&chanb->cpu_hp_notifier);
+
+ get_online_cpus();
+ for_each_online_cpu(i) {
+ ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
+ chanb, i);
+ if (ret)
+ goto free_bufs; /* cpu hotplug locked */
+ }
+ put_online_cpus();
#else
- for_each_possible_cpu(i) {
- ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
- chanb, i);
- if (ret)
- goto free_bufs; /* cpu hotplug locked */
- }
+ for_each_possible_cpu(i) {
+ ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
+ chanb, i);
+ if (ret)
+ goto free_bufs;
+ }
#endif
+ }
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
} else {
chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
if (!chanb->buf)
free_bufs:
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+ ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
+ &chanb->cpuhp_prepare.node);
+ WARN_ON(ret);
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#ifdef CONFIG_HOTPLUG_CPU
+ put_online_cpus();
+ unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
+#endif
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
for_each_possible_cpu(i) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
+ struct lib_ring_buffer *buf =
+ per_cpu_ptr(chanb->buf, i);
if (!buf->backend.allocated)
continue;
lib_ring_buffer_free(buf);
}
-#ifdef CONFIG_HOTPLUG_CPU
- put_online_cpus();
- unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
-#endif
free_percpu(chanb->buf);
} else
kfree(chanb->buf);
{
const struct lib_ring_buffer_config *config = &chanb->config;
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+ int ret;
+
+ ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
+ &chanb->cpuhp_prepare.node);
+ WARN_ON(ret);
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+ }
}
/**