-/*
+/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
+ *
* lttng-context-perf-counters.c
*
* LTTng performance monitoring counters (perf-counters) integration module.
*
* Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; only
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/list.h>
#include <linux/string.h>
-#include "lttng-events.h"
-#include "wrapper/ringbuffer/frontend_types.h"
-#include "wrapper/vmalloc.h"
-#include "wrapper/perf.h"
-#include "lttng-tracer.h"
+#include <linux/cpu.h>
+#include <lttng-events.h>
+#include <wrapper/ringbuffer/frontend_types.h>
+#include <wrapper/cpu.h>
+#include <wrapper/vmalloc.h>
+#include <wrapper/perf.h>
+#include <lttng-tracer.h>
static
size_t perf_counter_get_size(size_t offset)
chan->ops->event_write(ctx, &value, sizeof(value));
}
-#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99))
+#if defined(CONFIG_PERF_EVENTS) && (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,1,0))
static
void overflow_callback(struct perf_event *event,
struct perf_sample_data *data,
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
{
struct perf_event **events = field->u.perf_counter->e;
- int cpu;
- get_online_cpus();
- for_each_online_cpu(cpu)
- perf_event_release_kernel(events[cpu]);
- put_online_cpus();
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
+ {
+ int ret;
+
+ ret = cpuhp_state_remove_instance(lttng_hp_online,
+ &field->u.perf_counter->cpuhp_online.node);
+ WARN_ON(ret);
+ ret = cpuhp_state_remove_instance(lttng_hp_prepare,
+ &field->u.perf_counter->cpuhp_prepare.node);
+ WARN_ON(ret);
+ }
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
+ {
+ int cpu;
+
+ lttng_cpus_read_lock();
+ for_each_online_cpu(cpu)
+ perf_event_release_kernel(events[cpu]);
+ lttng_cpus_read_unlock();
#ifdef CONFIG_HOTPLUG_CPU
- unregister_cpu_notifier(&field->u.perf_counter->nb);
+ unregister_cpu_notifier(&field->u.perf_counter->nb);
#endif
+ }
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
kfree(field->event_field.name);
kfree(field->u.perf_counter->attr);
- kfree(events);
+ lttng_kvfree(events);
kfree(field->u.perf_counter);
}
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
+
+/*
+ * CPU hotplug "online" instance callback: create a perf kernel counter
+ * for the CPU coming online and publish it in the per-CPU event array
+ * of the context field (node is embedded in lttng_perf_counter_field).
+ * Returns 0 on success, -EINVAL if the counter cannot be created or is
+ * created in error state.
+ */
+int lttng_cpuhp_perf_counter_online(unsigned int cpu,
+ struct lttng_cpuhp_node *node)
+{
+ struct lttng_perf_counter_field *perf_field =
+ container_of(node, struct lttng_perf_counter_field,
+ cpuhp_online);
+ struct perf_event **events = perf_field->e;
+ struct perf_event_attr *attr = perf_field->attr;
+ struct perf_event *pevent;
+
+ pevent = wrapper_perf_event_create_kernel_counter(attr,
+ cpu, NULL, overflow_callback);
+ if (!pevent || IS_ERR(pevent))
+ return -EINVAL;
+ if (pevent->state == PERF_EVENT_STATE_ERROR) {
+ /* Counter exists but is unusable: release it and fail. */
+ perf_event_release_kernel(pevent);
+ return -EINVAL;
+ }
+ barrier(); /* Create perf counter before setting event */
+ events[cpu] = pevent;
+ return 0;
+}
+
+/*
+ * CPU hotplug "dead" (prepare-down) instance callback: unpublish the
+ * perf counter of the CPU that went offline, then release it.
+ * Always returns 0.
+ */
+int lttng_cpuhp_perf_counter_dead(unsigned int cpu,
+ struct lttng_cpuhp_node *node)
+{
+ struct lttng_perf_counter_field *perf_field =
+ container_of(node, struct lttng_perf_counter_field,
+ cpuhp_prepare);
+ struct perf_event **events = perf_field->e;
+ struct perf_event *pevent;
+
+ pevent = events[cpu];
+ events[cpu] = NULL;
+ barrier(); /* NULLify event before perf counter teardown */
+ perf_event_release_kernel(pevent);
+ return 0;
+}
+
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
+
#ifdef CONFIG_HOTPLUG_CPU
/**
#endif
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
+
int lttng_add_perf_counter_to_ctx(uint32_t type,
uint64_t config,
const char *name,
struct perf_event **events;
struct perf_event_attr *attr;
int ret;
- int cpu;
char *name_alloc;
- events = kzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
+ events = lttng_kvzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
if (!events)
return -ENOMEM;
goto find_error;
}
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
+
+ perf_field->cpuhp_prepare.component = LTTNG_CONTEXT_PERF_COUNTERS;
+ ret = cpuhp_state_add_instance(lttng_hp_prepare,
+ &perf_field->cpuhp_prepare.node);
+ if (ret)
+ goto cpuhp_prepare_error;
+
+ perf_field->cpuhp_online.component = LTTNG_CONTEXT_PERF_COUNTERS;
+ ret = cpuhp_state_add_instance(lttng_hp_online,
+ &perf_field->cpuhp_online.node);
+ if (ret)
+ goto cpuhp_online_error;
+
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
+ {
+ int cpu;
+
#ifdef CONFIG_HOTPLUG_CPU
- perf_field->nb.notifier_call =
- lttng_perf_counter_cpu_hp_callback;
- perf_field->nb.priority = 0;
- register_cpu_notifier(&perf_field->nb);
+ perf_field->nb.notifier_call =
+ lttng_perf_counter_cpu_hp_callback;
+ perf_field->nb.priority = 0;
+ register_cpu_notifier(&perf_field->nb);
#endif
-
- get_online_cpus();
- for_each_online_cpu(cpu) {
- events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
- cpu, NULL, overflow_callback);
- if (!events[cpu] || IS_ERR(events[cpu])) {
- ret = -EINVAL;
- goto counter_error;
- }
- if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
- ret = -EBUSY;
- goto counter_busy;
+ lttng_cpus_read_lock();
+ for_each_online_cpu(cpu) {
+ events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
+ cpu, NULL, overflow_callback);
+ if (!events[cpu] || IS_ERR(events[cpu])) {
+ ret = -EINVAL;
+ goto counter_error;
+ }
+ if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
+ ret = -EBUSY;
+ goto counter_busy;
+ }
}
+ lttng_cpus_read_unlock();
+ perf_field->hp_enable = 1;
}
- put_online_cpus();
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
field->destroy = lttng_destroy_perf_counter_field;
field->get_size = perf_counter_get_size;
field->record = perf_counter_record;
field->u.perf_counter = perf_field;
- perf_field->hp_enable = 1;
+ lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
+ wrapper_vmalloc_sync_mappings();
return 0;
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
+cpuhp_online_error:
+ {
+ int remove_ret;
+
+ remove_ret = cpuhp_state_remove_instance(lttng_hp_prepare,
+ &perf_field->cpuhp_prepare.node);
+ WARN_ON(remove_ret);
+ }
+cpuhp_prepare_error:
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
counter_busy:
counter_error:
- for_each_online_cpu(cpu) {
- if (events[cpu] && !IS_ERR(events[cpu]))
- perf_event_release_kernel(events[cpu]);
- }
- put_online_cpus();
+ {
+ int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (events[cpu] && !IS_ERR(events[cpu]))
+ perf_event_release_kernel(events[cpu]);
+ }
+ lttng_cpus_read_unlock();
#ifdef CONFIG_HOTPLUG_CPU
- unregister_cpu_notifier(&perf_field->nb);
+ unregister_cpu_notifier(&perf_field->nb);
#endif
+ }
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
find_error:
lttng_remove_context_field(ctx, field);
append_context_error:
error_alloc_perf_field:
kfree(attr);
error_attr:
- kfree(events);
+ lttng_kvfree(events);
return ret;
}
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Mathieu Desnoyers");
-MODULE_DESCRIPTION("Linux Trace Toolkit Perf Support");