/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-context-perf-counters.c
 *
 * LTTng performance monitoring counters (perf-counters) integration module.
 *
 * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
10 #include <linux/module.h>
11 #include <linux/slab.h>
12 #include <linux/perf_event.h>
13 #include <linux/list.h>
14 #include <linux/string.h>
15 #include <linux/cpu.h>
16 #include <lttng/events.h>
17 #include <lttng/events-internal.h>
18 #include <ringbuffer/frontend_types.h>
19 #include <wrapper/cpu.h>
20 #include <wrapper/vmalloc.h>
21 #include <wrapper/perf.h>
22 #include <lttng/tracer.h>
25 size_t perf_counter_get_size(void *priv
, struct lttng_kernel_probe_ctx
*probe_ctx
, size_t offset
)
29 size
+= lib_ring_buffer_align(offset
, lttng_alignof(uint64_t));
30 size
+= sizeof(uint64_t);
35 void perf_counter_record(void *priv
, struct lttng_kernel_probe_ctx
*probe_ctx
,
36 struct lttng_kernel_ring_buffer_ctx
*ctx
,
37 struct lttng_kernel_channel_buffer
*chan
)
39 struct lttng_perf_counter_field
*perf_field
= (struct lttng_perf_counter_field
*) priv
;
40 struct perf_event
*event
;
43 event
= perf_field
->e
[ctx
->priv
.reserve_cpu
];
45 if (unlikely(event
->state
== PERF_EVENT_STATE_ERROR
)) {
48 event
->pmu
->read(event
);
49 value
= local64_read(&event
->count
);
53 * Perf chooses not to be clever and not to support enabling a
54 * perf counter before the cpu is brought up. Therefore, we need
55 * to support having events coming (e.g. scheduler events)
56 * before the counter is setup. Write an arbitrary 0 in this
61 chan
->ops
->event_write(ctx
, &value
, sizeof(value
), lttng_alignof(value
));
64 #if defined(CONFIG_PERF_EVENTS) && (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,1,0))
66 void overflow_callback(struct perf_event
*event
,
67 struct perf_sample_data
*data
,
73 void overflow_callback(struct perf_event
*event
, int nmi
,
74 struct perf_sample_data
*data
,
81 void lttng_destroy_perf_counter_ctx_field(void *priv
)
83 struct lttng_perf_counter_field
*perf_field
= priv
;
84 struct perf_event
**events
= perf_field
->e
;
86 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
90 ret
= cpuhp_state_remove_instance(lttng_hp_online
,
91 &perf_field
->cpuhp_online
.node
);
93 ret
= cpuhp_state_remove_instance(lttng_hp_prepare
,
94 &perf_field
->cpuhp_prepare
.node
);
97 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
101 lttng_cpus_read_lock();
102 for_each_online_cpu(cpu
)
103 perf_event_release_kernel(events
[cpu
]);
104 lttng_cpus_read_unlock();
105 #ifdef CONFIG_HOTPLUG_CPU
106 unregister_cpu_notifier(&perf_field
->nb
);
109 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
110 kfree(perf_field
->name
);
111 kfree(perf_field
->attr
);
112 kfree(perf_field
->event_field
);
113 lttng_kvfree(events
);
117 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
119 int lttng_cpuhp_perf_counter_online(unsigned int cpu
,
120 struct lttng_cpuhp_node
*node
)
122 struct lttng_perf_counter_field
*perf_field
=
123 container_of(node
, struct lttng_perf_counter_field
,
125 struct perf_event
**events
= perf_field
->e
;
126 struct perf_event_attr
*attr
= perf_field
->attr
;
127 struct perf_event
*pevent
;
129 pevent
= wrapper_perf_event_create_kernel_counter(attr
,
130 cpu
, NULL
, overflow_callback
);
131 if (!pevent
|| IS_ERR(pevent
))
133 if (pevent
->state
== PERF_EVENT_STATE_ERROR
) {
134 perf_event_release_kernel(pevent
);
137 barrier(); /* Create perf counter before setting event */
138 events
[cpu
] = pevent
;
142 int lttng_cpuhp_perf_counter_dead(unsigned int cpu
,
143 struct lttng_cpuhp_node
*node
)
145 struct lttng_perf_counter_field
*perf_field
=
146 container_of(node
, struct lttng_perf_counter_field
,
148 struct perf_event
**events
= perf_field
->e
;
149 struct perf_event
*pevent
;
151 pevent
= events
[cpu
];
153 barrier(); /* NULLify event before perf counter teardown */
154 perf_event_release_kernel(pevent
);
158 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
160 #ifdef CONFIG_HOTPLUG_CPU
163 * lttng_perf_counter_hp_callback - CPU hotplug callback
164 * @nb: notifier block
165 * @action: hotplug action to take
168 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
170 * We can setup perf counters when the cpu is online (up prepare seems to be too
174 int lttng_perf_counter_cpu_hp_callback(struct notifier_block
*nb
,
175 unsigned long action
,
178 unsigned int cpu
= (unsigned long) hcpu
;
179 struct lttng_perf_counter_field
*perf_field
=
180 container_of(nb
, struct lttng_perf_counter_field
, nb
);
181 struct perf_event
**events
= perf_field
->e
;
182 struct perf_event_attr
*attr
= perf_field
->attr
;
183 struct perf_event
*pevent
;
185 if (!perf_field
->hp_enable
)
190 case CPU_ONLINE_FROZEN
:
191 pevent
= wrapper_perf_event_create_kernel_counter(attr
,
192 cpu
, NULL
, overflow_callback
);
193 if (!pevent
|| IS_ERR(pevent
))
195 if (pevent
->state
== PERF_EVENT_STATE_ERROR
) {
196 perf_event_release_kernel(pevent
);
199 barrier(); /* Create perf counter before setting event */
200 events
[cpu
] = pevent
;
202 case CPU_UP_CANCELED
:
203 case CPU_UP_CANCELED_FROZEN
:
205 case CPU_DEAD_FROZEN
:
206 pevent
= events
[cpu
];
208 barrier(); /* NULLify event before perf counter teardown */
209 perf_event_release_kernel(pevent
);
217 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
219 static const struct lttng_kernel_type_common
*field_type
=
220 lttng_kernel_static_type_integer_from_type(uint64_t, __BYTE_ORDER
, 10);
222 int lttng_add_perf_counter_to_ctx(uint32_t type
,
225 struct lttng_kernel_ctx
**ctx
)
227 struct lttng_kernel_ctx_field ctx_field
= { 0 };
228 struct lttng_kernel_event_field
*event_field
;
229 struct lttng_perf_counter_field
*perf_field
;
230 struct perf_event
**events
;
231 struct perf_event_attr
*attr
;
235 if (lttng_kernel_find_context(*ctx
, name
))
237 name_alloc
= kstrdup(name
, GFP_KERNEL
);
240 goto name_alloc_error
;
242 event_field
= kzalloc(sizeof(*event_field
), GFP_KERNEL
);
245 goto event_field_alloc_error
;
247 event_field
->name
= name_alloc
;
248 event_field
->type
= field_type
;
250 events
= lttng_kvzalloc(num_possible_cpus() * sizeof(*events
), GFP_KERNEL
);
253 goto event_alloc_error
;
256 attr
= kzalloc(sizeof(struct perf_event_attr
), GFP_KERNEL
);
263 attr
->config
= config
;
264 attr
->size
= sizeof(struct perf_event_attr
);
268 perf_field
= kzalloc(sizeof(struct lttng_perf_counter_field
), GFP_KERNEL
);
271 goto error_alloc_perf_field
;
273 perf_field
->e
= events
;
274 perf_field
->attr
= attr
;
275 perf_field
->name
= name_alloc
;
276 perf_field
->event_field
= event_field
;
278 ctx_field
.event_field
= event_field
;
279 ctx_field
.get_size
= perf_counter_get_size
;
280 ctx_field
.record
= perf_counter_record
;
281 ctx_field
.destroy
= lttng_destroy_perf_counter_ctx_field
;
282 ctx_field
.priv
= perf_field
;
284 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
286 perf_field
->cpuhp_prepare
.component
= LTTNG_CONTEXT_PERF_COUNTERS
;
287 ret
= cpuhp_state_add_instance(lttng_hp_prepare
,
288 &perf_field
->cpuhp_prepare
.node
);
290 goto cpuhp_prepare_error
;
292 perf_field
->cpuhp_online
.component
= LTTNG_CONTEXT_PERF_COUNTERS
;
293 ret
= cpuhp_state_add_instance(lttng_hp_online
,
294 &perf_field
->cpuhp_online
.node
);
296 goto cpuhp_online_error
;
298 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
302 #ifdef CONFIG_HOTPLUG_CPU
303 perf_field
->nb
.notifier_call
=
304 lttng_perf_counter_cpu_hp_callback
;
305 perf_field
->nb
.priority
= 0;
306 register_cpu_notifier(&perf_field
->nb
);
308 lttng_cpus_read_lock();
309 for_each_online_cpu(cpu
) {
310 events
[cpu
] = wrapper_perf_event_create_kernel_counter(attr
,
311 cpu
, NULL
, overflow_callback
);
312 if (!events
[cpu
] || IS_ERR(events
[cpu
])) {
316 if (events
[cpu
]->state
== PERF_EVENT_STATE_ERROR
) {
321 lttng_cpus_read_unlock();
322 perf_field
->hp_enable
= 1;
324 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
326 ret
= lttng_kernel_context_append(ctx
, &ctx_field
);
329 goto append_context_error
;
333 /* Error handling. */
334 append_context_error
:
335 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
340 remove_ret
= cpuhp_state_remove_instance(lttng_hp_prepare
,
341 &perf_field
->cpuhp_prepare
.node
);
345 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
351 for_each_online_cpu(cpu
) {
352 if (events
[cpu
] && !IS_ERR(events
[cpu
]))
353 perf_event_release_kernel(events
[cpu
]);
355 lttng_cpus_read_unlock();
356 #ifdef CONFIG_HOTPLUG_CPU
357 unregister_cpu_notifier(&perf_field
->nb
);
360 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
362 error_alloc_perf_field
:
365 lttng_kvfree(events
);
368 event_field_alloc_error
: