/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-context-perf-counters.c
 *
 * LTTng performance monitoring counters (perf-counters) integration module.
 *
 * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/cpu.h>
#include <lttng/events.h>
#include <lttng/events-internal.h>
#include <ringbuffer/frontend_types.h>
#include <wrapper/cpu.h>
#include <wrapper/vmalloc.h>
#include <lttng/tracer.h>

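/*
 * Compute the space needed in the ring buffer for one counter sample:
 * alignment padding for a uint64_t followed by the 64-bit counter value.
 */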
static
size_t perf_counter_get_size(void *priv, struct lttng_kernel_probe_ctx *probe_ctx, size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}

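/*
 * Record callback: read the perf event attached to the CPU that reserved this
 * event slot and write its current value into the ring buffer. A zero is
 * written when the event does not exist yet or is in error state.
 */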
static
void perf_counter_record(void *priv, struct lttng_kernel_probe_ctx *probe_ctx,
		struct lttng_kernel_ring_buffer_ctx *ctx,
		struct lttng_kernel_channel_buffer *chan)
{
	struct lttng_perf_counter_field *perf_field = (struct lttng_perf_counter_field *) priv;
	struct perf_event *event;
	uint64_t value;

	event = perf_field->e[ctx->priv.reserve_cpu];
	if (likely(event)) {
		if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
			value = 0;
		} else {
			event->pmu->read(event);
			value = local64_read(&event->count);
		}
	} else {
		/*
		 * Perf chooses not to be clever and not to support enabling a
		 * perf counter before the cpu is brought up. Therefore, we need
		 * to support having events coming (e.g. scheduler events)
		 * before the counter is setup. Write an arbitrary 0 in this
		 * case.
		 */
		value = 0;
	}
	chan->ops->event_write(ctx, &value, sizeof(value), lttng_alignof(value));
}

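/*
 * Dummy overflow handler passed to perf_event_create_kernel_counter():
 * counters are read synchronously at record time, so overflow notifications
 * are ignored. The variant taking an extra "nmi" argument matches the older
 * kernel callback signature.
 */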
#ifdef CONFIG_PERF_EVENTS
static
void overflow_callback(struct perf_event *event,
		struct perf_sample_data *data,
		struct pt_regs *regs)
{
}
#else
static
void overflow_callback(struct perf_event *event, int nmi,
		struct perf_sample_data *data,
		struct pt_regs *regs)
{
}
#endif

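/*
 * Release the per-CPU perf events (via the cpuhp instances on kernels >= 4.10,
 * or directly under the CPU hotplug notifier otherwise) and free the memory
 * owned by this context field.
 */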
static
void lttng_destroy_perf_counter_ctx_field(void *priv)
{
	struct lttng_perf_counter_field *perf_field = priv;
	struct perf_event **events = perf_field->e;

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
	{
		int ret;

		ret = cpuhp_state_remove_instance(lttng_hp_online,
				&perf_field->cpuhp_online.node);
		WARN_ON(ret);
		ret = cpuhp_state_remove_instance(lttng_hp_prepare,
				&perf_field->cpuhp_prepare.node);
		WARN_ON(ret);
	}
#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
	{
		int cpu;

		lttng_cpus_read_lock();
		for_each_online_cpu(cpu)
			perf_event_release_kernel(events[cpu]);
		lttng_cpus_read_unlock();
#ifdef CONFIG_HOTPLUG_CPU
		unregister_cpu_notifier(&perf_field->nb);
#endif
	}
#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
	kfree(perf_field->name);
	kfree(perf_field->attr);
	kfree(perf_field->event_field);
	lttng_kvfree(events);
	kfree(perf_field);
}

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))

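/* CPU hotplug "online" callback: create the perf counter for the new CPU. */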
int lttng_cpuhp_perf_counter_online(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct lttng_perf_counter_field *perf_field =
		container_of(node, struct lttng_perf_counter_field,
				cpuhp_online);
	struct perf_event **events = perf_field->e;
	struct perf_event_attr *attr = perf_field->attr;
	struct perf_event *pevent;

	pevent = perf_event_create_kernel_counter(attr,
			cpu, NULL, overflow_callback, NULL);
	if (!pevent || IS_ERR(pevent))
		return -EINVAL;
	if (pevent->state == PERF_EVENT_STATE_ERROR) {
		perf_event_release_kernel(pevent);
		return -EINVAL;
	}
	barrier();	/* Create perf counter before setting event */
	events[cpu] = pevent;
	return 0;
}

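/* CPU hotplug "dead" callback: tear down the perf counter of the dead CPU. */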
int lttng_cpuhp_perf_counter_dead(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct lttng_perf_counter_field *perf_field =
		container_of(node, struct lttng_perf_counter_field,
				cpuhp_prepare);
	struct perf_event **events = perf_field->e;
	struct perf_event *pevent;

	pevent = events[cpu];
	events[cpu] = NULL;
	barrier();	/* NULLify event before perf counter teardown */
	perf_event_release_kernel(pevent);
	return 0;
}

#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */

#ifdef CONFIG_HOTPLUG_CPU

/**
 * lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 *
 * We can setup perf counters when the cpu is online (up prepare seems to be too
 * soon).
 */
static
int lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
		unsigned long action,
		void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;
	struct lttng_perf_counter_field *perf_field =
		container_of(nb, struct lttng_perf_counter_field, nb);
	struct perf_event **events = perf_field->e;
	struct perf_event_attr *attr = perf_field->attr;
	struct perf_event *pevent;

	if (!perf_field->hp_enable)
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		pevent = perf_event_create_kernel_counter(attr,
				cpu, NULL, overflow_callback, NULL);
		if (!pevent || IS_ERR(pevent))
			return NOTIFY_BAD;
		if (pevent->state == PERF_EVENT_STATE_ERROR) {
			perf_event_release_kernel(pevent);
			return NOTIFY_BAD;
		}
		barrier();	/* Create perf counter before setting event */
		events[cpu] = pevent;
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pevent = events[cpu];
		events[cpu] = NULL;
		barrier();	/* NULLify event before perf counter teardown */
		perf_event_release_kernel(pevent);
		break;
	}
	return NOTIFY_OK;
}

#endif

#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */

static const struct lttng_kernel_type_common *field_type =
	lttng_kernel_static_type_integer_from_type(uint64_t, __BYTE_ORDER, 10);

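/*
 * Add a perf counter context field to *ctx: allocate the per-CPU event array
 * and perf_event_attr, create the counters (either through the CPU hotplug
 * instances or immediately on all online CPUs), then append the field to the
 * context.
 */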
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_kernel_ctx **ctx)
{
	struct lttng_kernel_ctx_field ctx_field = { 0 };
	struct lttng_kernel_event_field *event_field;
	struct lttng_perf_counter_field *perf_field;
	struct perf_event **events;
	struct perf_event_attr *attr;
	int ret;
	char *name_alloc;

	if (lttng_kernel_find_context(*ctx, name))
		return -EEXIST;
	name_alloc = kstrdup(name, GFP_KERNEL);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}
	event_field = kzalloc(sizeof(*event_field), GFP_KERNEL);
	if (!event_field) {
		ret = -ENOMEM;
		goto event_field_alloc_error;
	}
	event_field->name = name_alloc;
	event_field->type = field_type;

	events = lttng_kvzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
	if (!events) {
		ret = -ENOMEM;
		goto event_alloc_error;
	}

	attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
	if (!attr) {
		ret = -ENOMEM;
		goto error_attr;
	}

	attr->type = type;
	attr->config = config;
	attr->size = sizeof(struct perf_event_attr);
	attr->pinned = 1;
	attr->disabled = 0;

	perf_field = kzalloc(sizeof(struct lttng_perf_counter_field), GFP_KERNEL);
	if (!perf_field) {
		ret = -ENOMEM;
		goto error_alloc_perf_field;
	}
	perf_field->e = events;
	perf_field->attr = attr;
	perf_field->name = name_alloc;
	perf_field->event_field = event_field;

	ctx_field.event_field = event_field;
	ctx_field.get_size = perf_counter_get_size;
	ctx_field.record = perf_counter_record;
	ctx_field.destroy = lttng_destroy_perf_counter_ctx_field;
	ctx_field.priv = perf_field;

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))

	perf_field->cpuhp_prepare.component = LTTNG_CONTEXT_PERF_COUNTERS;
	ret = cpuhp_state_add_instance(lttng_hp_prepare,
			&perf_field->cpuhp_prepare.node);
	if (ret)
		goto cpuhp_prepare_error;

	perf_field->cpuhp_online.component = LTTNG_CONTEXT_PERF_COUNTERS;
	ret = cpuhp_state_add_instance(lttng_hp_online,
			&perf_field->cpuhp_online.node);
	if (ret)
		goto cpuhp_online_error;

#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
	{
		int cpu;

#ifdef CONFIG_HOTPLUG_CPU
		perf_field->nb.notifier_call =
			lttng_perf_counter_cpu_hp_callback;
		perf_field->nb.priority = 0;
		register_cpu_notifier(&perf_field->nb);
#endif
		lttng_cpus_read_lock();
		for_each_online_cpu(cpu) {
			events[cpu] = perf_event_create_kernel_counter(attr,
					cpu, NULL, overflow_callback, NULL);
			if (!events[cpu] || IS_ERR(events[cpu])) {
				ret = -EINVAL;
				goto counter_error;
			}
			if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
				ret = -EBUSY;
				goto counter_busy;
			}
		}
		lttng_cpus_read_unlock();
		perf_field->hp_enable = 1;
	}
#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */

	ret = lttng_kernel_context_append(ctx, &ctx_field);
	if (ret) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	return 0;

	/* Error handling. */
append_context_error:
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
cpuhp_online_error:
	{
		int remove_ret;

		remove_ret = cpuhp_state_remove_instance(lttng_hp_prepare,
				&perf_field->cpuhp_prepare.node);
		WARN_ON(remove_ret);
	}
cpuhp_prepare_error:
#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
counter_busy:
counter_error:
	{
		int cpu;

		for_each_online_cpu(cpu) {
			if (events[cpu] && !IS_ERR(events[cpu]))
				perf_event_release_kernel(events[cpu]);
		}
		lttng_cpus_read_unlock();
#ifdef CONFIG_HOTPLUG_CPU
		unregister_cpu_notifier(&perf_field->nb);
#endif
	}
#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
	kfree(perf_field);
error_alloc_perf_field:
	kfree(attr);
error_attr:
	lttng_kvfree(events);
event_alloc_error:
	kfree(event_field);
event_field_alloc_error:
	kfree(name_alloc);
name_alloc_error:
	return ret;
}
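
/*
 * Example usage (sketch, not part of the original file): adding a hardware
 * cycle counter as a context field. The "perf_cpu_cycles" field name and the
 * ctx pointer are illustrative assumptions; in practice the caller and the
 * field name are driven by the LTTng session daemon through the ABI layer.
 *
 *	struct lttng_kernel_ctx **ctx = ...;
 *	int ret;
 *
 *	ret = lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
 *			PERF_COUNT_HW_CPU_CYCLES, "perf_cpu_cycles", ctx);
 *	if (ret)
 *		printk(KERN_WARNING "LTTng: failed to add perf counter context\n");
 */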