lttng-abi: Document ioctl numbers reserved by lttng-abi-old.h
[lttng-modules.git] / lttng-context-perf-counters.c
CommitLineData
9f36eaed
MJ
/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * lttng-context-perf-counters.c
 *
 * LTTng performance monitoring counters (perf-counters) integration module.
 *
 * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
9
10#include <linux/module.h>
11#include <linux/slab.h>
12#include <linux/perf_event.h>
13#include <linux/list.h>
c24a0d71 14#include <linux/string.h>
5ca7b8a3 15#include <linux/cpu.h>
241ae9a8
MD
16#include <lttng-events.h>
17#include <wrapper/ringbuffer/frontend_types.h>
18#include <wrapper/vmalloc.h>
19#include <wrapper/perf.h>
20#include <lttng-tracer.h>
833ad6a0 21
f1676205
MD
22static
23size_t perf_counter_get_size(size_t offset)
24{
25 size_t size = 0;
26
a90917c3 27 size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
f1676205
MD
28 size += sizeof(uint64_t);
29 return size;
30}
31
833ad6a0
MD
32static
33void perf_counter_record(struct lttng_ctx_field *field,
34 struct lib_ring_buffer_ctx *ctx,
a90917c3 35 struct lttng_channel *chan)
833ad6a0
MD
36{
37 struct perf_event *event;
38 uint64_t value;
39
2001023e 40 event = field->u.perf_counter->e[ctx->cpu];
0478c519 41 if (likely(event)) {
7b745a96
MD
42 if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
43 value = 0;
44 } else {
45 event->pmu->read(event);
46 value = local64_read(&event->count);
47 }
f91fd73b
MD
48 } else {
49 /*
50 * Perf chooses not to be clever and not to support enabling a
51 * perf counter before the cpu is brought up. Therefore, we need
52 * to support having events coming (e.g. scheduler events)
53 * before the counter is setup. Write an arbitrary 0 in this
54 * case.
55 */
56 value = 0;
57 }
a90917c3 58 lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
833ad6a0
MD
59 chan->ops->event_write(ctx, &value, sizeof(value));
60}
61
90f5546c
MD
#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99))
/*
 * Intentionally-empty overflow handler, passed to
 * wrapper_perf_event_create_kernel_counter(): this module only samples
 * counter values on trace events and ignores overflow notifications.
 * The perf callback signature dropped the "int nmi" argument around
 * kernel 3.1, hence the two variants below — NOTE(review): the 3.0.99
 * version cutoff presumably tracks that signature change; confirm.
 */
static
void overflow_callback(struct perf_event *event,
		struct perf_sample_data *data,
		struct pt_regs *regs)
{
}
#else
/* Pre-3.1 variant: same no-op handler with the legacy "nmi" argument. */
static
void overflow_callback(struct perf_event *event, int nmi,
		struct perf_sample_data *data,
		struct pt_regs *regs)
{
}
#endif
833ad6a0 77
2dccf128
MD
/*
 * Tear down a perf-counter context field: release the per-CPU perf
 * counters (directly, or via the cpuhp "dead" callbacks on >= 4.10),
 * then free all memory allocated by lttng_add_perf_counter_to_ctx().
 */
static
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
{
	struct perf_event **events = field->u.perf_counter->e;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
	{
		int ret;

		/*
		 * Removing the hotplug state instances runs their teardown
		 * callbacks (see lttng_cpuhp_perf_counter_dead), which
		 * release the per-CPU counters.
		 */
		ret = cpuhp_state_remove_instance(lttng_hp_online,
				&field->u.perf_counter->cpuhp_online.node);
		WARN_ON(ret);
		ret = cpuhp_state_remove_instance(lttng_hp_prepare,
				&field->u.perf_counter->cpuhp_prepare.node);
		WARN_ON(ret);
	}
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	{
		int cpu;

		/* Hold off hotplug while releasing each online CPU's counter. */
		get_online_cpus();
		for_each_online_cpu(cpu)
			perf_event_release_kernel(events[cpu]);
		put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
		unregister_cpu_notifier(&field->u.perf_counter->nb);
#endif
	}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	/* Free in reverse order of allocation in the add-context path. */
	kfree(field->event_field.name);
	kfree(field->u.perf_counter->attr);
	lttng_kvfree(events);
	kfree(field->u.perf_counter);
}
112
1e367326
MD
113#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
114
115int lttng_cpuhp_perf_counter_online(unsigned int cpu,
116 struct lttng_cpuhp_node *node)
117{
118 struct lttng_perf_counter_field *perf_field =
119 container_of(node, struct lttng_perf_counter_field,
120 cpuhp_online);
121 struct perf_event **events = perf_field->e;
122 struct perf_event_attr *attr = perf_field->attr;
123 struct perf_event *pevent;
124
125 pevent = wrapper_perf_event_create_kernel_counter(attr,
126 cpu, NULL, overflow_callback);
127 if (!pevent || IS_ERR(pevent))
128 return -EINVAL;
129 if (pevent->state == PERF_EVENT_STATE_ERROR) {
130 perf_event_release_kernel(pevent);
131 return -EINVAL;
132 }
133 barrier(); /* Create perf counter before setting event */
134 events[cpu] = pevent;
135 return 0;
136}
137
138int lttng_cpuhp_perf_counter_dead(unsigned int cpu,
139 struct lttng_cpuhp_node *node)
140{
141 struct lttng_perf_counter_field *perf_field =
142 container_of(node, struct lttng_perf_counter_field,
143 cpuhp_prepare);
144 struct perf_event **events = perf_field->e;
145 struct perf_event *pevent;
146
147 pevent = events[cpu];
148 events[cpu] = NULL;
149 barrier(); /* NULLify event before perf counter teardown */
150 perf_event_release_kernel(pevent);
151 return 0;
152}
153
154#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
155
8289661d
MD
#ifdef CONFIG_HOTPLUG_CPU

/**
 * lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 *
 * We can setup perf counters when the cpu is online (up prepare seems to be too
 * soon).
 *
 * Pre-4.10 notifier-based hotplug path: creates the per-CPU counter when
 * a CPU comes online, and tears it down when the CPU goes away.
 */
static
int lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
		unsigned long action,
		void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;
	struct lttng_perf_counter_field *perf_field =
		container_of(nb, struct lttng_perf_counter_field, nb);
	struct perf_event **events = perf_field->e;
	struct perf_event_attr *attr = perf_field->attr;
	struct perf_event *pevent;

	/*
	 * Ignore notifications until lttng_add_perf_counter_to_ctx() has
	 * finished creating the initial per-CPU counters (it sets
	 * hp_enable last).
	 */
	if (!perf_field->hp_enable)
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		pevent = wrapper_perf_event_create_kernel_counter(attr,
				cpu, NULL, overflow_callback);
		if (!pevent || IS_ERR(pevent))
			return NOTIFY_BAD;
		if (pevent->state == PERF_EVENT_STATE_ERROR) {
			perf_event_release_kernel(pevent);
			return NOTIFY_BAD;
		}
		barrier();	/* Create perf counter before setting event */
		events[cpu] = pevent;
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pevent = events[cpu];
		events[cpu] = NULL;
		barrier();	/* NULLify event before perf counter teardown */
		perf_event_release_kernel(pevent);
		break;
	}
	return NOTIFY_OK;
}

#endif
212
1e367326
MD
213#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
214
833ad6a0
MD
/*
 * Add a perf-counter context field to @ctx.
 *
 * @type:   perf event attribute type (e.g. raw/hardware/software class)
 * @config: perf event attribute config value for that type
 * @name:   context field name (duplicated; duplicate freed on destroy)
 * @ctx:    context to append the field to
 *
 * Allocates the per-CPU event array and perf attributes, registers for
 * CPU hotplug (cpuhp instances on >= 4.10, notifier + explicit per-CPU
 * counter creation otherwise), and wires up the field callbacks.
 *
 * Returns 0 on success, a negative error code on failure (-EEXIST if a
 * field of the same name already exists). All allocations are unwound
 * on error via the goto cleanup chain below.
 */
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;
	struct lttng_perf_counter_field *perf_field;
	struct perf_event **events;
	struct perf_event_attr *attr;
	int ret;
	char *name_alloc;

	/* One perf_event slot per possible CPU. */
	events = lttng_kvzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
	if (!events)
		return -ENOMEM;

	attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
	if (!attr) {
		ret = -ENOMEM;
		goto error_attr;
	}

	attr->type = type;
	attr->config = config;
	attr->size = sizeof(struct perf_event_attr);
	attr->pinned = 1;	/* Keep the counter on the PMU at all times. */
	attr->disabled = 0;

	perf_field = kzalloc(sizeof(struct lttng_perf_counter_field), GFP_KERNEL);
	if (!perf_field) {
		ret = -ENOMEM;
		goto error_alloc_perf_field;
	}
	perf_field->e = events;
	perf_field->attr = attr;

	name_alloc = kstrdup(name, GFP_KERNEL);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}

	field = lttng_append_context(ctx);
	if (!field) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	/* Reject duplicate context field names. */
	if (lttng_find_context(*ctx, name_alloc)) {
		ret = -EEXIST;
		goto find_error;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

	/*
	 * Registering the instances also runs their "online" callbacks,
	 * creating the counters for currently-online CPUs.
	 */
	perf_field->cpuhp_prepare.component = LTTNG_CONTEXT_PERF_COUNTERS;
	ret = cpuhp_state_add_instance(lttng_hp_prepare,
		&perf_field->cpuhp_prepare.node);
	if (ret)
		goto cpuhp_prepare_error;

	perf_field->cpuhp_online.component = LTTNG_CONTEXT_PERF_COUNTERS;
	ret = cpuhp_state_add_instance(lttng_hp_online,
		&perf_field->cpuhp_online.node);
	if (ret)
		goto cpuhp_online_error;

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	{
		int cpu;

#ifdef CONFIG_HOTPLUG_CPU
		/*
		 * Register the notifier before creating the counters; the
		 * callback stays inert until hp_enable is set below.
		 */
		perf_field->nb.notifier_call =
			lttng_perf_counter_cpu_hp_callback;
		perf_field->nb.priority = 0;
		register_cpu_notifier(&perf_field->nb);
#endif
		get_online_cpus();
		for_each_online_cpu(cpu) {
			events[cpu] = wrapper_perf_event_create_kernel_counter(attr,
					cpu, NULL, overflow_callback);
			if (!events[cpu] || IS_ERR(events[cpu])) {
				ret = -EINVAL;
				goto counter_error;
			}
			if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
				ret = -EBUSY;
				goto counter_busy;
			}
		}
		put_online_cpus();
		/* From here on, the hotplug callback manages the counters. */
		perf_field->hp_enable = 1;
	}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

	field->destroy = lttng_destroy_perf_counter_field;

	/* The counter value is exposed as an unsigned 64-bit integer field. */
	field->event_field.name = name_alloc;
	field->event_field.type.atype = atype_integer;
	field->event_field.type.u.basic.integer.size = sizeof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.alignment = lttng_alignof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.signedness = lttng_is_signed_type(uint64_t);
	field->event_field.type.u.basic.integer.reverse_byte_order = 0;
	field->event_field.type.u.basic.integer.base = 10;
	field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
	field->get_size = perf_counter_get_size;
	field->record = perf_counter_record;
	field->u.perf_counter = perf_field;
	lttng_context_update(*ctx);

	wrapper_vmalloc_sync_all();
	return 0;

	/* Error unwinding: reverse order of the setup steps above. */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
cpuhp_online_error:
	{
		int remove_ret;

		remove_ret = cpuhp_state_remove_instance(lttng_hp_prepare,
				&perf_field->cpuhp_prepare.node);
		WARN_ON(remove_ret);
	}
cpuhp_prepare_error:
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
counter_busy:
counter_error:
	{
		int cpu;

		/* Release any counters created before the failure. */
		for_each_online_cpu(cpu) {
			if (events[cpu] && !IS_ERR(events[cpu]))
				perf_event_release_kernel(events[cpu]);
		}
		/* Jumped here with the hotplug lock still held. */
		put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
		unregister_cpu_notifier(&perf_field->nb);
#endif
	}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
find_error:
	lttng_remove_context_field(ctx, field);
append_context_error:
	kfree(name_alloc);
name_alloc_error:
	kfree(perf_field);
error_alloc_perf_field:
	kfree(attr);
error_attr:
	lttng_kvfree(events);
	return ret;
}
This page took 0.050535 seconds and 4 git commands to generate.