/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-context-perf-counters.c
 *
 * LTTng performance monitoring counters (perf-counters) integration module.
 *
 * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
#include <lttng-tracer.h>

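/*
 * Return the number of bytes this context field adds to an event record:
 * padding to align @offset on uint64_t, plus the 8-byte counter value.
 */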
static
size_t perf_counter_get_size(size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}

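/*
 * Read the current value of this CPU's perf counter and write it into the
 * ring buffer. Called from tracepoint probe context on the traced CPU.
 */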
static
void perf_counter_record(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	struct perf_event *event;
	uint64_t value;

	event = field->u.perf_counter->e[ctx->cpu];
	if (likely(event)) {
		if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
			value = 0;
		} else {
			event->pmu->read(event);
			value = local64_read(&event->count);
		}
	} else {
		/*
		 * Perf chooses not to be clever and does not support enabling
		 * a perf counter before the cpu is brought up. Therefore, we
		 * need to handle events arriving (e.g. scheduler events)
		 * before the counter is set up. Write an arbitrary 0 in this
		 * case.
		 */
		value = 0;
	}
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
	chan->ops->event_write(ctx, &value, sizeof(value));
}

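/*
 * Dummy overflow handler passed to perf_event_create_kernel_counter(): the
 * counter is only ever read directly, so overflow notifications are ignored.
 * The 'int nmi' parameter was dropped from the overflow handler signature in
 * kernel 3.1, hence the two prototypes.
 */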
#if defined(CONFIG_PERF_EVENTS) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,99))
static
void overflow_callback(struct perf_event *event,
		struct perf_sample_data *data,
		struct pt_regs *regs)
{
}
#else
static
void overflow_callback(struct perf_event *event, int nmi,
		struct perf_sample_data *data,
		struct pt_regs *regs)
{
}
#endif

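/*
 * Release all per-cpu counters and the associated bookkeeping when the
 * context field is destroyed. On >= 4.10 kernels the cpuhp instances own
 * the counters, so removing the instances invokes the teardown callbacks;
 * older kernels release the counters by hand and unregister the notifier.
 */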
static
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
{
	struct perf_event **events = field->u.perf_counter->e;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
	{
		int ret;

		ret = cpuhp_state_remove_instance(lttng_hp_online,
				&field->u.perf_counter->cpuhp_online.node);
		WARN_ON(ret);
		ret = cpuhp_state_remove_instance(lttng_hp_prepare,
				&field->u.perf_counter->cpuhp_prepare.node);
		WARN_ON(ret);
	}
#else	/* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	{
		int cpu;

		get_online_cpus();
		for_each_online_cpu(cpu)
			perf_event_release_kernel(events[cpu]);
		put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
		unregister_cpu_notifier(&field->u.perf_counter->nb);
#endif
	}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	kfree(field->event_field.name);
	kfree(field->u.perf_counter->attr);
	kvfree(events);
	kfree(field->u.perf_counter);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

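/*
 * Hotplug "online" callback: create the perf counter for a CPU coming up.
 * Also invoked for each already-online CPU when the instance is added in
 * lttng_add_perf_counter_to_ctx().
 */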
int lttng_cpuhp_perf_counter_online(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct lttng_perf_counter_field *perf_field =
		container_of(node, struct lttng_perf_counter_field,
				cpuhp_online);
	struct perf_event **events = perf_field->e;
	struct perf_event_attr *attr = perf_field->attr;
	struct perf_event *pevent;

	pevent = perf_event_create_kernel_counter(attr,
			cpu, NULL, overflow_callback, NULL);
	if (!pevent || IS_ERR(pevent))
		return -EINVAL;
	if (pevent->state == PERF_EVENT_STATE_ERROR) {
		perf_event_release_kernel(pevent);
		return -EINVAL;
	}
	barrier();	/* Create perf counter before setting event */
	events[cpu] = pevent;
	return 0;
}

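/*
 * Hotplug teardown callback: clear the per-cpu event pointer before
 * releasing the counter, so the record callback sees either a valid event
 * or NULL.
 */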
int lttng_cpuhp_perf_counter_dead(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct lttng_perf_counter_field *perf_field =
		container_of(node, struct lttng_perf_counter_field,
				cpuhp_prepare);
	struct perf_event **events = perf_field->e;
	struct perf_event *pevent;

	pevent = events[cpu];
	events[cpu] = NULL;
	barrier();	/* NULLify event before perf counter teardown */
	perf_event_release_kernel(pevent);
	return 0;
}

#else	/* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

#ifdef CONFIG_HOTPLUG_CPU

/**
 * lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns %NOTIFY_OK on success, %NOTIFY_BAD on failure.
 *
 * Perf counters can only be set up once the CPU is online; CPU_UP_PREPARE
 * is too early.
 */
static
int lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
		unsigned long action,
		void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;
	struct lttng_perf_counter_field *perf_field =
		container_of(nb, struct lttng_perf_counter_field, nb);
	struct perf_event **events = perf_field->e;
	struct perf_event_attr *attr = perf_field->attr;
	struct perf_event *pevent;

	if (!perf_field->hp_enable)
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		pevent = perf_event_create_kernel_counter(attr,
				cpu, NULL, overflow_callback, NULL);
		if (!pevent || IS_ERR(pevent))
			return NOTIFY_BAD;
		if (pevent->state == PERF_EVENT_STATE_ERROR) {
			perf_event_release_kernel(pevent);
			return NOTIFY_BAD;
		}
		barrier();	/* Create perf counter before setting event */
		events[cpu] = pevent;
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pevent = events[cpu];
		events[cpu] = NULL;
		barrier();	/* NULLify event before perf counter teardown */
		perf_event_release_kernel(pevent);
		break;
	}
	return NOTIFY_OK;
}

#endif

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

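/*
 * Add a perf counter context field named @name to @ctx, backed by one
 * kernel counter per possible CPU. @type and @config follow the
 * struct perf_event_attr conventions (e.g. PERF_TYPE_HARDWARE /
 * PERF_COUNT_HW_CPU_CYCLES).
 *
 * Minimal usage sketch (the session variable is illustrative, not part of
 * this file):
 *
 *	ret = lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
 *			PERF_COUNT_HW_CPU_CYCLES, "perf_cpu_cycles",
 *			&session->ctx);
 *	if (ret)
 *		return ret;
 */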
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;
	struct lttng_perf_counter_field *perf_field;
	struct perf_event **events;
	struct perf_event_attr *attr;
	int ret;
	char *name_alloc;

	events = kvzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
	if (!events)
		return -ENOMEM;

	attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
	if (!attr) {
		ret = -ENOMEM;
		goto error_attr;
	}

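	/*
	 * Request a pinned, initially-enabled counter: a pinned event keeps
	 * its PMU slot for as long as it runs, and moves to
	 * PERF_EVENT_STATE_ERROR if it cannot be scheduled, which the record
	 * callback checks for.
	 */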
	attr->type = type;
	attr->config = config;
	attr->size = sizeof(struct perf_event_attr);
	attr->pinned = 1;
	attr->disabled = 0;

	perf_field = kzalloc(sizeof(struct lttng_perf_counter_field), GFP_KERNEL);
	if (!perf_field) {
		ret = -ENOMEM;
		goto error_alloc_perf_field;
	}
	perf_field->e = events;
	perf_field->attr = attr;

	name_alloc = kstrdup(name, GFP_KERNEL);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}

	field = lttng_append_context(ctx);
	if (!field) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	if (lttng_find_context(*ctx, name_alloc)) {
		ret = -EEXIST;
		goto find_error;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

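	/*
	 * Register both hotplug instances: "prepare" tears counters down as
	 * CPUs go offline, "online" creates them as CPUs come up. Adding the
	 * "online" instance also runs its callback for every CPU that is
	 * already online.
	 */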
	perf_field->cpuhp_prepare.component = LTTNG_CONTEXT_PERF_COUNTERS;
	ret = cpuhp_state_add_instance(lttng_hp_prepare,
			&perf_field->cpuhp_prepare.node);
	if (ret)
		goto cpuhp_prepare_error;

	perf_field->cpuhp_online.component = LTTNG_CONTEXT_PERF_COUNTERS;
	ret = cpuhp_state_add_instance(lttng_hp_online,
			&perf_field->cpuhp_online.node);
	if (ret)
		goto cpuhp_online_error;

#else	/* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	{
		int cpu;

#ifdef CONFIG_HOTPLUG_CPU
		perf_field->nb.notifier_call =
			lttng_perf_counter_cpu_hp_callback;
		perf_field->nb.priority = 0;
		register_cpu_notifier(&perf_field->nb);
#endif
		get_online_cpus();
		for_each_online_cpu(cpu) {
			events[cpu] = perf_event_create_kernel_counter(attr,
					cpu, NULL, overflow_callback, NULL);
			if (!events[cpu] || IS_ERR(events[cpu])) {
				ret = -EINVAL;
				goto counter_error;
			}
			if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
				ret = -EBUSY;
				goto counter_busy;
			}
		}
		put_online_cpus();
		perf_field->hp_enable = 1;
	}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

	field->destroy = lttng_destroy_perf_counter_field;

	field->event_field.name = name_alloc;
	field->event_field.type.atype = atype_integer;
	field->event_field.type.u.integer.size = sizeof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.integer.alignment = lttng_alignof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.integer.signedness = lttng_is_signed_type(uint64_t);
	field->event_field.type.u.integer.reverse_byte_order = 0;
	field->event_field.type.u.integer.base = 10;
	field->event_field.type.u.integer.encoding = lttng_encode_none;
	field->get_size = perf_counter_get_size;
	field->record = perf_counter_record;
	field->u.perf_counter = perf_field;
	lttng_context_update(*ctx);

	return 0;

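	/*
	 * Error paths: unwind in the reverse order of the setup above. Note
	 * that counter_error/counter_busy are reached with the CPUs still
	 * read-locked by get_online_cpus(), hence the put_online_cpus() in
	 * the unwind path.
	 */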
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
cpuhp_online_error:
	{
		int remove_ret;

		remove_ret = cpuhp_state_remove_instance(lttng_hp_prepare,
				&perf_field->cpuhp_prepare.node);
		WARN_ON(remove_ret);
	}
cpuhp_prepare_error:
#else	/* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
counter_busy:
counter_error:
	{
		int cpu;

		for_each_online_cpu(cpu) {
			if (events[cpu] && !IS_ERR(events[cpu]))
				perf_event_release_kernel(events[cpu]);
		}
		put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
		unregister_cpu_notifier(&perf_field->nb);
#endif
	}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
find_error:
	lttng_remove_context_field(ctx, field);
append_context_error:
	kfree(name_alloc);
name_alloc_error:
	kfree(perf_field);
error_alloc_perf_field:
	kfree(attr);
error_attr:
	kvfree(events);
	return ret;
}