/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * lttng-context-perf-counters.c
 *
 * LTTng performance monitoring counters (perf-counters) integration module.
 *
 * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
10 #include <linux/module.h>
11 #include <linux/slab.h>
12 #include <linux/perf_event.h>
13 #include <linux/list.h>
14 #include <linux/string.h>
15 #include <linux/cpu.h>
16 #include <lttng-events.h>
17 #include <wrapper/ringbuffer/frontend_types.h>
18 #include <wrapper/cpu.h>
19 #include <wrapper/vmalloc.h>
20 #include <wrapper/perf.h>
21 #include <lttng-tracer.h>
24 size_t perf_counter_get_size(size_t offset
)
28 size
+= lib_ring_buffer_align(offset
, lttng_alignof(uint64_t));
29 size
+= sizeof(uint64_t);
34 void perf_counter_record(struct lttng_ctx_field
*field
,
35 struct lib_ring_buffer_ctx
*ctx
,
36 struct lttng_channel
*chan
)
38 struct perf_event
*event
;
41 event
= field
->u
.perf_counter
->e
[ctx
->cpu
];
43 if (unlikely(event
->state
== PERF_EVENT_STATE_ERROR
)) {
46 event
->pmu
->read(event
);
47 value
= local64_read(&event
->count
);
51 * Perf chooses not to be clever and not to support enabling a
52 * perf counter before the cpu is brought up. Therefore, we need
53 * to support having events coming (e.g. scheduler events)
54 * before the counter is setup. Write an arbitrary 0 in this
59 lib_ring_buffer_align_ctx(ctx
, lttng_alignof(value
));
60 chan
->ops
->event_write(ctx
, &value
, sizeof(value
));
63 #if defined(CONFIG_PERF_EVENTS) && (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,1,0))
65 void overflow_callback(struct perf_event
*event
,
66 struct perf_sample_data
*data
,
72 void overflow_callback(struct perf_event
*event
, int nmi
,
73 struct perf_sample_data
*data
,
80 void lttng_destroy_perf_counter_field(struct lttng_ctx_field
*field
)
82 struct perf_event
**events
= field
->u
.perf_counter
->e
;
84 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
88 ret
= cpuhp_state_remove_instance(lttng_hp_online
,
89 &field
->u
.perf_counter
->cpuhp_online
.node
);
91 ret
= cpuhp_state_remove_instance(lttng_hp_prepare
,
92 &field
->u
.perf_counter
->cpuhp_prepare
.node
);
95 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
99 lttng_cpus_read_lock();
100 for_each_online_cpu(cpu
)
101 perf_event_release_kernel(events
[cpu
]);
102 lttng_cpus_read_unlock();
103 #ifdef CONFIG_HOTPLUG_CPU
104 unregister_cpu_notifier(&field
->u
.perf_counter
->nb
);
107 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
108 kfree(field
->event_field
.name
);
109 kfree(field
->u
.perf_counter
->attr
);
110 lttng_kvfree(events
);
111 kfree(field
->u
.perf_counter
);
114 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
116 int lttng_cpuhp_perf_counter_online(unsigned int cpu
,
117 struct lttng_cpuhp_node
*node
)
119 struct lttng_perf_counter_field
*perf_field
=
120 container_of(node
, struct lttng_perf_counter_field
,
122 struct perf_event
**events
= perf_field
->e
;
123 struct perf_event_attr
*attr
= perf_field
->attr
;
124 struct perf_event
*pevent
;
126 pevent
= wrapper_perf_event_create_kernel_counter(attr
,
127 cpu
, NULL
, overflow_callback
);
128 if (!pevent
|| IS_ERR(pevent
))
130 if (pevent
->state
== PERF_EVENT_STATE_ERROR
) {
131 perf_event_release_kernel(pevent
);
134 barrier(); /* Create perf counter before setting event */
135 events
[cpu
] = pevent
;
139 int lttng_cpuhp_perf_counter_dead(unsigned int cpu
,
140 struct lttng_cpuhp_node
*node
)
142 struct lttng_perf_counter_field
*perf_field
=
143 container_of(node
, struct lttng_perf_counter_field
,
145 struct perf_event
**events
= perf_field
->e
;
146 struct perf_event
*pevent
;
148 pevent
= events
[cpu
];
150 barrier(); /* NULLify event before perf counter teardown */
151 perf_event_release_kernel(pevent
);
155 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
157 #ifdef CONFIG_HOTPLUG_CPU
160 * lttng_perf_counter_hp_callback - CPU hotplug callback
161 * @nb: notifier block
162 * @action: hotplug action to take
165 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
167 * We can setup perf counters when the cpu is online (up prepare seems to be too
171 int lttng_perf_counter_cpu_hp_callback(struct notifier_block
*nb
,
172 unsigned long action
,
175 unsigned int cpu
= (unsigned long) hcpu
;
176 struct lttng_perf_counter_field
*perf_field
=
177 container_of(nb
, struct lttng_perf_counter_field
, nb
);
178 struct perf_event
**events
= perf_field
->e
;
179 struct perf_event_attr
*attr
= perf_field
->attr
;
180 struct perf_event
*pevent
;
182 if (!perf_field
->hp_enable
)
187 case CPU_ONLINE_FROZEN
:
188 pevent
= wrapper_perf_event_create_kernel_counter(attr
,
189 cpu
, NULL
, overflow_callback
);
190 if (!pevent
|| IS_ERR(pevent
))
192 if (pevent
->state
== PERF_EVENT_STATE_ERROR
) {
193 perf_event_release_kernel(pevent
);
196 barrier(); /* Create perf counter before setting event */
197 events
[cpu
] = pevent
;
199 case CPU_UP_CANCELED
:
200 case CPU_UP_CANCELED_FROZEN
:
202 case CPU_DEAD_FROZEN
:
203 pevent
= events
[cpu
];
205 barrier(); /* NULLify event before perf counter teardown */
206 perf_event_release_kernel(pevent
);
214 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
216 int lttng_add_perf_counter_to_ctx(uint32_t type
,
219 struct lttng_ctx
**ctx
)
221 struct lttng_ctx_field
*field
;
222 struct lttng_perf_counter_field
*perf_field
;
223 struct perf_event
**events
;
224 struct perf_event_attr
*attr
;
228 events
= lttng_kvzalloc(num_possible_cpus() * sizeof(*events
), GFP_KERNEL
);
232 attr
= kzalloc(sizeof(struct perf_event_attr
), GFP_KERNEL
);
239 attr
->config
= config
;
240 attr
->size
= sizeof(struct perf_event_attr
);
244 perf_field
= kzalloc(sizeof(struct lttng_perf_counter_field
), GFP_KERNEL
);
247 goto error_alloc_perf_field
;
249 perf_field
->e
= events
;
250 perf_field
->attr
= attr
;
252 name_alloc
= kstrdup(name
, GFP_KERNEL
);
255 goto name_alloc_error
;
258 field
= lttng_append_context(ctx
);
261 goto append_context_error
;
263 if (lttng_find_context(*ctx
, name_alloc
)) {
268 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
270 perf_field
->cpuhp_prepare
.component
= LTTNG_CONTEXT_PERF_COUNTERS
;
271 ret
= cpuhp_state_add_instance(lttng_hp_prepare
,
272 &perf_field
->cpuhp_prepare
.node
);
274 goto cpuhp_prepare_error
;
276 perf_field
->cpuhp_online
.component
= LTTNG_CONTEXT_PERF_COUNTERS
;
277 ret
= cpuhp_state_add_instance(lttng_hp_online
,
278 &perf_field
->cpuhp_online
.node
);
280 goto cpuhp_online_error
;
282 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
286 #ifdef CONFIG_HOTPLUG_CPU
287 perf_field
->nb
.notifier_call
=
288 lttng_perf_counter_cpu_hp_callback
;
289 perf_field
->nb
.priority
= 0;
290 register_cpu_notifier(&perf_field
->nb
);
292 lttng_cpus_read_lock();
293 for_each_online_cpu(cpu
) {
294 events
[cpu
] = wrapper_perf_event_create_kernel_counter(attr
,
295 cpu
, NULL
, overflow_callback
);
296 if (!events
[cpu
] || IS_ERR(events
[cpu
])) {
300 if (events
[cpu
]->state
== PERF_EVENT_STATE_ERROR
) {
305 lttng_cpus_read_unlock();
306 perf_field
->hp_enable
= 1;
308 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
310 field
->destroy
= lttng_destroy_perf_counter_field
;
312 field
->event_field
.name
= name_alloc
;
313 field
->event_field
.type
.atype
= atype_integer
;
314 field
->event_field
.type
.u
.basic
.integer
.size
= sizeof(uint64_t) * CHAR_BIT
;
315 field
->event_field
.type
.u
.basic
.integer
.alignment
= lttng_alignof(uint64_t) * CHAR_BIT
;
316 field
->event_field
.type
.u
.basic
.integer
.signedness
= lttng_is_signed_type(uint64_t);
317 field
->event_field
.type
.u
.basic
.integer
.reverse_byte_order
= 0;
318 field
->event_field
.type
.u
.basic
.integer
.base
= 10;
319 field
->event_field
.type
.u
.basic
.integer
.encoding
= lttng_encode_none
;
320 field
->get_size
= perf_counter_get_size
;
321 field
->record
= perf_counter_record
;
322 field
->u
.perf_counter
= perf_field
;
323 lttng_context_update(*ctx
);
325 wrapper_vmalloc_sync_mappings();
328 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
333 remove_ret
= cpuhp_state_remove_instance(lttng_hp_prepare
,
334 &perf_field
->cpuhp_prepare
.node
);
338 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
344 for_each_online_cpu(cpu
) {
345 if (events
[cpu
] && !IS_ERR(events
[cpu
]))
346 perf_event_release_kernel(events
[cpu
]);
348 lttng_cpus_read_unlock();
349 #ifdef CONFIG_HOTPLUG_CPU
350 unregister_cpu_notifier(&perf_field
->nb
);
353 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
355 lttng_remove_context_field(ctx
, field
);
356 append_context_error
:
360 error_alloc_perf_field
:
363 lttng_kvfree(events
);