/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-context-perf-counters.c
 *
 * LTTng performance monitoring counters (perf-counters) integration module.
 *
 * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/version.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
#include <lttng-tracer.h>

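/*
 * Compute the space needed in the ring buffer for one counter sample:
 * alignment padding for a uint64_t followed by the value itself.
 */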
static
size_t perf_counter_get_size(size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}

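/*
 * Record callback: sample the perf counter of the current CPU and write
 * its 64-bit value into the event context. If the counter is in error
 * state, or has not been created yet for this CPU, record 0.
 */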
static
void perf_counter_record(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	struct perf_event *event;
	uint64_t value;

	event = field->u.perf_counter->e[ctx->cpu];
	if (likely(event)) {
		if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
			value = 0;
		} else {
			event->pmu->read(event);
			value = local64_read(&event->count);
		}
	} else {
		/*
		 * Perf chooses not to be clever and not to support enabling a
		 * perf counter before the cpu is brought up. Therefore, we need
		 * to support having events coming (e.g. scheduler events)
		 * before the counter is set up. Write an arbitrary 0 in this
		 * case.
		 */
		value = 0;
	}
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
	chan->ops->event_write(ctx, &value, sizeof(value));
}

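/*
 * Overflow handler passed to perf_event_create_kernel_counter().
 * Counters are read synchronously from the record callback, so overflow
 * notifications are intentionally ignored.
 */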
static
void overflow_callback(struct perf_event *event,
		struct perf_sample_data *data,
		struct pt_regs *regs)
{
}

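/*
 * Release the per-CPU counters attached to a context field: remove the
 * CPU hotplug instances (kernels >= 4.10) or release each online CPU's
 * counter and unregister the notifier (older kernels), then free the
 * field name, the perf attribute, the event array and the field state.
 */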
static
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
{
	struct perf_event **events = field->u.perf_counter->e;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
	{
		int ret;

		ret = cpuhp_state_remove_instance(lttng_hp_online,
				&field->u.perf_counter->cpuhp_online.node);
		WARN_ON(ret);
		ret = cpuhp_state_remove_instance(lttng_hp_prepare,
				&field->u.perf_counter->cpuhp_prepare.node);
		WARN_ON(ret);
	}
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	{
		int cpu;

		get_online_cpus();
		for_each_online_cpu(cpu)
			perf_event_release_kernel(events[cpu]);
		put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
		unregister_cpu_notifier(&field->u.perf_counter->nb);
#endif
	}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	kfree(field->event_field.name);
	kfree(field->u.perf_counter->attr);
	kvfree(events);
	kfree(field->u.perf_counter);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

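/*
 * CPU hotplug "online" callback: create the pinned kernel counter for
 * the CPU coming up, then publish it in the per-CPU event array.
 */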
int lttng_cpuhp_perf_counter_online(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct lttng_perf_counter_field *perf_field =
		container_of(node, struct lttng_perf_counter_field,
				cpuhp_online);
	struct perf_event **events = perf_field->e;
	struct perf_event_attr *attr = perf_field->attr;
	struct perf_event *pevent;

	pevent = perf_event_create_kernel_counter(attr,
			cpu, NULL, overflow_callback, NULL);
	if (!pevent || IS_ERR(pevent))
		return -EINVAL;
	if (pevent->state == PERF_EVENT_STATE_ERROR) {
		perf_event_release_kernel(pevent);
		return -EINVAL;
	}
	barrier(); /* Create perf counter before setting event */
	events[cpu] = pevent;
	return 0;
}

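/*
 * CPU hotplug "dead" callback: unpublish the counter of the CPU going
 * down so the record callback stops using it, then release the counter.
 */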
int lttng_cpuhp_perf_counter_dead(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct lttng_perf_counter_field *perf_field =
		container_of(node, struct lttng_perf_counter_field,
				cpuhp_prepare);
	struct perf_event **events = perf_field->e;
	struct perf_event *pevent;

	pevent = events[cpu];
	events[cpu] = NULL;
	barrier(); /* NULLify event before perf counter teardown */
	perf_event_release_kernel(pevent);
	return 0;
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

#ifdef CONFIG_HOTPLUG_CPU

/**
 * lttng_perf_counter_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 *
 * We can set up perf counters only when the CPU is online (up prepare
 * seems to be too soon).
 */
static
int lttng_perf_counter_cpu_hp_callback(struct notifier_block *nb,
		unsigned long action,
		void *hcpu)
{
	unsigned int cpu = (unsigned long) hcpu;
	struct lttng_perf_counter_field *perf_field =
		container_of(nb, struct lttng_perf_counter_field, nb);
	struct perf_event **events = perf_field->e;
	struct perf_event_attr *attr = perf_field->attr;
	struct perf_event *pevent;

	if (!perf_field->hp_enable)
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		pevent = perf_event_create_kernel_counter(attr,
				cpu, NULL, overflow_callback, NULL);
		if (!pevent || IS_ERR(pevent))
			return NOTIFY_BAD;
		if (pevent->state == PERF_EVENT_STATE_ERROR) {
			perf_event_release_kernel(pevent);
			return NOTIFY_BAD;
		}
		barrier(); /* Create perf counter before setting event */
		events[cpu] = pevent;
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pevent = events[cpu];
		events[cpu] = NULL;
		barrier(); /* NULLify event before perf counter teardown */
		perf_event_release_kernel(pevent);
		break;
	}
	return NOTIFY_OK;
}

#endif

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

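/*
 * Add a perf counter context field: allocate the per-CPU event array
 * and the perf attribute, append the field to the context, register
 * CPU hotplug handling, create one pinned kernel counter per online
 * CPU and describe the field as a 64-bit unsigned integer.
 */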
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;
	struct lttng_perf_counter_field *perf_field;
	struct perf_event **events;
	struct perf_event_attr *attr;
	int ret;
	char *name_alloc;

	events = kvzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
	if (!events)
		return -ENOMEM;

	attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
	if (!attr) {
		ret = -ENOMEM;
		goto error_attr;
	}

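	/*
	 * A pinned counter stays scheduled on the PMU while enabled, so a
	 * synchronous read at a tracepoint reflects the current count. If
	 * the PMU cannot accommodate it, the event goes into error state,
	 * which the record callback handles by writing 0.
	 */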
	attr->type = type;
	attr->config = config;
	attr->size = sizeof(struct perf_event_attr);
	attr->pinned = 1;
	attr->disabled = 0;

	perf_field = kzalloc(sizeof(struct lttng_perf_counter_field), GFP_KERNEL);
	if (!perf_field) {
		ret = -ENOMEM;
		goto error_alloc_perf_field;
	}
	perf_field->e = events;
	perf_field->attr = attr;

	name_alloc = kstrdup(name, GFP_KERNEL);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}

	field = lttng_append_context(ctx);
	if (!field) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	if (lttng_find_context(*ctx, name_alloc)) {
		ret = -EEXIST;
		goto find_error;
	}

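	/*
	 * Kernels >= 4.10 use the cpuhp state machine instances; older
	 * kernels register a CPU notifier and create a counter on each
	 * currently online CPU under get_online_cpus() protection.
	 */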
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

	perf_field->cpuhp_prepare.component = LTTNG_CONTEXT_PERF_COUNTERS;
	ret = cpuhp_state_add_instance(lttng_hp_prepare,
			&perf_field->cpuhp_prepare.node);
	if (ret)
		goto cpuhp_prepare_error;

	perf_field->cpuhp_online.component = LTTNG_CONTEXT_PERF_COUNTERS;
	ret = cpuhp_state_add_instance(lttng_hp_online,
			&perf_field->cpuhp_online.node);
	if (ret)
		goto cpuhp_online_error;

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	{
		int cpu;

#ifdef CONFIG_HOTPLUG_CPU
		perf_field->nb.notifier_call =
			lttng_perf_counter_cpu_hp_callback;
		perf_field->nb.priority = 0;
		register_cpu_notifier(&perf_field->nb);
#endif
		get_online_cpus();
		for_each_online_cpu(cpu) {
			events[cpu] = perf_event_create_kernel_counter(attr,
					cpu, NULL, overflow_callback, NULL);
			if (!events[cpu] || IS_ERR(events[cpu])) {
				ret = -EINVAL;
				goto counter_error;
			}
			if (events[cpu]->state == PERF_EVENT_STATE_ERROR) {
				ret = -EBUSY;
				goto counter_busy;
			}
		}
		put_online_cpus();
		perf_field->hp_enable = 1;
	}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

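	/*
	 * The counter value is serialized as an aligned, unsigned, host
	 * byte order 64-bit integer named after the requested counter.
	 */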
	field->destroy = lttng_destroy_perf_counter_field;

	field->event_field.name = name_alloc;
	field->event_field.type.atype = atype_integer;
	field->event_field.type.u.integer.size = sizeof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.integer.alignment = lttng_alignof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.integer.signedness = lttng_is_signed_type(uint64_t);
	field->event_field.type.u.integer.reverse_byte_order = 0;
	field->event_field.type.u.integer.base = 10;
	field->event_field.type.u.integer.encoding = lttng_encode_none;
	field->get_size = perf_counter_get_size;
	field->record = perf_counter_record;
	field->u.perf_counter = perf_field;
	lttng_context_update(*ctx);

	return 0;

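	/* Error paths: unwind in the reverse order of the setup above. */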
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
cpuhp_online_error:
	{
		int remove_ret;

		remove_ret = cpuhp_state_remove_instance(lttng_hp_prepare,
				&perf_field->cpuhp_prepare.node);
		WARN_ON(remove_ret);
	}
cpuhp_prepare_error:
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
counter_busy:
counter_error:
	{
		int cpu;

		for_each_online_cpu(cpu) {
			if (events[cpu] && !IS_ERR(events[cpu]))
				perf_event_release_kernel(events[cpu]);
		}
		put_online_cpus();
#ifdef CONFIG_HOTPLUG_CPU
		unregister_cpu_notifier(&perf_field->nb);
#endif
	}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
find_error:
	lttng_remove_context_field(ctx, field);
append_context_error:
	kfree(name_alloc);
name_alloc_error:
	kfree(perf_field);
error_alloc_perf_field:
	kfree(attr);
error_attr:
	kvfree(events);
	return ret;
}