/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng UST performance monitoring counters (perf-counters) integration.
 */

#define _LGPL_SOURCE
#include <limits.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <pthread.h>
#include <errno.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <lttng/ust-arch.h>
#include <lttng/ust-events.h>
#include <lttng/ust-tracer.h>
#include <lttng/ust-ringbuffer-context.h>
#include <lttng/ust-cancelstate.h>
#include <urcu/system.h>
#include <urcu/arch.h>
#include <urcu/rculist.h>
#include "common/macros.h"
#include <urcu/ref.h>
#include "common/logging.h"
#include <signal.h>
#include <urcu/tls-compat.h>
#include "perf_event.h"

#include "context-internal.h"
#include "lttng-tracer-core.h"
#include "lib/lttng-ust/events.h"
/*
 * We use a global perf counter key and iterate on per-thread RCU lists
 * of fields in the fast path, even though this is not, strictly
 * speaking, the best fast-path complexity, because it ensures that
 * teardown of sessions vs thread exit is handled racelessly.
 *
 * Updates and traversals of thread_field_list are protected by the
 * lttng perf lock. Updates to rcu_field_list are protected by the
 * lttng perf lock.
 */

struct lttng_perf_counter_thread_field {
	struct lttng_perf_counter_field *field;	/* Back reference */
	struct perf_event_mmap_page *pc;
	struct cds_list_head thread_field_node;	/* Per-field list of thread fields (node) */
	struct cds_list_head rcu_field_node;	/* RCU per-thread list of fields (node) */
	int fd;					/* Perf FD */
};

struct lttng_perf_counter_thread {
	struct cds_list_head rcu_field_list;	/* RCU per-thread list of fields */
};

struct lttng_perf_counter_field {
	struct perf_event_attr attr;
	struct cds_list_head thread_field_list;	/* Per-field list of thread fields */
	char *name;
	struct lttng_ust_event_field *event_field;
};

static pthread_key_t perf_counter_key;

/*
 * lttng_perf_lock - Protect lttng-ust perf counter data structures
 *
 * Nests within the ust_lock, and therefore within the libc dl lock.
 * The TLS of the nesting counter must therefore be allocated before
 * nesting into this lock. Nests inside the urcu-bp read-side lock.
 * Protects against concurrent fork.
 */
static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Nesting counter for ust_perf_mutex. Tracks whether we are tracing
 * from a signal handler nested on an application thread, so that the
 * mutex is only locked by the outermost nesting level.
 */
static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);

/*
 * Force a read (implying TLS allocation for dlopen) of TLS variables.
 */
void lttng_ust_perf_counter_init_thread(int flags)
{
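	/*
	 * The empty asm with an "m" input constraint forces the
	 * compiler to materialize the address of the TLS variable,
	 * which triggers allocation of its TLS block, while emitting
	 * no instructions of its own.
	 */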
	asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
	(void)flags;
}

void lttng_perf_lock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret;

	if (lttng_ust_cancelstate_disable_push()) {
		ERR("lttng_ust_cancelstate_disable_push");
	}
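	/*
	 * Block all signals while updating the nest count and taking
	 * the mutex, so that a signal handler cannot interleave between
	 * the nest count increment and the mutex acquisition.
	 */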
	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (!URCU_TLS(ust_perf_mutex_nest)++) {
		/*
		 * Ensure the compiler doesn't move the store after the
		 * close() call, in case close() would be marked as
		 * leaf.
		 */
		cmm_barrier();
		pthread_mutex_lock(&ust_perf_mutex);
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
}

void lttng_perf_unlock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret;

	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	/*
	 * Ensure the compiler doesn't move the store before the close()
	 * call, in case close() would be marked as leaf.
	 */
	cmm_barrier();
	if (!--URCU_TLS(ust_perf_mutex_nest)) {
		pthread_mutex_unlock(&ust_perf_mutex);
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (lttng_ust_cancelstate_disable_pop()) {
		ERR("lttng_ust_cancelstate_disable_pop");
	}
}

static
size_t perf_counter_get_size(void *priv __attribute__((unused)),
		struct lttng_ust_probe_ctx *probe_ctx __attribute__((unused)),
		size_t offset)
{
	size_t size = 0;

	size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}

static
uint64_t read_perf_counter_syscall(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint64_t count;
	ssize_t len;

	if (caa_unlikely(thread_field->fd < 0))
		return 0;

	/*
	 * read() returns ssize_t: compare against the expected length
	 * with != so that an error return of -1 is not silently
	 * converted to a large unsigned value and missed. Treat short
	 * reads and errors as a zero sample.
	 */
	len = read(thread_field->fd, &count, sizeof(count));
	if (caa_unlikely(len != sizeof(count)))
		return 0;

	return count;
}

#if defined(LTTNG_UST_ARCH_X86)

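/*
 * The rdpmc instruction reads the performance monitoring counter
 * selected by ECX directly from user space, without a system call.
 * The 64-bit result is returned split across EDX:EAX.
 */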
static
uint64_t rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((uint64_t) high) << 32;
}

static
bool has_rdpmc(struct perf_event_mmap_page *pc)
{
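	/*
	 * On kernels predating the cap_user_rdpmc capability bit, the
	 * capabilities bitfield layout differs: when
	 * cap_bit0_is_deprecated is clear, cap_user_rdpmc cannot be
	 * trusted, so fall back to the read() system call.
	 */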
	if (caa_unlikely(!pc->cap_bit0_is_deprecated))
		return false;
	/* Since Linux kernel 3.12. */
	return pc->cap_user_rdpmc;
}

static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint32_t seq, idx;
	uint64_t count;
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (caa_unlikely(!pc))
		return 0;

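	/*
	 * pc->lock is a sequence counter incremented by the kernel
	 * around updates to the mmap page. Retry the read until the
	 * sequence number is stable, which guarantees a consistent
	 * snapshot of the counter index, offset and width.
	 */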
	do {
		seq = CMM_LOAD_SHARED(pc->lock);
		cmm_barrier();

		idx = pc->index;
		if (caa_likely(has_rdpmc(pc) && idx)) {
			int64_t pmcval;

			pmcval = rdpmc(idx - 1);
			/* Sign-extend the pmc register result. */
			pmcval <<= 64 - pc->pmc_width;
			pmcval >>= 64 - pc->pmc_width;
			count = pc->offset + pmcval;
		} else {
			/* Fall-back on system call if rdpmc cannot be used. */
			return read_perf_counter_syscall(thread_field);
		}
		cmm_barrier();
	} while (CMM_LOAD_SHARED(pc->lock) != seq);

	return count;
}

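/*
 * When rdpmc is usable, the perf file descriptor is no longer needed
 * once the page is mapped, so it can be closed to save file
 * descriptors; the read() fallback, however, requires it to stay open.
 */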
static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (!pc)
		return 0;
	return !has_rdpmc(pc);
}

#else

/* Generic (slow) implementation using a read system call. */
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	return read_perf_counter_syscall(thread_field);
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field __attribute__((unused)))
{
	return 1;
}

#endif

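/*
 * perf_event_open(2) has no libc wrapper; invoke it through
 * syscall(2).
 */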
static
int sys_perf_event_open(struct perf_event_attr *attr,
		pid_t pid, int cpu, int group_fd,
		unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu,
			group_fd, flags);
}

static
int open_perf_fd(struct perf_event_attr *attr)
{
	int fd;

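	/* pid = 0, cpu = -1: count for the calling thread on any CPU. */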
	fd = sys_perf_event_open(attr, 0, -1, -1, 0);
	if (fd < 0)
		return -1;

	return fd;
}

static
void close_perf_fd(int fd)
{
	int ret;

	if (fd < 0)
		return;

	ret = close(fd);
	if (ret) {
		perror("Error closing LTTng-UST perf counter FD");
	}
}

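/*
 * Map the first page of the perf file descriptor to obtain the
 * perf_event_mmap_page, which exports the counter index, offset and
 * capabilities needed for user-space rdpmc reads. On mmap failure,
 * thread_field->pc stays NULL and reads fall back to the system call.
 */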
static void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
{
	void *perf_addr;

	perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
			PROT_READ, MAP_SHARED, thread_field->fd, 0);
	if (perf_addr == MAP_FAILED)
		perf_addr = NULL;
	thread_field->pc = perf_addr;

	if (!arch_perf_keep_fd(thread_field)) {
		close_perf_fd(thread_field->fd);
		thread_field->fd = -1;
	}
}

static
void unmap_perf_page(struct perf_event_mmap_page *pc)
{
	int ret;

	if (!pc)
		return;
	ret = munmap(pc, sizeof(struct perf_event_mmap_page));
	if (ret < 0) {
		PERROR("Error in munmap");
		abort();
	}
}

static
struct lttng_perf_counter_thread *alloc_perf_counter_thread(void)
{
	struct lttng_perf_counter_thread *perf_thread;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	perf_thread = pthread_getspecific(perf_counter_key);
	if (perf_thread)
		goto skip;
	perf_thread = zmalloc(sizeof(*perf_thread));
	if (!perf_thread)
		abort();
	CDS_INIT_LIST_HEAD(&perf_thread->rcu_field_list);
	ret = pthread_setspecific(perf_counter_key, perf_thread);
	if (ret)
		abort();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return perf_thread;
}

static
struct lttng_perf_counter_thread_field *
	add_thread_field(struct lttng_perf_counter_field *perf_field,
		struct lttng_perf_counter_thread *perf_thread)
{
	struct lttng_perf_counter_thread_field *thread_field;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == perf_field)
			goto skip;
	}
	thread_field = zmalloc(sizeof(*thread_field));
	if (!thread_field)
		abort();
	thread_field->field = perf_field;
	thread_field->fd = open_perf_fd(&perf_field->attr);
	if (thread_field->fd >= 0)
		setup_perf(thread_field);
	/*
	 * Note: thread_field->pc can be NULL if setup_perf() fails.
	 * Also, thread_field->fd can be -1 if open_perf_fd() fails.
	 */
	lttng_perf_lock();
	cds_list_add_rcu(&thread_field->rcu_field_node,
			&perf_thread->rcu_field_list);
	cds_list_add(&thread_field->thread_field_node,
			&perf_field->thread_field_list);
	lttng_perf_unlock();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return thread_field;
}

static
struct lttng_perf_counter_thread_field *
	get_thread_field(struct lttng_perf_counter_field *field)
{
	struct lttng_perf_counter_thread *perf_thread;
	struct lttng_perf_counter_thread_field *thread_field;

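	/*
	 * Lazily create the per-thread state and the per-(field,
	 * thread) perf counter on first use from this thread.
	 */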
	perf_thread = pthread_getspecific(perf_counter_key);
	if (!perf_thread)
		perf_thread = alloc_perf_counter_thread();
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == field)
			return thread_field;
	}
	/* perf_counter_thread_field not found, need to add one */
	return add_thread_field(field, perf_thread);
}

static
uint64_t wrapper_perf_counter_read(void *priv)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *perf_thread_field;

	perf_field = (struct lttng_perf_counter_field *) priv;
	perf_thread_field = get_thread_field(perf_field);
	return arch_read_perf_counter(perf_thread_field);
}

static
void perf_counter_record(void *priv,
		struct lttng_ust_probe_ctx *probe_ctx __attribute__((unused)),
		struct lttng_ust_ring_buffer_ctx *ctx,
		struct lttng_ust_channel_buffer *chan)
{
	uint64_t value;

	value = wrapper_perf_counter_read(priv);
	chan->ops->event_write(ctx, &value, sizeof(value), lttng_ust_rb_alignof(value));
}

static
void perf_counter_get_value(void *priv,
		struct lttng_ust_probe_ctx *probe_ctx __attribute__((unused)),
		struct lttng_ust_ctx_value *value)
{
	value->u.u64 = wrapper_perf_counter_read(priv);
}

/* Called with perf lock held */
static
void lttng_destroy_perf_thread_field(
		struct lttng_perf_counter_thread_field *thread_field)
{
	close_perf_fd(thread_field->fd);
	unmap_perf_page(thread_field->pc);
	cds_list_del_rcu(&thread_field->rcu_field_node);
	cds_list_del(&thread_field->thread_field_node);
	free(thread_field);
}

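/*
 * Destructor registered for perf_counter_key: invoked by pthreads at
 * thread exit to tear down every thread field created by this thread.
 */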
static
void lttng_destroy_perf_thread_key(void *_key)
{
	struct lttng_perf_counter_thread *perf_thread = _key;
	struct lttng_perf_counter_thread_field *pos, *p;

	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
			rcu_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_thread);
}

/* Called with UST lock held */
static
void lttng_destroy_perf_counter_ctx_field(void *priv)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *pos, *p;

	perf_field = (struct lttng_perf_counter_field *) priv;
	free(perf_field->name);
	/*
	 * This teardown is performed when no thread can concurrently
	 * perform a "get" on the thread fields, thanks to the urcu-bp
	 * grace period. Holding the lttng perf lock protects against
	 * concurrent modification of the per-thread thread field list.
	 */
	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
			thread_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_field->event_field);
	free(perf_field);
}

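/*
 * All architectures except ARMv7 exclude kernel events from the
 * user-space perf counters (attr.exclude_kernel = 1).
 */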
#ifdef LTTNG_UST_ARCH_ARMV7

static
int perf_get_exclude_kernel(void)
{
	return 0;
}

#else /* LTTNG_UST_ARCH_ARMV7 */

static
int perf_get_exclude_kernel(void)
{
	return 1;
}

#endif /* LTTNG_UST_ARCH_ARMV7 */

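/* Event field type: 64-bit unsigned integer, displayed in base 10. */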
static const struct lttng_ust_type_common *ust_type =
	lttng_ust_static_type_integer(sizeof(uint64_t) * CHAR_BIT,
			lttng_ust_rb_alignof(uint64_t) * CHAR_BIT,
			lttng_ust_is_signed_type(uint64_t),
			LTTNG_UST_BYTE_ORDER, 10);

/* Called with UST lock held */
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_ust_ctx **ctx)
{
	struct lttng_ust_ctx_field ctx_field;
	struct lttng_ust_event_field *event_field;
	struct lttng_perf_counter_field *perf_field;
	char *name_alloc;
	int ret;

	if (lttng_find_context(*ctx, name)) {
		ret = -EEXIST;
		goto find_error;
	}
	name_alloc = strdup(name);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}
	event_field = zmalloc(sizeof(*event_field));
	if (!event_field) {
		ret = -ENOMEM;
		goto event_field_alloc_error;
	}
	event_field->name = name_alloc;
	event_field->type = ust_type;

	perf_field = zmalloc(sizeof(*perf_field));
	if (!perf_field) {
		ret = -ENOMEM;
		goto perf_field_alloc_error;
	}
	perf_field->attr.type = type;
	perf_field->attr.config = config;
	perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
	CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
	perf_field->name = name_alloc;
	perf_field->event_field = event_field;

	/* Ensure that this perf counter can be used in this process. */
	ret = open_perf_fd(&perf_field->attr);
	if (ret < 0) {
		ret = -ENODEV;
		goto setup_error;
	}
	close_perf_fd(ret);

	ctx_field.event_field = event_field;
	ctx_field.get_size = perf_counter_get_size;
	ctx_field.record = perf_counter_record;
	ctx_field.get_value = perf_counter_get_value;
	ctx_field.destroy = lttng_destroy_perf_counter_ctx_field;
	ctx_field.priv = perf_field;

	ret = lttng_ust_context_append(ctx, &ctx_field);
	if (ret) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	return 0;

append_context_error:
setup_error:
	free(perf_field);
perf_field_alloc_error:
	free(event_field);
event_field_alloc_error:
	free(name_alloc);
name_alloc_error:
find_error:
	return ret;
}

int lttng_perf_counter_init(void)
{
	int ret;

	ret = pthread_key_create(&perf_counter_key,
			lttng_destroy_perf_thread_key);
	if (ret)
		ret = -ret;
	return ret;
}

void lttng_perf_counter_exit(void)
{
	int ret;

	ret = pthread_key_delete(perf_counter_key);
	if (ret) {
		errno = ret;
		PERROR("Error in pthread_key_delete");
	}
}