Refactoring: Channel structures
[lttng-ust.git] / liblttng-ust / lttng-context-perf-counters.c
/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng UST performance monitoring counters (perf-counters) integration.
 */

#define _LGPL_SOURCE
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <lttng/ust-events.h>
#include <lttng/ust-tracer.h>
#include <lttng/ringbuffer-context.h>
#include <urcu/system.h>
#include <urcu/arch.h>
#include <urcu/rculist.h>
#include <ust-helper.h>
#include <urcu/ref.h>
#include <usterr-signal-safe.h>
#include <signal.h>
#include <urcu/tls-compat.h>
#include "perf_event.h"

#include "context-internal.h"
#include "lttng-tracer-core.h"
#include "ust-events-internal.h"

/*
 * We use a global perf counter key and iterate on per-thread RCU lists
 * of fields in the fast path, even though this is not strictly speaking
 * what would provide the best fast-path complexity, to ensure teardown
 * of sessions vs thread exit is handled racelessly.
 *
 * Updates and traversals of thread_list are protected by UST lock.
 * Updates to rcu_field_list are protected by UST lock.
 */

struct lttng_perf_counter_thread_field {
	struct lttng_perf_counter_field *field;	/* Back reference */
	struct perf_event_mmap_page *pc;
	struct cds_list_head thread_field_node;	/* Per-field list of thread fields (node) */
	struct cds_list_head rcu_field_node;	/* RCU per-thread list of fields (node) */
	int fd;					/* Perf FD */
};

struct lttng_perf_counter_thread {
	struct cds_list_head rcu_field_list;	/* RCU per-thread list of fields */
};

struct lttng_perf_counter_field {
	struct perf_event_attr attr;
	struct cds_list_head thread_field_list;	/* Per-field list of thread fields */
};

static pthread_key_t perf_counter_key;

/*
 * lttng_perf_lock - Protect lttng-ust perf counter data structures
 *
 * Nests within the ust_lock, and therefore within the libc dl lock.
 * Therefore, we need to fixup the TLS before nesting into this lock.
 * Nests inside RCU bp read-side lock. Protects against concurrent
 * fork.
 */
static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Cancel state when grabbing the ust_perf_mutex. Saved when locking,
 * restored on unlock. Protected by ust_perf_mutex.
 */
static int ust_perf_saved_cancelstate;

/*
 * Track whether we are tracing from a signal handler nested on an
 * application thread.
 */
static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);

/*
 * Force a read (implying a TLS fixup for dlopen) of TLS variables.
 */
void lttng_ust_fixup_perf_counter_tls(void)
{
	asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
}

void lttng_perf_lock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, oldstate;

	ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	if (ret) {
		ERR("pthread_setcancelstate: %s", strerror(ret));
	}
	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (!URCU_TLS(ust_perf_mutex_nest)++) {
		/*
		 * Ensure the compiler doesn't move the store after the
		 * mutex lock call, in case the lock primitive would be
		 * marked as leaf.
		 */
		cmm_barrier();
		pthread_mutex_lock(&ust_perf_mutex);
		ust_perf_saved_cancelstate = oldstate;
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
}

void lttng_perf_unlock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, newstate, oldstate;
	bool restore_cancel = false;

	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	/*
	 * Ensure the compiler doesn't move the store before the
	 * mutex unlock call, in case the unlock primitive would be
	 * marked as leaf.
	 */
	cmm_barrier();
	if (!--URCU_TLS(ust_perf_mutex_nest)) {
		newstate = ust_perf_saved_cancelstate;
		restore_cancel = true;
		pthread_mutex_unlock(&ust_perf_mutex);
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (restore_cancel) {
		ret = pthread_setcancelstate(newstate, &oldstate);
		if (ret) {
			ERR("pthread_setcancelstate: %s", strerror(ret));
		}
	}
}

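/*
 * Size/alignment contribution of this context field in the event
 * payload: one uint64_t, aligned on uint64_t.
 */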
static
size_t perf_counter_get_size(struct lttng_ust_ctx_field *field, size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}

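/*
 * Slow-path counter read: read(2) on the perf event file descriptor
 * returns the current 64-bit counter value. Returns 0 when the fd is
 * invalid (not opened or already closed) or on short read.
 */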
static
uint64_t read_perf_counter_syscall(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint64_t count;

	if (caa_unlikely(thread_field->fd < 0))
		return 0;

	if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
				< sizeof(count)))
		return 0;

	return count;
}

#if defined(__x86_64__) || defined(__i386__)

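/*
 * Read performance monitoring counter number "counter" with the x86
 * rdpmc instruction. The 64-bit result is delivered in EDX:EAX and
 * recombined here.
 */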
static
uint64_t rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((uint64_t) high) << 32;
}

static
bool has_rdpmc(struct perf_event_mmap_page *pc)
{
	if (caa_unlikely(!pc->cap_bit0_is_deprecated))
		return false;
	/* Since Linux kernel 3.12. */
	return pc->cap_user_rdpmc;
}

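/*
 * Fast-path counter read for x86: when the kernel allows user-space
 * rdpmc for this event, read the counter directly from user space and
 * add the saved offset from the perf mmap page. The pc->lock sequence
 * counter is re-checked after the read, and the whole read retried,
 * in case the event was rescheduled concurrently.
 */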
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint32_t seq, idx;
	uint64_t count;
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (caa_unlikely(!pc))
		return 0;

	do {
		seq = CMM_LOAD_SHARED(pc->lock);
		cmm_barrier();

		idx = pc->index;
		if (caa_likely(has_rdpmc(pc) && idx)) {
			int64_t pmcval;

			pmcval = rdpmc(idx - 1);
			/* Sign-extend the pmc register result. */
			pmcval <<= 64 - pc->pmc_width;
			pmcval >>= 64 - pc->pmc_width;
			count = pc->offset + pmcval;
		} else {
			/* Fall-back on system call if rdpmc cannot be used. */
			return read_perf_counter_syscall(thread_field);
		}
		cmm_barrier();
	} while (CMM_LOAD_SHARED(pc->lock) != seq);

	return count;
}

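/*
 * Keep the perf event fd open only when the syscall fall-back is
 * needed: when rdpmc is usable, the mmap'd perf page is sufficient
 * and the per-thread fd is closed by setup_perf().
 */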
static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (!pc)
		return 0;
	return !has_rdpmc(pc);
}

#else

/* Generic (slow) implementation using a read system call. */
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	return read_perf_counter_syscall(thread_field);
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	return 1;
}

#endif

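/*
 * perf_event_open(2) has no libc wrapper; invoke it through
 * syscall(2).
 */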
static
int sys_perf_event_open(struct perf_event_attr *attr,
		pid_t pid, int cpu, int group_fd,
		unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu,
			group_fd, flags);
}

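/*
 * Open a counter for the calling thread (pid 0), on any CPU (cpu -1),
 * without event grouping. Returns the fd, or -1 on error.
 */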
static
int open_perf_fd(struct perf_event_attr *attr)
{
	int fd;

	fd = sys_perf_event_open(attr, 0, -1, -1, 0);
	if (fd < 0)
		return -1;

	return fd;
}

static
void close_perf_fd(int fd)
{
	int ret;

	if (fd < 0)
		return;

	ret = close(fd);
	if (ret) {
		perror("Error closing LTTng-UST perf memory mapping FD");
	}
}

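/*
 * Map the first page of the perf event (struct perf_event_mmap_page)
 * read-only so the fast path can use rdpmc where supported. If the
 * architecture does not need to keep the fd (see arch_perf_keep_fd()),
 * it is closed right away. thread_field->pc stays NULL if mmap fails.
 */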
static void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
{
	void *perf_addr;

	perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
			PROT_READ, MAP_SHARED, thread_field->fd, 0);
	if (perf_addr == MAP_FAILED)
		perf_addr = NULL;
	thread_field->pc = perf_addr;

	if (!arch_perf_keep_fd(thread_field)) {
		close_perf_fd(thread_field->fd);
		thread_field->fd = -1;
	}
}

static
void unmap_perf_page(struct perf_event_mmap_page *pc)
{
	int ret;

	if (!pc)
		return;
	ret = munmap(pc, sizeof(struct perf_event_mmap_page));
	if (ret < 0) {
		PERROR("Error in munmap");
		abort();
	}
}

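/*
 * Lazily allocate the per-thread perf counter state the first time a
 * counter is read on this thread. All signals are blocked while the
 * thread-specific data is created, so a signal handler nesting over
 * this code cannot race with the allocation.
 */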
static
struct lttng_perf_counter_thread *alloc_perf_counter_thread(void)
{
	struct lttng_perf_counter_thread *perf_thread;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	perf_thread = pthread_getspecific(perf_counter_key);
	if (perf_thread)
		goto skip;
	perf_thread = zmalloc(sizeof(*perf_thread));
	if (!perf_thread)
		abort();
	CDS_INIT_LIST_HEAD(&perf_thread->rcu_field_list);
	ret = pthread_setspecific(perf_counter_key, perf_thread);
	if (ret)
		abort();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return perf_thread;
}

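/*
 * Create the per-thread state for one perf counter field: open the
 * counter, try to map its perf page, and link the new thread field
 * into both the per-thread RCU list and the per-field list under the
 * perf lock. Signals are blocked to avoid racing with a nested signal
 * handler adding the same field.
 */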
static
struct lttng_perf_counter_thread_field *
	add_thread_field(struct lttng_perf_counter_field *perf_field,
		struct lttng_perf_counter_thread *perf_thread)
{
	struct lttng_perf_counter_thread_field *thread_field;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == perf_field)
			goto skip;
	}
	thread_field = zmalloc(sizeof(*thread_field));
	if (!thread_field)
		abort();
	thread_field->field = perf_field;
	thread_field->fd = open_perf_fd(&perf_field->attr);
	if (thread_field->fd >= 0)
		setup_perf(thread_field);
	/*
	 * Note: thread_field->pc can be NULL if setup_perf() fails.
	 * Also, thread_field->fd can be -1 if open_perf_fd() fails.
	 */
	lttng_perf_lock();
	cds_list_add_rcu(&thread_field->rcu_field_node,
			&perf_thread->rcu_field_list);
	cds_list_add(&thread_field->thread_field_node,
			&perf_field->thread_field_list);
	lttng_perf_unlock();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return thread_field;
}

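/*
 * Fast-path lookup of the calling thread's state for this perf counter
 * field; falls back to allocation on first use.
 */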
static
struct lttng_perf_counter_thread_field *
	get_thread_field(struct lttng_perf_counter_field *field)
{
	struct lttng_perf_counter_thread *perf_thread;
	struct lttng_perf_counter_thread_field *thread_field;

	perf_thread = pthread_getspecific(perf_counter_key);
	if (!perf_thread)
		perf_thread = alloc_perf_counter_thread();
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == field)
			return thread_field;
	}
	/* perf_counter_thread_field not found, need to add one */
	return add_thread_field(field, perf_thread);
}

static
uint64_t wrapper_perf_counter_read(struct lttng_ust_ctx_field *field)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *perf_thread_field;

	perf_field = (struct lttng_perf_counter_field *) field->priv;
	perf_thread_field = get_thread_field(perf_field);
	return arch_read_perf_counter(perf_thread_field);
}

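/*
 * Record callback: read the current counter value and write it to the
 * ring buffer as an aligned uint64_t, matching perf_counter_get_size().
 */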
static
void perf_counter_record(struct lttng_ust_ctx_field *field,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_channel_buffer *chan)
{
	uint64_t value;

	value = wrapper_perf_counter_read(field);
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
	chan->ops->event_write(ctx, &value, sizeof(value));
}

static
void perf_counter_get_value(struct lttng_ust_ctx_field *field,
		struct lttng_ust_ctx_value *value)
{
	value->u.s64 = wrapper_perf_counter_read(field);
}

/* Called with perf lock held */
static
void lttng_destroy_perf_thread_field(
		struct lttng_perf_counter_thread_field *thread_field)
{
	close_perf_fd(thread_field->fd);
	unmap_perf_page(thread_field->pc);
	cds_list_del_rcu(&thread_field->rcu_field_node);
	cds_list_del(&thread_field->thread_field_node);
	free(thread_field);
}

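/*
 * pthread_key destructor, invoked when a thread exits: tears down all
 * per-thread perf counter state belonging to the exiting thread.
 */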
static
void lttng_destroy_perf_thread_key(void *_key)
{
	struct lttng_perf_counter_thread *perf_thread = _key;
	struct lttng_perf_counter_thread_field *pos, *p;

	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
			rcu_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_thread);
}

/* Called with UST lock held */
static
void lttng_destroy_perf_counter_field(struct lttng_ust_ctx_field *field)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *pos, *p;

	free((char *) field->event_field->name);
	perf_field = (struct lttng_perf_counter_field *) field->priv;
	/*
	 * This put is performed when no threads can concurrently
	 * perform a "get", thanks to the urcu-bp grace period. Holding
	 * the lttng perf lock protects against concurrent modification
	 * of the per-thread thread field list.
	 */
	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
			thread_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_field);
}

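/*
 * exclude_kernel value used when opening counters: on ARMv7 the
 * counters are opened with exclude_kernel = 0; on all other
 * architectures kernel mode is excluded.
 */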
#ifdef __ARM_ARCH_7A__

static
int perf_get_exclude_kernel(void)
{
	return 0;
}

#else /* __ARM_ARCH_7A__ */

static
int perf_get_exclude_kernel(void)
{
	return 1;
}

#endif /* __ARM_ARCH_7A__ */

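/*
 * Add a perf counter context field named "name" to the context *ctx,
 * for the perf event described by (type, config). A hypothetical
 * caller (values for illustration only) could look like:
 *
 *	ret = lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
 *			PERF_COUNT_HW_CPU_CYCLES,
 *			"perf_thread_cpu_cycles", &ctx);
 *
 * The counter is opened once here only to validate that it is usable
 * in this process; per-thread counters are opened lazily on first
 * read by each thread.
 */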
/* Called with UST lock held */
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_ust_ctx **ctx)
{
	struct lttng_ust_ctx_field *field;
	struct lttng_ust_type_common *ust_type;
	struct lttng_perf_counter_field *perf_field;
	char *name_alloc;
	int ret;

	name_alloc = strdup(name);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}
	perf_field = zmalloc(sizeof(*perf_field));
	if (!perf_field) {
		ret = -ENOMEM;
		goto perf_field_alloc_error;
	}
	ust_type = lttng_ust_create_type_integer(sizeof(uint64_t) * CHAR_BIT,
			lttng_alignof(uint64_t) * CHAR_BIT,
			lttng_is_signed_type(uint64_t),
			BYTE_ORDER, 10);
	if (!ust_type) {
		ret = -ENOMEM;
		goto type_alloc_error;
	}
	field = lttng_append_context(ctx);
	if (!field) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	if (lttng_find_context(*ctx, name_alloc)) {
		ret = -EEXIST;
		goto find_error;
	}

	field->destroy = lttng_destroy_perf_counter_field;

	field->event_field->name = name_alloc;
	field->event_field->type = ust_type;
	field->get_size = perf_counter_get_size;
	field->record = perf_counter_record;
	field->get_value = perf_counter_get_value;

	perf_field->attr.type = type;
	perf_field->attr.config = config;
	perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
	CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
	field->priv = perf_field;

	/* Ensure that this perf counter can be used in this process. */
	ret = open_perf_fd(&perf_field->attr);
	if (ret < 0) {
		ret = -ENODEV;
		goto setup_error;
	}
	close_perf_fd(ret);

	/*
	 * Contexts can only be added before tracing is started, so we
	 * don't have to synchronize against concurrent threads using
	 * the field here.
	 */

	lttng_context_update(*ctx);
	return 0;

setup_error:
find_error:
	lttng_remove_context_field(ctx, field);
append_context_error:
	lttng_ust_destroy_type(ust_type);
type_alloc_error:
	free(perf_field);
perf_field_alloc_error:
	free(name_alloc);
name_alloc_error:
	return ret;
}

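/*
 * Library init/exit: create and delete the pthread key whose
 * destructor (lttng_destroy_perf_thread_key) reclaims per-thread perf
 * counter state when application threads exit.
 */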
int lttng_perf_counter_init(void)
{
	int ret;

	ret = pthread_key_create(&perf_counter_key,
			lttng_destroy_perf_thread_key);
	if (ret)
		ret = -ret;
	return ret;
}

void lttng_perf_counter_exit(void)
{
	int ret;

	ret = pthread_key_delete(perf_counter_key);
	if (ret) {
		errno = ret;
		PERROR("Error in pthread_key_delete");
	}
}