/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng UST performance monitoring counters (perf-counters) integration.
 */

#define _LGPL_SOURCE
#include <limits.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <signal.h>
#include <pthread.h>
#include <lttng/ust-arch.h>
#include <lttng/ust-events.h>
#include <lttng/ust-tracer.h>
#include <lttng/ust-ringbuffer-context.h>
#include <urcu/system.h>
#include <urcu/arch.h>
#include <urcu/rculist.h>
#include <urcu/tls-compat.h>
#include "common/macros.h"
#include "common/logging.h"
#include "perf_event.h"

#include "context-internal.h"
#include "lttng-tracer-core.h"
#include "lib/lttng-ust/events.h"
/*
 * We use a global perf counter key and iterate on per-thread RCU lists
 * of fields in the fast path, even though this is not strictly speaking
 * what would provide the best fast-path complexity, to ensure teardown
 * of sessions vs thread exit is handled racelessly.
 *
 * Updates and traversals of thread_list are protected by UST lock.
 * Updates to rcu_field_list are protected by UST lock.
 */
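/*
 * Illustrative flow (a sketch, not code from this file): a per-thread
 * perf counter context is typically requested through the session
 * daemon, e.g. with something like
 * `lttng add-context --userspace --type=perf:thread:instructions`,
 * which ultimately reaches lttng_add_perf_counter_to_ctx() below.
 */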
struct lttng_perf_counter_thread_field {
	struct lttng_perf_counter_field *field;	/* Back reference */
	struct perf_event_mmap_page *pc;
	struct cds_list_head thread_field_node;	/* Per-field list of thread fields (node) */
	struct cds_list_head rcu_field_node;	/* RCU per-thread list of fields (node) */
	int fd;					/* Perf FD */
};

struct lttng_perf_counter_thread {
	struct cds_list_head rcu_field_list;	/* RCU per-thread list of fields */
};

struct lttng_perf_counter_field {
	struct perf_event_attr attr;
	struct cds_list_head thread_field_list;	/* Per-field list of thread fields */
	char *name;
	struct lttng_ust_event_field *event_field;
};

static pthread_key_t perf_counter_key;
/*
 * lttng_perf_lock - Protect lttng-ust perf counter data structures
 *
 * Nests within the ust_lock, and therefore within the libc dl lock.
 * Therefore, we need to allocate the TLS before nesting into this lock.
 * Nests inside RCU bp read-side lock. Protects against concurrent
 * fork.
 */
static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Cancel state when grabbing the ust_perf_mutex. Saved when locking,
 * restored on unlock. Protected by ust_perf_mutex.
 */
static int ust_perf_saved_cancelstate;
/*
 * Track whether we are tracing from a signal handler nested on an
 * application handler.
 */
static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);

/*
 * Force a read (implies TLS allocation for dlopen) of TLS variables.
 */
void lttng_ust_perf_counter_alloc_tls(void)
{
	asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
}
void lttng_perf_lock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, oldstate;

	ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	if (ret) {
		ERR("pthread_setcancelstate: %s", strerror(ret));
	}
	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (!URCU_TLS(ust_perf_mutex_nest)++) {
		/*
		 * Ensure the compiler doesn't move the store after the
		 * close() call in case close() would be marked as leaf.
		 */
		cmm_barrier();
		pthread_mutex_lock(&ust_perf_mutex);
		ust_perf_saved_cancelstate = oldstate;
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
}
void lttng_perf_unlock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, newstate, oldstate;
	bool restore_cancel = false;

	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	/*
	 * Ensure the compiler doesn't move the store before the close()
	 * call, in case close() would be marked as leaf.
	 */
	cmm_barrier();
	if (!--URCU_TLS(ust_perf_mutex_nest)) {
		newstate = ust_perf_saved_cancelstate;
		restore_cancel = true;
		pthread_mutex_unlock(&ust_perf_mutex);
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (restore_cancel) {
		ret = pthread_setcancelstate(newstate, &oldstate);
		if (ret) {
			ERR("pthread_setcancelstate: %s", strerror(ret));
		}
	}
}
static
size_t perf_counter_get_size(void *priv __attribute__((unused)),
		struct lttng_ust_probe_ctx *probe_ctx __attribute__((unused)),
		size_t offset)
{
	size_t size = 0;

	size += lttng_ust_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}
static
uint64_t read_perf_counter_syscall(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint64_t count;

	if (caa_unlikely(thread_field->fd < 0))
		return 0;

	if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
				< sizeof(count)))
		return 0;

	return count;
}
#if defined(LTTNG_UST_ARCH_X86)

static
uint64_t rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((uint64_t) high) << 32;
}

static
bool has_rdpmc(struct perf_event_mmap_page *pc)
{
	if (caa_unlikely(!pc->cap_bit0_is_deprecated))
		return false;
	/* Since Linux kernel 3.12. */
	return pc->cap_user_rdpmc;
}
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint32_t seq, idx;
	uint64_t count;
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (caa_unlikely(!pc))
		return 0;

	do {
		seq = CMM_LOAD_SHARED(pc->lock);
		cmm_barrier();

		idx = pc->index;
		if (caa_likely(has_rdpmc(pc) && idx)) {
			int64_t pmcval;

			pmcval = rdpmc(idx - 1);
			/* Sign-extend the pmc register result. */
			pmcval <<= 64 - pc->pmc_width;
			pmcval >>= 64 - pc->pmc_width;
			count = pc->offset + pmcval;
		} else {
			/* Fall-back on system call if rdpmc cannot be used. */
			return read_perf_counter_syscall(thread_field);
		}
		cmm_barrier();
	} while (CMM_LOAD_SHARED(pc->lock) != seq);

	return count;
}
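/*
 * Note on the loop above: pc->lock is a kernel-maintained sequence
 * count, so a value that changed across the read forces a retry.
 * Worked example of the sign extension, assuming a hypothetical
 * pmc_width of 48: a raw rdpmc value with bit 47 set is shifted left
 * by 16 and then arithmetically shifted right by 16, replicating bit
 * 47 into bits 48..63, so that pc->offset + pmcval adds a correctly
 * signed 64-bit delta.
 */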
static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (!pc)
		return 0;
	return !has_rdpmc(pc);
}
#else /* defined(LTTNG_UST_ARCH_X86) */

/* Generic (slow) implementation using a read system call. */
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	return read_perf_counter_syscall(thread_field);
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field __attribute__((unused)))
{
	return 1;
}

#endif /* defined(LTTNG_UST_ARCH_X86) */
static
int sys_perf_event_open(struct perf_event_attr *attr,
		pid_t pid, int cpu, int group_fd,
		unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu,
			group_fd, flags);
}

static
int open_perf_fd(struct perf_event_attr *attr)
{
	int fd;

	fd = sys_perf_event_open(attr, 0, -1, -1, 0);
	if (fd < 0)
		return -1;

	return fd;
}
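/*
 * The pid = 0, cpu = -1 arguments above ask perf_event_open(2) to
 * count for the calling thread on any CPU, which is what gives each
 * lttng_perf_counter_thread_field its per-thread semantics.
 */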
static
void close_perf_fd(int fd)
{
	int ret;

	if (fd < 0)
		return;

	ret = close(fd);
	if (ret) {
		perror("Error closing LTTng-UST perf memory mapping FD");
	}
}
static void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
{
	void *perf_addr;

	perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
			PROT_READ, MAP_SHARED, thread_field->fd, 0);
	if (perf_addr == MAP_FAILED)
		perf_addr = NULL;
	thread_field->pc = perf_addr;

	if (!arch_perf_keep_fd(thread_field)) {
		close_perf_fd(thread_field->fd);
		thread_field->fd = -1;
	}
}
static
void unmap_perf_page(struct perf_event_mmap_page *pc)
{
	int ret;

	if (!pc)
		return;
	ret = munmap(pc, sizeof(struct perf_event_mmap_page));
	if (ret < 0) {
		PERROR("Error in munmap");
		abort();
	}
}
static
struct lttng_perf_counter_thread *alloc_perf_counter_thread(void)
{
	struct lttng_perf_counter_thread *perf_thread;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	perf_thread = pthread_getspecific(perf_counter_key);
	if (perf_thread)
		goto skip;
	perf_thread = zmalloc(sizeof(*perf_thread));
	if (!perf_thread)
		abort();
	CDS_INIT_LIST_HEAD(&perf_thread->rcu_field_list);
	ret = pthread_setspecific(perf_counter_key, perf_thread);
	if (ret)
		abort();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return perf_thread;
}
static
struct lttng_perf_counter_thread_field *
	add_thread_field(struct lttng_perf_counter_field *perf_field,
		struct lttng_perf_counter_thread *perf_thread)
{
	struct lttng_perf_counter_thread_field *thread_field;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == perf_field)
			goto skip;
	}
	thread_field = zmalloc(sizeof(*thread_field));
	if (!thread_field)
		abort();
	thread_field->field = perf_field;
	thread_field->fd = open_perf_fd(&perf_field->attr);
	if (thread_field->fd >= 0)
		setup_perf(thread_field);
	/*
	 * Note: thread_field->pc can be NULL if setup_perf() fails.
	 * Also, thread_field->fd can be -1 if open_perf_fd() fails.
	 */
	lttng_perf_lock();
	cds_list_add_rcu(&thread_field->rcu_field_node,
			&perf_thread->rcu_field_list);
	cds_list_add(&thread_field->thread_field_node,
			&perf_field->thread_field_list);
	lttng_perf_unlock();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return thread_field;
}
static
struct lttng_perf_counter_thread_field *
	get_thread_field(struct lttng_perf_counter_field *field)
{
	struct lttng_perf_counter_thread *perf_thread;
	struct lttng_perf_counter_thread_field *thread_field;

	perf_thread = pthread_getspecific(perf_counter_key);
	if (!perf_thread)
		perf_thread = alloc_perf_counter_thread();
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == field)
			return thread_field;
	}
	/* perf_counter_thread_field not found, need to add one */
	return add_thread_field(field, perf_thread);
}
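/*
 * Fast-path summary for get_thread_field(): a tracepoint hit resolves
 * its counter in three steps -- pthread_getspecific() for the
 * per-thread struct, an RCU list walk to find the field, and, only on
 * first use, the slower add_thread_field() path that opens and mmaps
 * the perf event.
 */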
static
uint64_t wrapper_perf_counter_read(void *priv)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *perf_thread_field;

	perf_field = (struct lttng_perf_counter_field *) priv;
	perf_thread_field = get_thread_field(perf_field);
	return arch_read_perf_counter(perf_thread_field);
}
static
void perf_counter_record(void *priv,
		struct lttng_ust_probe_ctx *probe_ctx __attribute__((unused)),
		struct lttng_ust_ring_buffer_ctx *ctx,
		struct lttng_ust_channel_buffer *chan)
{
	uint64_t value;

	value = wrapper_perf_counter_read(priv);
	chan->ops->event_write(ctx, &value, sizeof(value), lttng_ust_rb_alignof(value));
}
static
void perf_counter_get_value(void *priv,
		struct lttng_ust_probe_ctx *probe_ctx __attribute__((unused)),
		struct lttng_ust_ctx_value *value)
{
	value->u.u64 = wrapper_perf_counter_read(priv);
}
/* Called with perf lock held */
static
void lttng_destroy_perf_thread_field(
		struct lttng_perf_counter_thread_field *thread_field)
{
	close_perf_fd(thread_field->fd);
	unmap_perf_page(thread_field->pc);
	cds_list_del_rcu(&thread_field->rcu_field_node);
	cds_list_del(&thread_field->thread_field_node);
	free(thread_field);
}
static
void lttng_destroy_perf_thread_key(void *_key)
{
	struct lttng_perf_counter_thread *perf_thread = _key;
	struct lttng_perf_counter_thread_field *pos, *p;

	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
			rcu_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_thread);
}
/* Called with UST lock held */
static
void lttng_destroy_perf_counter_ctx_field(void *priv)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *pos, *p;

	perf_field = (struct lttng_perf_counter_field *) priv;
	free(perf_field->name);
	/*
	 * This put is performed when no threads can concurrently
	 * perform a "get", thanks to the urcu-bp grace period. Holding
	 * the lttng perf lock protects against concurrent modification
	 * of the per-thread thread field list.
	 */
	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
			thread_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_field->event_field);
	free(perf_field);
}
#ifdef LTTNG_UST_ARCH_ARMV7

static
int perf_get_exclude_kernel(void)
{
	return 0;
}

#else /* LTTNG_UST_ARCH_ARMV7 */

static
int perf_get_exclude_kernel(void)
{
	return 1;
}

#endif /* LTTNG_UST_ARCH_ARMV7 */
static const struct lttng_ust_type_common *ust_type =
	lttng_ust_static_type_integer(sizeof(uint64_t) * CHAR_BIT,
		lttng_ust_rb_alignof(uint64_t) * CHAR_BIT,
		lttng_ust_is_signed_type(uint64_t),
		LTTNG_UST_BYTE_ORDER, 10);
/* Called with UST lock held */
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_ust_ctx **ctx)
{
	struct lttng_ust_ctx_field ctx_field;
	struct lttng_ust_event_field *event_field;
	struct lttng_perf_counter_field *perf_field;
	char *name_alloc;
	int ret;

	if (lttng_find_context(*ctx, name)) {
		ret = -EEXIST;
		goto find_error;
	}
	name_alloc = strdup(name);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}
	event_field = zmalloc(sizeof(*event_field));
	if (!event_field) {
		ret = -ENOMEM;
		goto event_field_alloc_error;
	}
	event_field->name = name_alloc;
	event_field->type = ust_type;

	perf_field = zmalloc(sizeof(*perf_field));
	if (!perf_field) {
		ret = -ENOMEM;
		goto perf_field_alloc_error;
	}
	perf_field->attr.type = type;
	perf_field->attr.config = config;
	perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
	CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
	perf_field->name = name_alloc;
	perf_field->event_field = event_field;

	/* Ensure that this perf counter can be used in this process. */
	ret = open_perf_fd(&perf_field->attr);
	if (ret < 0) {
		ret = -ENODEV;
		goto setup_error;
	}
	close_perf_fd(ret);

	ctx_field.event_field = event_field;
	ctx_field.get_size = perf_counter_get_size;
	ctx_field.record = perf_counter_record;
	ctx_field.get_value = perf_counter_get_value;
	ctx_field.destroy = lttng_destroy_perf_counter_ctx_field;
	ctx_field.priv = perf_field;

	ret = lttng_ust_context_append(ctx, &ctx_field);
	if (ret)
		goto append_context_error;
	return 0;

append_context_error:
setup_error:
	free(perf_field);
perf_field_alloc_error:
	free(event_field);
event_field_alloc_error:
	free(name_alloc);
name_alloc_error:
find_error:
	return ret;
}
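/*
 * Illustrative call (a sketch; PERF_TYPE_HARDWARE and
 * PERF_COUNT_HW_CPU_CYCLES come from the perf UAPI, and the field
 * name is hypothetical):
 *
 *	ret = lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
 *			PERF_COUNT_HW_CPU_CYCLES,
 *			"perf_thread_cpu_cycles", &ctx);
 */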
int lttng_perf_counter_init(void)
{
	int ret;

	ret = pthread_key_create(&perf_counter_key,
			lttng_destroy_perf_thread_key);
	if (ret)
		ret = -ret;
	return ret;
}
void lttng_perf_counter_exit(void)
{
	int ret;

	ret = pthread_key_delete(perf_counter_key);
	if (ret) {
		errno = ret;
		PERROR("Error in pthread_key_delete");
	}
}