[lttng-ust.git] / liblttng-ust / lttng-context-perf-counters.c
/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng UST performance monitoring counters (perf-counters) integration.
 */

#define _LGPL_SOURCE
#include <limits.h>
#include <errno.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <lttng/ust-arch.h>
#include <lttng/ust-events.h>
#include <lttng/ust-tracer.h>
#include <lttng/ringbuffer-context.h>
#include <urcu/system.h>
#include <urcu/arch.h>
#include <urcu/rculist.h>
#include <ust-helper.h>
#include <urcu/ref.h>
#include <usterr-signal-safe.h>
#include <signal.h>
#include <urcu/tls-compat.h>
#include "perf_event.h"

#include "context-internal.h"
#include "lttng-tracer-core.h"
#include "ust-events-internal.h"

/*
 * We use a global perf counter key and iterate on per-thread RCU lists
 * of fields in the fast path.  Strictly speaking, this does not give
 * the best fast-path complexity, but it ensures that teardown of a
 * session racing with thread exit is handled without races (see the
 * illustration following the structure definitions below).
 *
 * Updates and traversals of thread_field_list are protected by the
 * lttng perf lock.  Updates to rcu_field_list are protected by the
 * lttng perf lock; fast-path traversals rely on RCU.
 */

struct lttng_perf_counter_thread_field {
	struct lttng_perf_counter_field *field;	/* Back reference */
	struct perf_event_mmap_page *pc;
	struct cds_list_head thread_field_node;	/* Per-field list of thread fields (node) */
	struct cds_list_head rcu_field_node;	/* RCU per-thread list of fields (node) */
	int fd;					/* Perf FD */
};

struct lttng_perf_counter_thread {
	struct cds_list_head rcu_field_list;	/* RCU per-thread list of fields */
};

struct lttng_perf_counter_field {
	struct perf_event_attr attr;
	struct cds_list_head thread_field_list;	/* Per-field list of thread fields */
};
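
/*
 * Illustration (not from the original source): each thread field sits
 * on two lists at once.  For one field F and threads t1, t2:
 *
 *	F->thread_field_list:	TF(F,t1) <-> TF(F,t2)
 *	t1->rcu_field_list:	TF(F,t1) <-> TF(G,t1) <-> ...
 *
 * The per-field list lets session teardown reach every thread's
 * counter state; the per-thread RCU list lets the fast path and
 * thread-exit teardown reach every field of the current thread.
 */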

static pthread_key_t perf_counter_key;

/*
 * lttng_perf_lock - Protect lttng-ust perf counter data structures
 *
 * Nests within the ust_lock, and therefore within the libc dl lock.
 * Consequently, the TLS fixup must be performed before nesting into
 * this lock.  Nests inside the RCU-bp read-side lock.  Protects
 * against concurrent fork.
 */
static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Cancel state when grabbing the ust_perf_mutex. Saved when locking,
 * restored on unlock. Protected by ust_perf_mutex.
 */
static int ust_perf_saved_cancelstate;

/*
 * Track whether we are tracing from a signal handler nested on an
 * application thread.
 */
static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);

/*
 * Force a read of the TLS variable, which implies its fixup when
 * called at dlopen time (library constructor).
 */
void lttng_ust_fixup_perf_counter_tls(void)
{
	asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
}

void lttng_perf_lock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, oldstate;

	ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	if (ret) {
		ERR("pthread_setcancelstate: %s", strerror(ret));
	}
	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (!URCU_TLS(ust_perf_mutex_nest)++) {
		/*
		 * Ensure the compiler doesn't move the nest-count store
		 * past the mutex acquisition, in case the callee were
		 * considered a leaf function.
		 */
		cmm_barrier();
		pthread_mutex_lock(&ust_perf_mutex);
		ust_perf_saved_cancelstate = oldstate;
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
}

void lttng_perf_unlock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, newstate, oldstate;
	bool restore_cancel = false;

	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	/*
	 * Ensure the compiler doesn't move the nest-count store before
	 * the end of the critical section, in case functions called
	 * within it were considered leaf functions.
	 */
	cmm_barrier();
	if (!--URCU_TLS(ust_perf_mutex_nest)) {
		newstate = ust_perf_saved_cancelstate;
		restore_cancel = true;
		pthread_mutex_unlock(&ust_perf_mutex);
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (restore_cancel) {
		ret = pthread_setcancelstate(newstate, &oldstate);
		if (ret) {
			ERR("pthread_setcancelstate: %s", strerror(ret));
		}
	}
}
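
/*
 * Usage sketch (illustrative, mirroring add_thread_field() below): the
 * lock pair wraps short critical sections that modify the thread-field
 * lists, and is safe to take from signal handlers and thread-exit
 * paths:
 *
 *	lttng_perf_lock();
 *	cds_list_add_rcu(&thread_field->rcu_field_node,
 *			&perf_thread->rcu_field_list);
 *	cds_list_add(&thread_field->thread_field_node,
 *			&perf_field->thread_field_list);
 *	lttng_perf_unlock();
 */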

static
size_t perf_counter_get_size(struct lttng_ust_ctx_field *field __attribute__((unused)),
		size_t offset)
{
	size_t size = 0;

	size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}

static
uint64_t read_perf_counter_syscall(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint64_t count;
	ssize_t len;

	if (caa_unlikely(thread_field->fd < 0))
		return 0;

	/*
	 * Compare through a signed temporary: comparing the ssize_t
	 * return of read() directly against sizeof(count) would convert
	 * a -1 error return to a huge unsigned value, bypassing the
	 * error check.
	 */
	len = read(thread_field->fd, &count, sizeof(count));
	if (caa_unlikely(len != (ssize_t) sizeof(count)))
		return 0;

	return count;
}

#if defined(LTTNG_UST_ARCH_X86)

static
uint64_t rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((uint64_t) high) << 32;
}
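
/*
 * Note (added for clarity): rdpmc reads the performance-monitoring
 * counter selected by ECX and returns it in EDX:EAX (high and low
 * 32 bits), reassembled above.  Callers pass pc->index - 1 because the
 * kernel exports the counter index biased by one in the perf mmap
 * page, with index == 0 meaning "no counter assigned".
 */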

static
bool has_rdpmc(struct perf_event_mmap_page *pc)
{
	if (caa_unlikely(!pc->cap_bit0_is_deprecated))
		return false;
	/* Since Linux kernel 3.12. */
	return pc->cap_user_rdpmc;
}

static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint32_t seq, idx;
	uint64_t count;
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (caa_unlikely(!pc))
		return 0;

	do {
		seq = CMM_LOAD_SHARED(pc->lock);
		cmm_barrier();

		idx = pc->index;
		if (caa_likely(has_rdpmc(pc) && idx)) {
			int64_t pmcval;

			pmcval = rdpmc(idx - 1);
			/* Sign-extend the pmc register result. */
			pmcval <<= 64 - pc->pmc_width;
			pmcval >>= 64 - pc->pmc_width;
			count = pc->offset + pmcval;
		} else {
			/* Fall back on the read() system call if rdpmc cannot be used. */
			return read_perf_counter_syscall(thread_field);
		}
		cmm_barrier();
	} while (CMM_LOAD_SHARED(pc->lock) != seq);

	return count;
}
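
/*
 * Worked example (illustrative): with pc->pmc_width == 48, a raw
 * rdpmc value of 0x0000800000000000 (bit 47 set) becomes
 * 0x8000000000000000 after the left shift by 64 - 48 = 16 bits; the
 * arithmetic right shift by 16 then yields 0xffff800000000000, so the
 * counter value is properly sign-extended before being added to
 * pc->offset.
 */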

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (!pc)
		return 0;
	return !has_rdpmc(pc);
}

#else

/* Generic (slow) implementation using a read system call. */
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	return read_perf_counter_syscall(thread_field);
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field
		__attribute__((unused)))
{
	return 1;
}

#endif

static
int sys_perf_event_open(struct perf_event_attr *attr,
		pid_t pid, int cpu, int group_fd,
		unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu,
			group_fd, flags);
}
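
/*
 * Illustration (not part of this file's API, assuming the
 * PERF_TYPE_HARDWARE and PERF_COUNT_HW_CPU_CYCLES constants from the
 * bundled perf_event.h): opening a CPU-cycles counter for the calling
 * thread on any CPU, as open_perf_fd() below does with the field's
 * attr.
 *
 *	struct perf_event_attr attr;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.type = PERF_TYPE_HARDWARE;
 *	attr.size = sizeof(attr);
 *	attr.config = PERF_COUNT_HW_CPU_CYCLES;
 *	attr.exclude_kernel = 1;
 *	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
 *
 * (pid = 0 selects the calling thread, cpu = -1 means any CPU.)
 */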

static
int open_perf_fd(struct perf_event_attr *attr)
{
	int fd;

	fd = sys_perf_event_open(attr, 0, -1, -1, 0);
	if (fd < 0)
		return -1;

	return fd;
}

static
void close_perf_fd(int fd)
{
	int ret;

	if (fd < 0)
		return;

	ret = close(fd);
	if (ret) {
		perror("Error closing LTTng-UST perf event FD");
	}
}

static void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
{
	void *perf_addr;

	perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
			PROT_READ, MAP_SHARED, thread_field->fd, 0);
	if (perf_addr == MAP_FAILED)
		perf_addr = NULL;
	thread_field->pc = perf_addr;

	if (!arch_perf_keep_fd(thread_field)) {
		close_perf_fd(thread_field->fd);
		thread_field->fd = -1;
	}
}

static
void unmap_perf_page(struct perf_event_mmap_page *pc)
{
	int ret;

	if (!pc)
		return;
	ret = munmap(pc, sizeof(struct perf_event_mmap_page));
	if (ret < 0) {
		PERROR("Error in munmap");
		abort();
	}
}

static
struct lttng_perf_counter_thread *alloc_perf_counter_thread(void)
{
	struct lttng_perf_counter_thread *perf_thread;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	perf_thread = pthread_getspecific(perf_counter_key);
	if (perf_thread)
		goto skip;
	perf_thread = zmalloc(sizeof(*perf_thread));
	if (!perf_thread)
		abort();
	CDS_INIT_LIST_HEAD(&perf_thread->rcu_field_list);
	ret = pthread_setspecific(perf_counter_key, perf_thread);
	if (ret)
		abort();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return perf_thread;
}

static
struct lttng_perf_counter_thread_field *
	add_thread_field(struct lttng_perf_counter_field *perf_field,
		struct lttng_perf_counter_thread *perf_thread)
{
	struct lttng_perf_counter_thread_field *thread_field;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == perf_field)
			goto skip;
	}
	thread_field = zmalloc(sizeof(*thread_field));
	if (!thread_field)
		abort();
	thread_field->field = perf_field;
	thread_field->fd = open_perf_fd(&perf_field->attr);
	if (thread_field->fd >= 0)
		setup_perf(thread_field);
	/*
	 * Note: thread_field->pc can be NULL if setup_perf() fails.
	 * Also, thread_field->fd can be -1 if open_perf_fd() fails.
	 */
	lttng_perf_lock();
	cds_list_add_rcu(&thread_field->rcu_field_node,
			&perf_thread->rcu_field_list);
	cds_list_add(&thread_field->thread_field_node,
			&perf_field->thread_field_list);
	lttng_perf_unlock();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return thread_field;
}

static
struct lttng_perf_counter_thread_field *
	get_thread_field(struct lttng_perf_counter_field *field)
{
	struct lttng_perf_counter_thread *perf_thread;
	struct lttng_perf_counter_thread_field *thread_field;

	perf_thread = pthread_getspecific(perf_counter_key);
	if (!perf_thread)
		perf_thread = alloc_perf_counter_thread();
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == field)
			return thread_field;
	}
	/* perf_counter_thread_field not found, need to add one */
	return add_thread_field(field, perf_thread);
}

static
uint64_t wrapper_perf_counter_read(struct lttng_ust_ctx_field *field)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *perf_thread_field;

	perf_field = (struct lttng_perf_counter_field *) field->priv;
	perf_thread_field = get_thread_field(perf_field);
	return arch_read_perf_counter(perf_thread_field);
}

static
void perf_counter_record(struct lttng_ust_ctx_field *field,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_channel_buffer *chan)
{
	uint64_t value;

	value = wrapper_perf_counter_read(field);
	chan->ops->event_write(ctx, &value, sizeof(value), lttng_ust_rb_alignof(value));
}

static
void perf_counter_get_value(struct lttng_ust_ctx_field *field,
		struct lttng_ust_ctx_value *value)
{
	value->u.s64 = wrapper_perf_counter_read(field);
}

/* Called with perf lock held */
static
void lttng_destroy_perf_thread_field(
		struct lttng_perf_counter_thread_field *thread_field)
{
	close_perf_fd(thread_field->fd);
	unmap_perf_page(thread_field->pc);
	cds_list_del_rcu(&thread_field->rcu_field_node);
	cds_list_del(&thread_field->thread_field_node);
	free(thread_field);
}

static
void lttng_destroy_perf_thread_key(void *_key)
{
	struct lttng_perf_counter_thread *perf_thread = _key;
	struct lttng_perf_counter_thread_field *pos, *p;

	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
			rcu_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_thread);
}

/* Called with UST lock held */
static
void lttng_destroy_perf_counter_field(struct lttng_ust_ctx_field *field)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *pos, *p;

	free((char *) field->event_field->name);
	perf_field = (struct lttng_perf_counter_field *) field->priv;
	/*
	 * This put is performed when no thread can concurrently
	 * perform a "get" on the field, thanks to the urcu-bp grace
	 * period.  Holding the lttng perf lock protects against
	 * concurrent modification of the per-thread thread field
	 * list.
	 */
	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
			thread_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_field);
}

#ifdef LTTNG_UST_ARCH_ARMV7

static
int perf_get_exclude_kernel(void)
{
	return 0;
}

#else /* LTTNG_UST_ARCH_ARMV7 */

static
int perf_get_exclude_kernel(void)
{
	return 1;
}

#endif /* LTTNG_UST_ARCH_ARMV7 */

/* Called with UST lock held */
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_ust_ctx **ctx)
{
	struct lttng_ust_ctx_field *field;
	struct lttng_ust_type_common *ust_type;
	struct lttng_perf_counter_field *perf_field;
	char *name_alloc;
	int ret;

	name_alloc = strdup(name);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}
	perf_field = zmalloc(sizeof(*perf_field));
	if (!perf_field) {
		ret = -ENOMEM;
		goto perf_field_alloc_error;
	}
	ust_type = lttng_ust_create_type_integer(sizeof(uint64_t) * CHAR_BIT,
			lttng_ust_rb_alignof(uint64_t) * CHAR_BIT,
			lttng_ust_is_signed_type(uint64_t),
			BYTE_ORDER, 10);
	if (!ust_type) {
		ret = -ENOMEM;
		goto type_alloc_error;
	}
	field = lttng_append_context(ctx);
	if (!field) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	if (lttng_find_context(*ctx, name_alloc)) {
		ret = -EEXIST;
		goto find_error;
	}

	field->destroy = lttng_destroy_perf_counter_field;

	field->event_field->name = name_alloc;
	field->event_field->type = ust_type;
	field->get_size = perf_counter_get_size;
	field->record = perf_counter_record;
	field->get_value = perf_counter_get_value;

	perf_field->attr.type = type;
	perf_field->attr.config = config;
	perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
	CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
	field->priv = perf_field;

	/* Ensure that this perf counter can be used in this process. */
	ret = open_perf_fd(&perf_field->attr);
	if (ret < 0) {
		ret = -ENODEV;
		goto setup_error;
	}
	close_perf_fd(ret);

	/*
	 * Contexts can only be added before tracing is started, so we
	 * don't have to synchronize against concurrent threads using
	 * the field here.
	 */

	lttng_context_update(*ctx);
	return 0;

setup_error:
find_error:
	lttng_remove_context_field(ctx, field);
append_context_error:
	lttng_ust_destroy_type(ust_type);
type_alloc_error:
	free(perf_field);
perf_field_alloc_error:
	free(name_alloc);
name_alloc_error:
	return ret;
}
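
/*
 * Caller sketch (hypothetical: the context name "perf_thread_cycles"
 * and the session_ctx variable are illustrative; real callers live in
 * lttng-ust's context-handling code):
 *
 *	ret = lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
 *			PERF_COUNT_HW_CPU_CYCLES,
 *			"perf_thread_cycles", &session_ctx);
 *	if (ret)
 *		return ret;
 */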

int lttng_perf_counter_init(void)
{
	int ret;

	ret = pthread_key_create(&perf_counter_key,
			lttng_destroy_perf_thread_key);
	if (ret)
		ret = -ret;
	return ret;
}

void lttng_perf_counter_exit(void)
{
	int ret;

	ret = pthread_key_delete(perf_counter_key);
	if (ret) {
		errno = ret;
		PERROR("Error in pthread_key_delete");
	}
}