src/liblttng-ust/lttng-context-perf-counters.c
/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng UST performance monitoring counters (perf-counters) integration.
 */

#define _LGPL_SOURCE
#include <errno.h>	/* errno, EEXIST, ENOMEM, ENODEV */
#include <limits.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <lttng/ust-arch.h>
#include <lttng/ust-events.h>
#include <lttng/ust-tracer.h>
#include <lttng/ringbuffer-context.h>
#include <urcu/system.h>
#include <urcu/arch.h>
#include <urcu/rculist.h>
#include <ust-helper.h>
#include <urcu/ref.h>
#include <usterr-signal-safe.h>
#include <signal.h>
#include <urcu/tls-compat.h>
#include "perf_event.h"

#include "context-internal.h"
#include "lttng-tracer-core.h"
#include "ust-events-internal.h"

/*
 * We use a global perf counter key and iterate on per-thread RCU lists
 * of fields in the fast path, even though this is not strictly speaking
 * what would provide the best fast-path complexity, to ensure teardown
 * of sessions vs thread exit is handled racelessly.
 *
 * Updates and traversals of thread_field_list are protected by the UST lock.
 * Updates to rcu_field_list are protected by the UST lock.
 */

struct lttng_perf_counter_thread_field {
        struct lttng_perf_counter_field *field;	/* Back reference */
        struct perf_event_mmap_page *pc;
        struct cds_list_head thread_field_node;	/* Per-field list of thread fields (node) */
        struct cds_list_head rcu_field_node;	/* RCU per-thread list of fields (node) */
        int fd;					/* Perf FD */
};

struct lttng_perf_counter_thread {
        struct cds_list_head rcu_field_list;	/* RCU per-thread list of fields */
};

struct lttng_perf_counter_field {
        struct perf_event_attr attr;
        struct cds_list_head thread_field_list;	/* Per-field list of thread fields */
        char *name;
};

static pthread_key_t perf_counter_key;

/*
 * lttng_perf_lock - Protect lttng-ust perf counter data structures
 *
 * Nests within the ust_lock, and therefore within the libc dl lock.
 * Therefore, we need to fixup the TLS before nesting into this lock.
 * Nests inside RCU bp read-side lock. Protects against concurrent
 * fork.
 */
static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Cancel state when grabbing the ust_perf_mutex. Saved when locking,
 * restored on unlock. Protected by ust_perf_mutex.
 */
static int ust_perf_saved_cancelstate;

/*
 * Track whether we are tracing from a signal handler nested on an
 * application thread.
 */
static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);

/*
 * Force a read (imply TLS fixup for dlopen) of TLS variables.
 */
void lttng_ust_fixup_perf_counter_tls(void)
{
        asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
}

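/*
 * Take the perf lock. Signals are blocked and thread cancellation is
 * disabled while the nest count is updated and the mutex is acquired,
 * so that a signal handler tracing on this thread cannot observe an
 * inconsistent lock state. The nest count makes the lock usable from a
 * signal handler running over a thread that already holds it.
 */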
void lttng_perf_lock(void)
{
        sigset_t sig_all_blocked, orig_mask;
        int ret, oldstate;

        ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
        if (ret) {
                ERR("pthread_setcancelstate: %s", strerror(ret));
        }
        sigfillset(&sig_all_blocked);
        ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
        if (ret) {
                ERR("pthread_sigmask: %s", strerror(ret));
        }
        if (!URCU_TLS(ust_perf_mutex_nest)++) {
                /*
                 * Ensure the compiler does not move the store after the
                 * close() call, in case close() would be marked as leaf.
                 */
                cmm_barrier();
                pthread_mutex_lock(&ust_perf_mutex);
                ust_perf_saved_cancelstate = oldstate;
        }
        ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
        if (ret) {
                ERR("pthread_sigmask: %s", strerror(ret));
        }
}

void lttng_perf_unlock(void)
{
        sigset_t sig_all_blocked, orig_mask;
        int ret, newstate, oldstate;
        bool restore_cancel = false;

        sigfillset(&sig_all_blocked);
        ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
        if (ret) {
                ERR("pthread_sigmask: %s", strerror(ret));
        }
        /*
         * Ensure the compiler does not move the store before the
         * close() call, in case close() would be marked as leaf.
         */
        cmm_barrier();
        if (!--URCU_TLS(ust_perf_mutex_nest)) {
                newstate = ust_perf_saved_cancelstate;
                restore_cancel = true;
                pthread_mutex_unlock(&ust_perf_mutex);
        }
        ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
        if (ret) {
                ERR("pthread_sigmask: %s", strerror(ret));
        }
        if (restore_cancel) {
                ret = pthread_setcancelstate(newstate, &oldstate);
                if (ret) {
                        ERR("pthread_setcancelstate: %s", strerror(ret));
                }
        }
}

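/*
 * get_size callback: reserve space for one uint64_t-aligned 64-bit
 * counter value in the ring buffer event.
 */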
static
size_t perf_counter_get_size(void *priv __attribute__((unused)),
                size_t offset)
{
        size_t size = 0;

        size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
        size += sizeof(uint64_t);
        return size;
}

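/*
 * Read the counter value through read(2) on the perf event file
 * descriptor. Returns 0 when no fd is available or the read fails.
 */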
static
uint64_t read_perf_counter_syscall(
                struct lttng_perf_counter_thread_field *thread_field)
{
        uint64_t count;

        if (caa_unlikely(thread_field->fd < 0))
                return 0;

        if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
                        < sizeof(count)))
                return 0;

        return count;
}

#if defined(LTTNG_UST_ARCH_X86)

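/* Read a performance monitoring counter directly with the x86 rdpmc instruction. */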
static
uint64_t rdpmc(unsigned int counter)
{
        unsigned int low, high;

        asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

        return low | ((uint64_t) high) << 32;
}

static
bool has_rdpmc(struct perf_event_mmap_page *pc)
{
        if (caa_unlikely(!pc->cap_bit0_is_deprecated))
                return false;
        /* Since Linux kernel 3.12. */
        return pc->cap_user_rdpmc;
}

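/*
 * Read the counter in user space when possible: retry the rdpmc-based
 * read until the seqlock published by the kernel (pc->lock) is stable,
 * and fall back on the read(2) system call when rdpmc cannot be used.
 */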
static
uint64_t arch_read_perf_counter(
                struct lttng_perf_counter_thread_field *thread_field)
{
        uint32_t seq, idx;
        uint64_t count;
        struct perf_event_mmap_page *pc = thread_field->pc;

        if (caa_unlikely(!pc))
                return 0;

        do {
                seq = CMM_LOAD_SHARED(pc->lock);
                cmm_barrier();

                idx = pc->index;
                if (caa_likely(has_rdpmc(pc) && idx)) {
                        int64_t pmcval;

                        pmcval = rdpmc(idx - 1);
                        /* Sign-extend the pmc register result. */
                        pmcval <<= 64 - pc->pmc_width;
                        pmcval >>= 64 - pc->pmc_width;
                        count = pc->offset + pmcval;
                } else {
                        /* Fall-back on system call if rdpmc cannot be used. */
                        return read_perf_counter_syscall(thread_field);
                }
                cmm_barrier();
        } while (CMM_LOAD_SHARED(pc->lock) != seq);

        return count;
}

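/*
 * Keep the perf event fd open only when the mmap'd page cannot be read
 * with rdpmc, since the fd is then needed for read(2).
 */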
static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
        struct perf_event_mmap_page *pc = thread_field->pc;

        if (!pc)
                return 0;
        return !has_rdpmc(pc);
}

#else

/* Generic (slow) implementation using a read system call. */
static
uint64_t arch_read_perf_counter(
                struct lttng_perf_counter_thread_field *thread_field)
{
        return read_perf_counter_syscall(thread_field);
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
        return 1;
}

#endif

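/* perf_event_open(2) has no glibc wrapper; invoke it through syscall(). */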
static
int sys_perf_event_open(struct perf_event_attr *attr,
                pid_t pid, int cpu, int group_fd,
                unsigned long flags)
{
        return syscall(SYS_perf_event_open, attr, pid, cpu,
                        group_fd, flags);
}

static
int open_perf_fd(struct perf_event_attr *attr)
{
        int fd;

        fd = sys_perf_event_open(attr, 0, -1, -1, 0);
        if (fd < 0)
                return -1;

        return fd;
}

static
void close_perf_fd(int fd)
{
        int ret;

        if (fd < 0)
                return;

        ret = close(fd);
        if (ret) {
                perror("Error closing LTTng-UST perf memory mapping FD");
        }
}

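/*
 * Map the perf event page so the counter can be read from user space.
 * When the architecture can read it with rdpmc, the file descriptor is
 * no longer needed and is closed.
 */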
static void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
{
        void *perf_addr;

        perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
                        PROT_READ, MAP_SHARED, thread_field->fd, 0);
        if (perf_addr == MAP_FAILED)
                perf_addr = NULL;
        thread_field->pc = perf_addr;

        if (!arch_perf_keep_fd(thread_field)) {
                close_perf_fd(thread_field->fd);
                thread_field->fd = -1;
        }
}

static
void unmap_perf_page(struct perf_event_mmap_page *pc)
{
        int ret;

        if (!pc)
                return;
        ret = munmap(pc, sizeof(struct perf_event_mmap_page));
        if (ret < 0) {
                PERROR("Error in munmap");
                abort();
        }
}

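/*
 * Lazily allocate the calling thread's perf state. Signals are blocked
 * so that a signal handler tracing on this thread cannot race with the
 * check-and-set of the pthread key.
 */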
static
struct lttng_perf_counter_thread *alloc_perf_counter_thread(void)
{
        struct lttng_perf_counter_thread *perf_thread;
        sigset_t newmask, oldmask;
        int ret;

        ret = sigfillset(&newmask);
        if (ret)
                abort();
        ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
        if (ret)
                abort();
        /* Check again with signals disabled */
        perf_thread = pthread_getspecific(perf_counter_key);
        if (perf_thread)
                goto skip;
        perf_thread = zmalloc(sizeof(*perf_thread));
        if (!perf_thread)
                abort();
        CDS_INIT_LIST_HEAD(&perf_thread->rcu_field_list);
        ret = pthread_setspecific(perf_counter_key, perf_thread);
        if (ret)
                abort();
skip:
        ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
        if (ret)
                abort();
        return perf_thread;
}

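/*
 * Create the calling thread's counter for a given context field: open
 * the perf fd, map its page, and link the new thread field on both the
 * per-thread RCU list and the per-field list under the perf lock.
 */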
static
struct lttng_perf_counter_thread_field *
        add_thread_field(struct lttng_perf_counter_field *perf_field,
                struct lttng_perf_counter_thread *perf_thread)
{
        struct lttng_perf_counter_thread_field *thread_field;
        sigset_t newmask, oldmask;
        int ret;

        ret = sigfillset(&newmask);
        if (ret)
                abort();
        ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
        if (ret)
                abort();
        /* Check again with signals disabled */
        cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
                        rcu_field_node) {
                if (thread_field->field == perf_field)
                        goto skip;
        }
        thread_field = zmalloc(sizeof(*thread_field));
        if (!thread_field)
                abort();
        thread_field->field = perf_field;
        thread_field->fd = open_perf_fd(&perf_field->attr);
        if (thread_field->fd >= 0)
                setup_perf(thread_field);
        /*
         * Note: thread_field->pc can be NULL if setup_perf() fails.
         * Also, thread_field->fd can be -1 if open_perf_fd() fails.
         */
        lttng_perf_lock();
        cds_list_add_rcu(&thread_field->rcu_field_node,
                        &perf_thread->rcu_field_list);
        cds_list_add(&thread_field->thread_field_node,
                        &perf_field->thread_field_list);
        lttng_perf_unlock();
skip:
        ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
        if (ret)
                abort();
        return thread_field;
}

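/*
 * Fast-path lookup of the calling thread's counter for this field,
 * creating the per-thread state and the counter on first use.
 */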
static
struct lttng_perf_counter_thread_field *
        get_thread_field(struct lttng_perf_counter_field *field)
{
        struct lttng_perf_counter_thread *perf_thread;
        struct lttng_perf_counter_thread_field *thread_field;

        perf_thread = pthread_getspecific(perf_counter_key);
        if (!perf_thread)
                perf_thread = alloc_perf_counter_thread();
        cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
                        rcu_field_node) {
                if (thread_field->field == field)
                        return thread_field;
        }
        /* perf_counter_thread_field not found, need to add one */
        return add_thread_field(field, perf_thread);
}

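/* Read the current value of this context field's counter for the calling thread. */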
static
uint64_t wrapper_perf_counter_read(void *priv)
{
        struct lttng_perf_counter_field *perf_field;
        struct lttng_perf_counter_thread_field *perf_thread_field;

        perf_field = (struct lttng_perf_counter_field *) priv;
        perf_thread_field = get_thread_field(perf_field);
        return arch_read_perf_counter(perf_thread_field);
}

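/* record callback: write the current counter value into the event's ring buffer. */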
static
void perf_counter_record(void *priv,
                struct lttng_ust_lib_ring_buffer_ctx *ctx,
                struct lttng_ust_channel_buffer *chan)
{
        uint64_t value;

        value = wrapper_perf_counter_read(priv);
        chan->ops->event_write(ctx, &value, sizeof(value), lttng_ust_rb_alignof(value));
}

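/* get_value callback: expose the current counter value as a signed 64-bit context value. */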
static
void perf_counter_get_value(void *priv,
                struct lttng_ust_ctx_value *value)
{
        value->u.s64 = wrapper_perf_counter_read(priv);
}

/* Called with perf lock held */
static
void lttng_destroy_perf_thread_field(
                struct lttng_perf_counter_thread_field *thread_field)
{
        close_perf_fd(thread_field->fd);
        unmap_perf_page(thread_field->pc);
        cds_list_del_rcu(&thread_field->rcu_field_node);
        cds_list_del(&thread_field->thread_field_node);
        free(thread_field);
}

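/*
 * pthread key destructor: tear down every counter owned by the exiting
 * thread, then free its per-thread state.
 */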
static
void lttng_destroy_perf_thread_key(void *_key)
{
        struct lttng_perf_counter_thread *perf_thread = _key;
        struct lttng_perf_counter_thread_field *pos, *p;

        lttng_perf_lock();
        cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
                        rcu_field_node)
                lttng_destroy_perf_thread_field(pos);
        lttng_perf_unlock();
        free(perf_thread);
}

/* Called with UST lock held */
static
void lttng_destroy_perf_counter_ctx_field(void *priv)
{
        struct lttng_perf_counter_field *perf_field;
        struct lttng_perf_counter_thread_field *pos, *p;

        perf_field = (struct lttng_perf_counter_field *) priv;
        free(perf_field->name);
        /*
         * This put is performed when no threads can concurrently
         * perform a "get", thanks to the urcu-bp grace period. Holding
         * the lttng perf lock protects against concurrent modification
         * of the per-thread thread field list.
         */
        lttng_perf_lock();
        cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
                        thread_field_node)
                lttng_destroy_perf_thread_field(pos);
        lttng_perf_unlock();
        free(perf_field);
}

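/*
 * On ARMv7, counters are opened without excluding kernel-mode events;
 * all other architectures set exclude_kernel.
 */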
#ifdef LTTNG_UST_ARCH_ARMV7

static
int perf_get_exclude_kernel(void)
{
        return 0;
}

#else /* LTTNG_UST_ARCH_ARMV7 */

static
int perf_get_exclude_kernel(void)
{
        return 1;
}

#endif /* LTTNG_UST_ARCH_ARMV7 */

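/* Context field type: 64-bit unsigned integer, displayed in base 10. */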
static const struct lttng_ust_type_common *ust_type =
        lttng_ust_static_type_integer(sizeof(uint64_t) * CHAR_BIT,
                lttng_ust_rb_alignof(uint64_t) * CHAR_BIT,
                lttng_ust_is_signed_type(uint64_t),
                BYTE_ORDER, 10);

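/*
 * Append a perf counter context field to *ctx: allocate the event
 * field and perf field, verify that the counter can actually be opened
 * in this process, then register the size/record/get_value callbacks.
 */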
/* Called with UST lock held */
int lttng_add_perf_counter_to_ctx(uint32_t type,
                uint64_t config,
                const char *name,
                struct lttng_ust_ctx **ctx)
{
        struct lttng_ust_ctx_field ctx_field;
        struct lttng_ust_event_field *event_field;
        struct lttng_perf_counter_field *perf_field;
        char *name_alloc;
        int ret;

        if (lttng_find_context(*ctx, name)) {
                ret = -EEXIST;
                goto find_error;
        }
        name_alloc = strdup(name);
        if (!name_alloc) {
                ret = -ENOMEM;
                goto name_alloc_error;
        }
        event_field = zmalloc(sizeof(*event_field));
        if (!event_field) {
                ret = -ENOMEM;
                goto event_field_alloc_error;
        }
        event_field->name = name_alloc;
        event_field->type = ust_type;

        perf_field = zmalloc(sizeof(*perf_field));
        if (!perf_field) {
                ret = -ENOMEM;
                goto perf_field_alloc_error;
        }
        perf_field->attr.type = type;
        perf_field->attr.config = config;
        perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
        CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
        perf_field->name = name_alloc;

        /* Ensure that this perf counter can be used in this process. */
        ret = open_perf_fd(&perf_field->attr);
        if (ret < 0) {
                ret = -ENODEV;
                goto setup_error;
        }
        close_perf_fd(ret);

        ctx_field.event_field = event_field;
        ctx_field.get_size = perf_counter_get_size;
        ctx_field.record = perf_counter_record;
        ctx_field.get_value = perf_counter_get_value;
        ctx_field.destroy = lttng_destroy_perf_counter_ctx_field;
        ctx_field.priv = perf_field;

        ret = lttng_ust_context_append(ctx, &ctx_field);
        if (ret) {
                ret = -ENOMEM;
                goto append_context_error;
        }
        return 0;

append_context_error:
setup_error:
        free(perf_field);
perf_field_alloc_error:
        free(event_field);
event_field_alloc_error:
        free(name_alloc);
name_alloc_error:
find_error:
        return ret;
}

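/*
 * Create the pthread key used to store per-thread perf counter state;
 * its destructor releases the thread's counters at thread exit.
 */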
int lttng_perf_counter_init(void)
{
        int ret;

        ret = pthread_key_create(&perf_counter_key,
                        lttng_destroy_perf_thread_key);
        if (ret)
                ret = -ret;
        return ret;
}

void lttng_perf_counter_exit(void)
{
        int ret;

        ret = pthread_key_delete(perf_counter_key);
        if (ret) {
                errno = ret;
                PERROR("Error in pthread_key_delete");
        }
}