Refactoring: hide internal fields of ring buffer context
[lttng-ust.git] / liblttng-ust / lttng-context-perf-counters.c
/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng UST performance monitoring counters (perf-counters) integration.
 */

#define _LGPL_SOURCE
#include <limits.h>
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <lttng/ust-arch.h>
#include <lttng/ust-events.h>
#include <lttng/ust-tracer.h>
#include <lttng/ringbuffer-context.h>
#include <urcu/system.h>
#include <urcu/arch.h>
#include <urcu/rculist.h>
#include <ust-helper.h>
#include <urcu/ref.h>
#include <usterr-signal-safe.h>
#include <signal.h>
#include <urcu/tls-compat.h>
#include "perf_event.h"

#include "context-internal.h"
#include "lttng-tracer-core.h"
#include "ust-events-internal.h"

/*
 * We use a global perf counter key and iterate on per-thread RCU lists
 * of fields in the fast path, even though this is not strictly speaking
 * what would provide the best fast-path complexity, to ensure teardown
 * of sessions vs thread exit is handled racelessly.
 *
 * Updates and traversals of thread_field_list are protected by the UST lock.
 * Updates to rcu_field_list are protected by the UST lock.
 */

struct lttng_perf_counter_thread_field {
	struct lttng_perf_counter_field *field;	/* Back reference */
	struct perf_event_mmap_page *pc;
	struct cds_list_head thread_field_node;	/* Per-field list of thread fields (node) */
	struct cds_list_head rcu_field_node;	/* RCU per-thread list of fields (node) */
	int fd;					/* Perf FD */
};

struct lttng_perf_counter_thread {
	struct cds_list_head rcu_field_list;	/* RCU per-thread list of fields */
};

struct lttng_perf_counter_field {
	struct perf_event_attr attr;
	struct cds_list_head thread_field_list;	/* Per-field list of thread fields */
};

static pthread_key_t perf_counter_key;

/*
 * lttng_perf_lock - Protect lttng-ust perf counter data structures
 *
 * Nests within the ust_lock, and therefore within the libc dl lock.
 * Therefore, we need to fix up the TLS before nesting into this lock.
 * Nests inside the RCU bp read-side lock. Protects against concurrent
 * fork.
 */
static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Cancel state when grabbing the ust_perf_mutex. Saved when locking,
 * restored on unlock. Protected by ust_perf_mutex.
 */
static int ust_perf_saved_cancelstate;

/*
 * Track whether we are tracing from a signal handler nested on an
 * application thread.
 */
static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);

/*
 * Force a read (which implies a TLS fixup for dlopen) of TLS variables.
 */
void lttng_ust_fixup_perf_counter_tls(void)
{
	asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
}

void lttng_perf_lock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, oldstate;

	ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	if (ret) {
		ERR("pthread_setcancelstate: %s", strerror(ret));
	}
	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (!URCU_TLS(ust_perf_mutex_nest)++) {
		/*
		 * Ensure the compiler doesn't move the store after the close()
		 * call, in case close() would be marked as leaf.
		 */
		cmm_barrier();
		pthread_mutex_lock(&ust_perf_mutex);
		ust_perf_saved_cancelstate = oldstate;
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
}

void lttng_perf_unlock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, newstate, oldstate;
	bool restore_cancel = false;

	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	/*
	 * Ensure the compiler doesn't move the store before the close()
	 * call, in case close() would be marked as leaf.
	 */
	cmm_barrier();
	if (!--URCU_TLS(ust_perf_mutex_nest)) {
		newstate = ust_perf_saved_cancelstate;
		restore_cancel = true;
		pthread_mutex_unlock(&ust_perf_mutex);
	}
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (restore_cancel) {
		ret = pthread_setcancelstate(newstate, &oldstate);
		if (ret) {
			ERR("pthread_setcancelstate: %s", strerror(ret));
		}
	}
}

static
size_t perf_counter_get_size(struct lttng_ust_ctx_field *field, size_t offset)
{
	size_t size = 0;

	size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}

static
uint64_t read_perf_counter_syscall(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint64_t count;

	if (caa_unlikely(thread_field->fd < 0))
		return 0;

	/*
	 * read() returns ssize_t: cast sizeof() so an error return (-1)
	 * is not hidden by the signed/unsigned comparison.
	 */
	if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
				< (ssize_t) sizeof(count)))
		return 0;

	return count;
}

#if defined(LTTNG_UST_ARCH_X86)

static
uint64_t rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((uint64_t) high) << 32;
}

static
bool has_rdpmc(struct perf_event_mmap_page *pc)
{
	if (caa_unlikely(!pc->cap_bit0_is_deprecated))
		return false;
	/* Since Linux kernel 3.12. */
	return pc->cap_user_rdpmc;
}

static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint32_t seq, idx;
	uint64_t count;
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (caa_unlikely(!pc))
		return 0;

	do {
		seq = CMM_LOAD_SHARED(pc->lock);
		cmm_barrier();

		idx = pc->index;
		if (caa_likely(has_rdpmc(pc) && idx)) {
			int64_t pmcval;

			pmcval = rdpmc(idx - 1);
			/* Sign-extend the pmc register result. */
			pmcval <<= 64 - pc->pmc_width;
			pmcval >>= 64 - pc->pmc_width;
			count = pc->offset + pmcval;
		} else {
			/* Fall back on the system call if rdpmc cannot be used. */
			return read_perf_counter_syscall(thread_field);
		}
		cmm_barrier();
	} while (CMM_LOAD_SHARED(pc->lock) != seq);

	return count;
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (!pc)
		return 0;
	return !has_rdpmc(pc);
}

#else

/* Generic (slow) implementation using a read system call. */
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	return read_perf_counter_syscall(thread_field);
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	return 1;
}

#endif

static
int sys_perf_event_open(struct perf_event_attr *attr,
		pid_t pid, int cpu, int group_fd,
		unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu,
			group_fd, flags);
}

static
int open_perf_fd(struct perf_event_attr *attr)
{
	int fd;

	fd = sys_perf_event_open(attr, 0, -1, -1, 0);
	if (fd < 0)
		return -1;

	return fd;
}

static
void close_perf_fd(int fd)
{
	int ret;

	if (fd < 0)
		return;

	ret = close(fd);
	if (ret) {
		perror("Error closing LTTng-UST perf memory mapping FD");
	}
}

static void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
{
	void *perf_addr;

	perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
			PROT_READ, MAP_SHARED, thread_field->fd, 0);
	if (perf_addr == MAP_FAILED)
		perf_addr = NULL;
	thread_field->pc = perf_addr;

	if (!arch_perf_keep_fd(thread_field)) {
		close_perf_fd(thread_field->fd);
		thread_field->fd = -1;
	}
}

static
void unmap_perf_page(struct perf_event_mmap_page *pc)
{
	int ret;

	if (!pc)
		return;
	ret = munmap(pc, sizeof(struct perf_event_mmap_page));
	if (ret < 0) {
		PERROR("Error in munmap");
		abort();
	}
}

static
struct lttng_perf_counter_thread *alloc_perf_counter_thread(void)
{
	struct lttng_perf_counter_thread *perf_thread;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	perf_thread = pthread_getspecific(perf_counter_key);
	if (perf_thread)
		goto skip;
	perf_thread = zmalloc(sizeof(*perf_thread));
	if (!perf_thread)
		abort();
	CDS_INIT_LIST_HEAD(&perf_thread->rcu_field_list);
	ret = pthread_setspecific(perf_counter_key, perf_thread);
	if (ret)
		abort();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return perf_thread;
}

static
struct lttng_perf_counter_thread_field *
	add_thread_field(struct lttng_perf_counter_field *perf_field,
		struct lttng_perf_counter_thread *perf_thread)
{
	struct lttng_perf_counter_thread_field *thread_field;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == perf_field)
			goto skip;
	}
	thread_field = zmalloc(sizeof(*thread_field));
	if (!thread_field)
		abort();
	thread_field->field = perf_field;
	thread_field->fd = open_perf_fd(&perf_field->attr);
	if (thread_field->fd >= 0)
		setup_perf(thread_field);
	/*
	 * Note: thread_field->pc can be NULL if setup_perf() fails.
	 * Also, thread_field->fd can be -1 if open_perf_fd() fails.
	 */
	lttng_perf_lock();
	cds_list_add_rcu(&thread_field->rcu_field_node,
			&perf_thread->rcu_field_list);
	cds_list_add(&thread_field->thread_field_node,
			&perf_field->thread_field_list);
	lttng_perf_unlock();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return thread_field;
}

static
struct lttng_perf_counter_thread_field *
	get_thread_field(struct lttng_perf_counter_field *field)
{
	struct lttng_perf_counter_thread *perf_thread;
	struct lttng_perf_counter_thread_field *thread_field;

	perf_thread = pthread_getspecific(perf_counter_key);
	if (!perf_thread)
		perf_thread = alloc_perf_counter_thread();
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == field)
			return thread_field;
	}
	/* perf_counter_thread_field not found, need to add one */
	return add_thread_field(field, perf_thread);
}

static
uint64_t wrapper_perf_counter_read(struct lttng_ust_ctx_field *field)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *perf_thread_field;

	perf_field = (struct lttng_perf_counter_field *) field->priv;
	perf_thread_field = get_thread_field(perf_field);
	return arch_read_perf_counter(perf_thread_field);
}

static
void perf_counter_record(struct lttng_ust_ctx_field *field,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_channel_buffer *chan)
{
	uint64_t value;

	value = wrapper_perf_counter_read(field);
	chan->ops->event_write(ctx, &value, sizeof(value), lttng_ust_rb_alignof(value));
}

static
void perf_counter_get_value(struct lttng_ust_ctx_field *field,
		struct lttng_ust_ctx_value *value)
{
	value->u.s64 = wrapper_perf_counter_read(field);
}

/* Called with perf lock held */
static
void lttng_destroy_perf_thread_field(
		struct lttng_perf_counter_thread_field *thread_field)
{
	close_perf_fd(thread_field->fd);
	unmap_perf_page(thread_field->pc);
	cds_list_del_rcu(&thread_field->rcu_field_node);
	cds_list_del(&thread_field->thread_field_node);
	free(thread_field);
}

static
void lttng_destroy_perf_thread_key(void *_key)
{
	struct lttng_perf_counter_thread *perf_thread = _key;
	struct lttng_perf_counter_thread_field *pos, *p;

	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
			rcu_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_thread);
}

/* Called with UST lock held */
static
void lttng_destroy_perf_counter_field(struct lttng_ust_ctx_field *field)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *pos, *p;

	free((char *) field->event_field->name);
	perf_field = (struct lttng_perf_counter_field *) field->priv;
	/*
	 * This put is performed when no threads can concurrently
	 * perform a "get", thanks to the urcu-bp grace period.
	 * Holding the lttng perf lock protects against concurrent
	 * modification of the per-thread thread field list.
	 */
	lttng_perf_lock();
	cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
			thread_field_node)
		lttng_destroy_perf_thread_field(pos);
	lttng_perf_unlock();
	free(perf_field);
}

#ifdef LTTNG_UST_ARCH_ARMV7

static
int perf_get_exclude_kernel(void)
{
	return 0;
}

#else /* LTTNG_UST_ARCH_ARMV7 */

static
int perf_get_exclude_kernel(void)
{
	return 1;
}

#endif /* LTTNG_UST_ARCH_ARMV7 */

/* Called with UST lock held */
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_ust_ctx **ctx)
{
	struct lttng_ust_ctx_field *field;
	struct lttng_ust_type_common *ust_type;
	struct lttng_perf_counter_field *perf_field;
	char *name_alloc;
	int ret;

	name_alloc = strdup(name);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}
	perf_field = zmalloc(sizeof(*perf_field));
	if (!perf_field) {
		ret = -ENOMEM;
		goto perf_field_alloc_error;
	}
	ust_type = lttng_ust_create_type_integer(sizeof(uint64_t) * CHAR_BIT,
			lttng_ust_rb_alignof(uint64_t) * CHAR_BIT,
			lttng_ust_is_signed_type(uint64_t),
			BYTE_ORDER, 10);
	if (!ust_type) {
		ret = -ENOMEM;
		goto type_alloc_error;
	}
	field = lttng_append_context(ctx);
	if (!field) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	if (lttng_find_context(*ctx, name_alloc)) {
		ret = -EEXIST;
		goto find_error;
	}

	field->destroy = lttng_destroy_perf_counter_field;

	field->event_field->name = name_alloc;
	field->event_field->type = ust_type;
	field->get_size = perf_counter_get_size;
	field->record = perf_counter_record;
	field->get_value = perf_counter_get_value;

	perf_field->attr.type = type;
	perf_field->attr.config = config;
	perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
	CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
	field->priv = perf_field;

	/* Ensure that this perf counter can be used in this process. */
	ret = open_perf_fd(&perf_field->attr);
	if (ret < 0) {
		ret = -ENODEV;
		goto setup_error;
	}
	close_perf_fd(ret);

	/*
	 * Contexts can only be added before tracing is started, so we
	 * don't have to synchronize against concurrent threads using
	 * the field here.
	 */

	lttng_context_update(*ctx);
	return 0;

setup_error:
find_error:
	lttng_remove_context_field(ctx, field);
append_context_error:
	lttng_ust_destroy_type(ust_type);
type_alloc_error:
	free(perf_field);
perf_field_alloc_error:
	free(name_alloc);
name_alloc_error:
	return ret;
}

int lttng_perf_counter_init(void)
{
	int ret;

	ret = pthread_key_create(&perf_counter_key,
			lttng_destroy_perf_thread_key);
	if (ret)
		ret = -ret;
	return ret;
}

void lttng_perf_counter_exit(void)
{
	int ret;

	ret = pthread_key_delete(perf_counter_key);
	if (ret) {
		errno = ret;
		PERROR("Error in pthread_key_delete");
	}
}
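
For readers unfamiliar with the kernel interface this file wraps, here is a minimal, self-contained sketch (not part of lttng-ust) of the same perf_event_open(2) + read(2) pattern used by open_perf_fd() and read_perf_counter_syscall() above. It assumes a Linux system providing <linux/perf_event.h>; the helper name demo_open_cycles_counter and the cycle-burning loop are purely illustrative.

/*
 * Illustration only: open a per-thread CPU-cycles counter with
 * perf_event_open(2) and read it with read(2), the same fallback
 * path lttng-ust uses when rdpmc is unavailable.
 */
#include <inttypes.h>
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int demo_open_cycles_counter(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.exclude_kernel = 1;	/* Count user space only. */

	/* pid = 0, cpu = -1: this thread, any CPU. No group, no flags. */
	return (int) syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
}

int main(void)
{
	uint64_t count = 0;
	int fd = demo_open_cycles_counter();

	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	/* Burn a few cycles so the counter has something to report. */
	for (volatile int i = 0; i < 1000000; i++)
		;
	if (read(fd, &count, sizeof(count)) != (ssize_t) sizeof(count))
		perror("read");
	printf("cycles: %" PRIu64 "\n", count);
	close(fd);
	return 0;
}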