Refactoring: Privatize ring buffer config header
liblttng-ust/lttng-context-perf-counters.c
/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng UST performance monitoring counters (perf-counters) integration.
 */

#define _LGPL_SOURCE
#include <errno.h>      /* errno, ENOMEM, EEXIST, ENODEV */
#include <limits.h>     /* CHAR_BIT */
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <lttng/ust-events.h>
#include <lttng/ust-tracer.h>
#include <lttng/ringbuffer-context.h>
#include <urcu/system.h>
#include <urcu/arch.h>
#include <urcu/rculist.h>
#include <ust-helper.h>
#include <urcu/ref.h>
#include <usterr-signal-safe.h>
#include <signal.h>
#include <urcu/tls-compat.h>
#include "perf_event.h"

#include "context-internal.h"
#include "lttng-tracer-core.h"
#include "ust-events-internal.h"

/*
 * We use a global perf counter key and iterate on per-thread RCU lists
 * of fields in the fast path. Even though this is not, strictly
 * speaking, the best fast-path complexity, it ensures that teardown of
 * sessions vs. thread exit is handled without races.
 *
 * Updates and traversals of thread_list are protected by the UST lock.
 * Updates to rcu_field_list are protected by the UST lock.
 */

struct lttng_perf_counter_thread_field {
        struct lttng_perf_counter_field *field; /* Back reference */
        struct perf_event_mmap_page *pc;
        struct cds_list_head thread_field_node; /* Per-field list of thread fields (node) */
        struct cds_list_head rcu_field_node;    /* RCU per-thread list of fields (node) */
        int fd;                                 /* Perf FD */
};

struct lttng_perf_counter_thread {
        struct cds_list_head rcu_field_list;    /* RCU per-thread list of fields */
};

struct lttng_perf_counter_field {
        struct perf_event_attr attr;
        struct cds_list_head thread_field_list; /* Per-field list of thread fields */
};

static pthread_key_t perf_counter_key;

/*
 * lttng_perf_lock - Protect lttng-ust perf counter data structures
 *
 * Nests within the ust_lock, and therefore within the libc dl lock.
 * Therefore, we need to fixup the TLS before nesting into this lock.
 * Nests inside the RCU bp read-side lock. Protects against concurrent
 * fork.
 */
static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Cancel state when grabbing the ust_perf_mutex. Saved when locking,
 * restored on unlock. Protected by ust_perf_mutex.
 */
static int ust_perf_saved_cancelstate;

/*
 * Track whether we are tracing from a signal handler nested on an
 * application thread.
 */
static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);

/*
 * Force a read (imply TLS fixup for dlopen) of TLS variables.
 */
void lttng_ust_fixup_perf_counter_tls(void)
{
        asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
}

void lttng_perf_lock(void)
{
        sigset_t sig_all_blocked, orig_mask;
        int ret, oldstate;

        ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
        if (ret) {
                ERR("pthread_setcancelstate: %s", strerror(ret));
        }
        sigfillset(&sig_all_blocked);
        ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
        if (ret) {
                ERR("pthread_sigmask: %s", strerror(ret));
        }
        if (!URCU_TLS(ust_perf_mutex_nest)++) {
                /*
                 * Ensure the compiler doesn't move the nest-count store
                 * past the mutex-acquisition call, in case that call is
                 * treated as a leaf function by the compiler.
                 */
                cmm_barrier();
                pthread_mutex_lock(&ust_perf_mutex);
                ust_perf_saved_cancelstate = oldstate;
        }
        ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
        if (ret) {
                ERR("pthread_sigmask: %s", strerror(ret));
        }
}

void lttng_perf_unlock(void)
{
        sigset_t sig_all_blocked, orig_mask;
        int ret, newstate, oldstate;
        bool restore_cancel = false;

        sigfillset(&sig_all_blocked);
        ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
        if (ret) {
                ERR("pthread_sigmask: %s", strerror(ret));
        }
        /*
         * Ensure the compiler doesn't reorder the nest-count update with
         * the end of the critical section, in case called functions are
         * treated as leaf functions by the compiler.
         */
        cmm_barrier();
        if (!--URCU_TLS(ust_perf_mutex_nest)) {
                newstate = ust_perf_saved_cancelstate;
                restore_cancel = true;
                pthread_mutex_unlock(&ust_perf_mutex);
        }
        ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
        if (ret) {
                ERR("pthread_sigmask: %s", strerror(ret));
        }
        if (restore_cancel) {
                ret = pthread_setcancelstate(newstate, &oldstate);
                if (ret) {
                        ERR("pthread_setcancelstate: %s", strerror(ret));
                }
        }
}

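/*
 * Context "get size" callback: returns the space needed to record the
 * counter payload at @offset, i.e. the alignment padding for a uint64_t
 * plus the 8-byte value itself.
 */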
static
size_t perf_counter_get_size(struct lttng_ust_ctx_field *field, size_t offset)
{
        size_t size = 0;

        size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
        size += sizeof(uint64_t);
        return size;
}

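/*
 * Portable fallback: read the current counter value from the perf fd
 * with read(2). Returns 0 when the fd is invalid or the read fails, so
 * a broken counter records as 0 rather than aborting tracing.
 */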
static
uint64_t read_perf_counter_syscall(
                struct lttng_perf_counter_thread_field *thread_field)
{
        uint64_t count;

        if (caa_unlikely(thread_field->fd < 0))
                return 0;

        if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
                        < sizeof(count)))
                return 0;

        return count;
}

#if defined(__x86_64__) || defined(__i386__)

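/*
 * On x86, a counter can be read directly from user space with the
 * rdpmc instruction, avoiding a system call per event. rdpmc takes the
 * hardware counter index in ECX and returns the value in EDX:EAX.
 * has_rdpmc() checks the mmap'd perf_event_mmap_page capability bits
 * (cap_user_rdpmc, exposed since Linux 3.12) to see whether the kernel
 * permits this.
 */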
static
uint64_t rdpmc(unsigned int counter)
{
        unsigned int low, high;

        asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

        return low | ((uint64_t) high) << 32;
}

static
bool has_rdpmc(struct perf_event_mmap_page *pc)
{
        if (caa_unlikely(!pc->cap_bit0_is_deprecated))
                return false;
        /* Since Linux kernel 3.12. */
        return pc->cap_user_rdpmc;
}

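/*
 * Fast-path read. The kernel publishes the counter index and offset in
 * the mmap'd page, protected by a seqcount (pc->lock): the read is
 * retried whenever the sequence number changes underneath us. The
 * rdpmc result is sign-extended from pmc_width bits before being added
 * to the kernel-maintained offset.
 */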
static
uint64_t arch_read_perf_counter(
                struct lttng_perf_counter_thread_field *thread_field)
{
        uint32_t seq, idx;
        uint64_t count;
        struct perf_event_mmap_page *pc = thread_field->pc;

        if (caa_unlikely(!pc))
                return 0;

        do {
                seq = CMM_LOAD_SHARED(pc->lock);
                cmm_barrier();

                idx = pc->index;
                if (caa_likely(has_rdpmc(pc) && idx)) {
                        int64_t pmcval;

                        pmcval = rdpmc(idx - 1);
                        /* Sign-extend the pmc register result. */
                        pmcval <<= 64 - pc->pmc_width;
                        pmcval >>= 64 - pc->pmc_width;
                        count = pc->offset + pmcval;
                } else {
                        /* Fall back on a system call if rdpmc cannot be used. */
                        return read_perf_counter_syscall(thread_field);
                }
                cmm_barrier();
        } while (CMM_LOAD_SHARED(pc->lock) != seq);

        return count;
}

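/*
 * Decide whether the perf fd must be kept open for this thread field.
 * On x86 the fd is only needed when rdpmc is unavailable and reads go
 * through the syscall fallback; when rdpmc works, the fd can be closed
 * to save file descriptors, since the mmap'd page remains valid after
 * close().
 */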
static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
        struct perf_event_mmap_page *pc = thread_field->pc;

        if (!pc)
                return 0;
        return !has_rdpmc(pc);
}

#else

/* Generic (slow) implementation using a read system call. */
static
uint64_t arch_read_perf_counter(
                struct lttng_perf_counter_thread_field *thread_field)
{
        return read_perf_counter_syscall(thread_field);
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
        return 1;
}

#endif

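/*
 * glibc provides no wrapper for perf_event_open(2), so it is invoked
 * through syscall(2). open_perf_fd() below passes pid = 0 and cpu = -1,
 * i.e. the event counts the calling thread, on whichever CPU it runs.
 */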
static
int sys_perf_event_open(struct perf_event_attr *attr,
                pid_t pid, int cpu, int group_fd,
                unsigned long flags)
{
        return syscall(SYS_perf_event_open, attr, pid, cpu,
                        group_fd, flags);
}

static
int open_perf_fd(struct perf_event_attr *attr)
{
        int fd;

        fd = sys_perf_event_open(attr, 0, -1, -1, 0);
        if (fd < 0)
                return -1;

        return fd;
}

static
void close_perf_fd(int fd)
{
        int ret;

        if (fd < 0)
                return;

        ret = close(fd);
        if (ret) {
                perror("Error closing LTTng-UST perf memory mapping FD");
        }
}

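/*
 * Map the perf_event_mmap_page header through which the kernel exposes
 * the counter index, offset and capability bits. When the architecture
 * can read the counter directly through this page (see
 * arch_perf_keep_fd()), the file descriptor is closed immediately to
 * keep per-thread fd consumption down.
 */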
static void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
{
        void *perf_addr;

        perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
                        PROT_READ, MAP_SHARED, thread_field->fd, 0);
        if (perf_addr == MAP_FAILED)
                perf_addr = NULL;
        thread_field->pc = perf_addr;

        if (!arch_perf_keep_fd(thread_field)) {
                close_perf_fd(thread_field->fd);
                thread_field->fd = -1;
        }
}

static
void unmap_perf_page(struct perf_event_mmap_page *pc)
{
        int ret;

        if (!pc)
                return;
        ret = munmap(pc, sizeof(struct perf_event_mmap_page));
        if (ret < 0) {
                PERROR("Error in munmap");
                abort();
        }
}

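/*
 * Lazily allocate the per-thread state on first use. All signals are
 * blocked while the thread-specific data is re-checked and installed,
 * so a signal handler tracing on this thread cannot race with the
 * allocation and observe a half-initialized perf_thread.
 */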
static
struct lttng_perf_counter_thread *alloc_perf_counter_thread(void)
{
        struct lttng_perf_counter_thread *perf_thread;
        sigset_t newmask, oldmask;
        int ret;

        ret = sigfillset(&newmask);
        if (ret)
                abort();
        ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
        if (ret)
                abort();
        /* Check again with signals disabled */
        perf_thread = pthread_getspecific(perf_counter_key);
        if (perf_thread)
                goto skip;
        perf_thread = zmalloc(sizeof(*perf_thread));
        if (!perf_thread)
                abort();
        CDS_INIT_LIST_HEAD(&perf_thread->rcu_field_list);
        ret = pthread_setspecific(perf_counter_key, perf_thread);
        if (ret)
                abort();
skip:
        ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
        if (ret)
                abort();
        return perf_thread;
}

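/*
 * Create the calling thread's instance of a perf counter field: open
 * (and, on capable architectures, mmap) the event, then link it on
 * both the per-thread RCU list (traversed lock-free by the fast path)
 * and the per-field list (traversed at field teardown). Signals are
 * blocked for the same re-entrancy reason as in
 * alloc_perf_counter_thread().
 */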
static
struct lttng_perf_counter_thread_field *
        add_thread_field(struct lttng_perf_counter_field *perf_field,
                struct lttng_perf_counter_thread *perf_thread)
{
        struct lttng_perf_counter_thread_field *thread_field;
        sigset_t newmask, oldmask;
        int ret;

        ret = sigfillset(&newmask);
        if (ret)
                abort();
        ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
        if (ret)
                abort();
        /* Check again with signals disabled */
        cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
                        rcu_field_node) {
                if (thread_field->field == perf_field)
                        goto skip;
        }
        thread_field = zmalloc(sizeof(*thread_field));
        if (!thread_field)
                abort();
        thread_field->field = perf_field;
        thread_field->fd = open_perf_fd(&perf_field->attr);
        if (thread_field->fd >= 0)
                setup_perf(thread_field);
        /*
         * Note: thread_field->pc can be NULL if setup_perf() fails.
         * Also, thread_field->fd can be -1 if open_perf_fd() fails.
         */
        lttng_perf_lock();
        cds_list_add_rcu(&thread_field->rcu_field_node,
                        &perf_thread->rcu_field_list);
        cds_list_add(&thread_field->thread_field_node,
                        &perf_field->thread_field_list);
        lttng_perf_unlock();
skip:
        ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
        if (ret)
                abort();
        return thread_field;
}

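/*
 * Fast-path lookup of the calling thread's instance of @field,
 * allocating the per-thread state and the thread field on first use.
 */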
static
struct lttng_perf_counter_thread_field *
        get_thread_field(struct lttng_perf_counter_field *field)
{
        struct lttng_perf_counter_thread *perf_thread;
        struct lttng_perf_counter_thread_field *thread_field;

        perf_thread = pthread_getspecific(perf_counter_key);
        if (!perf_thread)
                perf_thread = alloc_perf_counter_thread();
        cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
                        rcu_field_node) {
                if (thread_field->field == field)
                        return thread_field;
        }
        /* perf_counter_thread_field not found, need to add one */
        return add_thread_field(field, perf_thread);
}

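/*
 * Tracer callbacks: wrapper_perf_counter_read() fetches the current
 * value for the calling thread; perf_counter_record() serializes it as
 * an aligned uint64_t into the event's ring buffer slot; and
 * perf_counter_get_value() exposes it to filters and triggers as an
 * s64.
 */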
static
uint64_t wrapper_perf_counter_read(struct lttng_ust_ctx_field *field)
{
        struct lttng_perf_counter_field *perf_field;
        struct lttng_perf_counter_thread_field *perf_thread_field;

        perf_field = (struct lttng_perf_counter_field *) field->priv;
        perf_thread_field = get_thread_field(perf_field);
        return arch_read_perf_counter(perf_thread_field);
}

static
void perf_counter_record(struct lttng_ust_ctx_field *field,
                struct lttng_ust_lib_ring_buffer_ctx *ctx,
                struct lttng_channel *chan)
{
        uint64_t value;

        value = wrapper_perf_counter_read(field);
        lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
        chan->ops->event_write(ctx, &value, sizeof(value));
}

static
void perf_counter_get_value(struct lttng_ust_ctx_field *field,
                struct lttng_ust_ctx_value *value)
{
        value->u.s64 = wrapper_perf_counter_read(field);
}

/* Called with perf lock held */
static
void lttng_destroy_perf_thread_field(
                struct lttng_perf_counter_thread_field *thread_field)
{
        close_perf_fd(thread_field->fd);
        unmap_perf_page(thread_field->pc);
        cds_list_del_rcu(&thread_field->rcu_field_node);
        cds_list_del(&thread_field->thread_field_node);
        free(thread_field);
}

static
void lttng_destroy_perf_thread_key(void *_key)
{
        struct lttng_perf_counter_thread *perf_thread = _key;
        struct lttng_perf_counter_thread_field *pos, *p;

        lttng_perf_lock();
        cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
                        rcu_field_node)
                lttng_destroy_perf_thread_field(pos);
        lttng_perf_unlock();
        free(perf_thread);
}

/* Called with UST lock held */
static
void lttng_destroy_perf_counter_field(struct lttng_ust_ctx_field *field)
{
        struct lttng_perf_counter_field *perf_field;
        struct lttng_perf_counter_thread_field *pos, *p;

        free((char *) field->event_field->name);
        perf_field = (struct lttng_perf_counter_field *) field->priv;
        /*
         * This put is performed when no thread can concurrently
         * perform a "get", thanks to the urcu-bp grace period. Holding
         * the lttng perf lock protects against concurrent modification
         * of the per-thread thread field list.
         */
        lttng_perf_lock();
        cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
                        thread_field_node)
                lttng_destroy_perf_thread_field(pos);
        lttng_perf_unlock();
        free(perf_field);
}

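/*
 * Whether to set attr.exclude_kernel. A best-effort reading of the
 * #ifdef below: on ARMv7 the PMU/perf driver combination does not
 * support excluding kernel-mode counts, and requesting exclusion would
 * make perf_event_open() fail, so exclude_kernel is left cleared there.
 */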
#ifdef __ARM_ARCH_7A__

static
int perf_get_exclude_kernel(void)
{
        return 0;
}

#else /* __ARM_ARCH_7A__ */

static
int perf_get_exclude_kernel(void)
{
        return 1;
}

#endif /* __ARM_ARCH_7A__ */

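/*
 * Append a perf counter context field to *ctx. @type and @config take
 * the values defined by the perf_event_open(2) ABI; for example, a
 * hypothetical caller requesting a CPU-cycles counter would pass
 * type = PERF_TYPE_HARDWARE, config = PERF_COUNT_HW_CPU_CYCLES, with a
 * display name such as "perf_thread_cpu_cycles". Returns 0 on success,
 * a negative errno-style value on error.
 */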
/* Called with UST lock held */
int lttng_add_perf_counter_to_ctx(uint32_t type,
                uint64_t config,
                const char *name,
                struct lttng_ust_ctx **ctx)
{
        struct lttng_ust_ctx_field *field;
        struct lttng_ust_type_common *ust_type;
        struct lttng_perf_counter_field *perf_field;
        char *name_alloc;
        int ret;

        name_alloc = strdup(name);
        if (!name_alloc) {
                ret = -ENOMEM;
                goto name_alloc_error;
        }
        perf_field = zmalloc(sizeof(*perf_field));
        if (!perf_field) {
                ret = -ENOMEM;
                goto perf_field_alloc_error;
        }
        ust_type = lttng_ust_create_type_integer(sizeof(uint64_t) * CHAR_BIT,
                        lttng_alignof(uint64_t) * CHAR_BIT,
                        lttng_is_signed_type(uint64_t),
                        BYTE_ORDER, 10);
        if (!ust_type) {
                ret = -ENOMEM;
                goto type_alloc_error;
        }
        field = lttng_append_context(ctx);
        if (!field) {
                ret = -ENOMEM;
                goto append_context_error;
        }
        if (lttng_find_context(*ctx, name_alloc)) {
                ret = -EEXIST;
                goto find_error;
        }

        field->destroy = lttng_destroy_perf_counter_field;

        field->event_field->name = name_alloc;
        field->event_field->type = ust_type;
        field->get_size = perf_counter_get_size;
        field->record = perf_counter_record;
        field->get_value = perf_counter_get_value;

        perf_field->attr.type = type;
        perf_field->attr.config = config;
        perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
        CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
        field->priv = perf_field;

        /* Ensure that this perf counter can be used in this process. */
        ret = open_perf_fd(&perf_field->attr);
        if (ret < 0) {
                ret = -ENODEV;
                goto setup_error;
        }
        close_perf_fd(ret);

        /*
         * Contexts can only be added before tracing is started, so we
         * don't have to synchronize against concurrent threads using
         * the field here.
         */

        lttng_context_update(*ctx);
        return 0;

setup_error:
find_error:
        lttng_remove_context_field(ctx, field);
append_context_error:
        lttng_ust_destroy_type(ust_type);
type_alloc_error:
        free(perf_field);
perf_field_alloc_error:
        free(name_alloc);
name_alloc_error:
        return ret;
}

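/*
 * One-time initialization: create the pthread key whose destructor,
 * lttng_destroy_perf_thread_key(), tears down a thread's perf state at
 * thread exit. pthread_key_create() returns a positive errno value on
 * failure, hence the negation to the usual -errno convention.
 */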
int lttng_perf_counter_init(void)
{
        int ret;

        ret = pthread_key_create(&perf_counter_key,
                        lttng_destroy_perf_thread_key);
        if (ret)
                ret = -ret;
        return ret;
}

void lttng_perf_counter_exit(void)
{
        int ret;

        ret = pthread_key_delete(perf_counter_key);
        if (ret) {
                errno = ret;
                PERROR("Error in pthread_key_delete");
        }
}