Code cleanup in contexts
[lttng-ust.git] / liblttng-ust / lttng-context-perf-counters.c
/*
 * lttng-context-perf-counters.c
 *
 * LTTng UST performance monitoring counters (perf-counters) integration.
 *
 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <sys/types.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <limits.h>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <lttng/ust-events.h>
#include <lttng/ust-tracer.h>
#include <lttng/ringbuffer-config.h>
#include <urcu/system.h>
#include <urcu/arch.h>
#include <urcu/rculist.h>
#include <helper.h>
#include <urcu/ref.h>
#include <usterr-signal-safe.h>
#include <signal.h>
#include "perf_event.h"
#include "lttng-tracer-core.h"

/*
 * We use a global perf counter key and iterate on per-thread RCU lists
 * of fields in the fast path. Even though this does not provide the
 * best possible fast-path complexity, it guarantees that session
 * teardown cannot race against thread exit.
 *
 * Updates and traversals of thread_field_list are protected by the UST
 * lock. Updates to rcu_field_list are protected by the UST lock.
 */

struct lttng_perf_counter_thread_field {
	struct lttng_perf_counter_field *field;	/* Back reference */
	struct perf_event_mmap_page *pc;
	struct cds_list_head thread_field_node;	/* Per-field list of thread fields (node) */
	struct cds_list_head rcu_field_node;	/* RCU per-thread list of fields (node) */
	int fd;					/* Perf FD */
};

struct lttng_perf_counter_thread {
	struct cds_list_head rcu_field_list;	/* RCU per-thread list of fields */
};

struct lttng_perf_counter_field {
	struct perf_event_attr attr;
	struct cds_list_head thread_field_list;	/* Per-field list of thread fields */
};

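/*
 * Thread-specific data key mapping each thread to its
 * struct lttng_perf_counter_thread. Created by lttng_perf_counter_init(),
 * with lttng_destroy_perf_thread_key() as destructor.
 */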
static pthread_key_t perf_counter_key;

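/*
 * Compute the space needed to record the context field: one uint64_t,
 * plus the padding required to align it at the current ring buffer
 * offset.
 */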
static
size_t perf_counter_get_size(struct lttng_ctx_field *field, size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
	size += sizeof(uint64_t);
	return size;
}

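/*
 * Fallback counter read: fetch the 64-bit counter value with a read(2)
 * on the perf event file descriptor. Returns 0 when the FD has been
 * closed or the read fails.
 */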
static
uint64_t read_perf_counter_syscall(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint64_t count;
	ssize_t len;

	if (caa_unlikely(thread_field->fd < 0))
		return 0;

	/*
	 * Compare against ssize_t rather than sizeof(): a signed/unsigned
	 * comparison would convert a -1 error return to SIZE_MAX and let
	 * it slip through.
	 */
	len = read(thread_field->fd, &count, sizeof(count));
	if (caa_unlikely(len != (ssize_t) sizeof(count)))
		return 0;

	return count;
}

#if defined(__x86_64__) || defined(__i386__)

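/*
 * Read an x86 hardware performance counter directly from user space
 * with the rdpmc instruction, avoiding a system call.
 */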
static
uint64_t rdpmc(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));

	return low | ((uint64_t) high) << 32;
}

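/*
 * The kernel advertises user-space rdpmc support through
 * cap_user_rdpmc, which is only meaningful when cap_bit0_is_deprecated
 * is set (Linux >= 3.12); on older kernels the capability bits are
 * ambiguous, so fall back on the read syscall.
 */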
static
bool has_rdpmc(struct perf_event_mmap_page *pc)
{
	if (caa_unlikely(!pc->cap_bit0_is_deprecated))
		return false;
	/* Since Linux kernel 3.12. */
	return pc->cap_user_rdpmc;
}

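/*
 * Fast-path counter read. The mmap'd control page acts as a seqlock:
 * pc->lock is incremented by the kernel around updates, so the
 * rdpmc-based read is retried whenever it changes under us.
 */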
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	uint32_t seq, idx;
	uint64_t count;
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (caa_unlikely(!pc))
		return 0;

	do {
		seq = CMM_LOAD_SHARED(pc->lock);
		cmm_barrier();

		idx = pc->index;
		if (caa_likely(has_rdpmc(pc) && idx)) {
			int64_t pmcval;

			pmcval = rdpmc(idx - 1);
			/* Sign-extend the pmc register result. */
			pmcval <<= 64 - pc->pmc_width;
			pmcval >>= 64 - pc->pmc_width;
			count = pc->offset + pmcval;
		} else {
			/* Fall-back on system call if rdpmc cannot be used. */
			return read_perf_counter_syscall(thread_field);
		}
		cmm_barrier();
	} while (CMM_LOAD_SHARED(pc->lock) != seq);

	return count;
}

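/*
 * Tell whether the perf FD must stay open after mmap: when rdpmc is
 * unavailable, the syscall fallback still needs it.
 */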
static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	struct perf_event_mmap_page *pc = thread_field->pc;

	if (!pc)
		return 0;
	return !has_rdpmc(pc);
}

#else

/* Generic (slow) implementation using a read system call. */
static
uint64_t arch_read_perf_counter(
		struct lttng_perf_counter_thread_field *thread_field)
{
	return read_perf_counter_syscall(thread_field);
}

static
int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
{
	return 1;
}

#endif

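/*
 * glibc provides no wrapper for perf_event_open(2); invoke it through
 * syscall(2).
 */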
static
int sys_perf_event_open(struct perf_event_attr *attr,
		pid_t pid, int cpu, int group_fd,
		unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu,
			group_fd, flags);
}

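/*
 * Open a counter for the calling thread (pid 0), on any CPU (cpu -1),
 * without an event group.
 */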
static
int open_perf_fd(struct perf_event_attr *attr)
{
	int fd;

	fd = sys_perf_event_open(attr, 0, -1, -1, 0);
	if (fd < 0)
		return -1;

	return fd;
}

static
void close_perf_fd(int fd)
{
	int ret;

	if (fd < 0)
		return;

	ret = close(fd);
	if (ret) {
		perror("Error closing LTTng-UST perf event FD");
	}
}

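/*
 * Map the perf event control page read-only so the counter can be read
 * from user space, and close the FD right away when the architecture
 * no longer needs it (i.e. when rdpmc can be used).
 */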
static void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
{
	void *perf_addr;

	perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
			PROT_READ, MAP_SHARED, thread_field->fd, 0);
	if (perf_addr == MAP_FAILED)
		perf_addr = NULL;
	thread_field->pc = perf_addr;

	if (!arch_perf_keep_fd(thread_field)) {
		close_perf_fd(thread_field->fd);
		thread_field->fd = -1;
	}
}

static
void unmap_perf_page(struct perf_event_mmap_page *pc)
{
	int ret;

	if (!pc)
		return;
	ret = munmap(pc, sizeof(struct perf_event_mmap_page));
	if (ret < 0) {
		PERROR("Error in munmap");
		abort();
	}
}

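/*
 * Allocate and register the calling thread's perf counter TSD. All
 * signals are blocked throughout so that a signal handler (which may
 * itself hit a tracepoint on this thread) cannot race with TSD
 * creation, hence the re-check once signals are disabled.
 */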
static
struct lttng_perf_counter_thread *alloc_perf_counter_thread(void)
{
	struct lttng_perf_counter_thread *perf_thread;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	perf_thread = pthread_getspecific(perf_counter_key);
	if (perf_thread)
		goto skip;
	perf_thread = zmalloc(sizeof(*perf_thread));
	if (!perf_thread)
		abort();
	CDS_INIT_LIST_HEAD(&perf_thread->rcu_field_list);
	ret = pthread_setspecific(perf_counter_key, perf_thread);
	if (ret)
		abort();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return perf_thread;
}

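/*
 * Create the per-thread state (perf FD and mmap'd page) for a given
 * context field and link it on both the per-thread RCU list and the
 * per-field thread list, under the UST lock and with signals blocked.
 */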
static
struct lttng_perf_counter_thread_field *
	add_thread_field(struct lttng_perf_counter_field *perf_field,
		struct lttng_perf_counter_thread *perf_thread)
{
	struct lttng_perf_counter_thread_field *thread_field;
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();
	/* Check again with signals disabled */
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == perf_field)
			goto skip;
	}
	thread_field = zmalloc(sizeof(*thread_field));
	if (!thread_field)
		abort();
	thread_field->field = perf_field;
	thread_field->fd = open_perf_fd(&perf_field->attr);
	if (thread_field->fd >= 0)
		setup_perf(thread_field);
	/*
	 * Note: thread_field->pc can be NULL if setup_perf() fails.
	 * Also, thread_field->fd can be -1 if open_perf_fd() fails.
	 */
	ust_lock_nocheck();
	cds_list_add_rcu(&thread_field->rcu_field_node,
			&perf_thread->rcu_field_list);
	cds_list_add(&thread_field->thread_field_node,
			&perf_field->thread_field_list);
	ust_unlock();
skip:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	return thread_field;
}

static
struct lttng_perf_counter_thread_field *
	get_thread_field(struct lttng_perf_counter_field *field)
{
	struct lttng_perf_counter_thread *perf_thread;
	struct lttng_perf_counter_thread_field *thread_field;

	perf_thread = pthread_getspecific(perf_counter_key);
	if (!perf_thread)
		perf_thread = alloc_perf_counter_thread();
	cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
			rcu_field_node) {
		if (thread_field->field == field)
			return thread_field;
	}
	/* perf_counter_thread_field not found, need to add one */
	return add_thread_field(field, perf_thread);
}

static
uint64_t wrapper_perf_counter_read(struct lttng_ctx_field *field)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *perf_thread_field;

	perf_field = field->u.perf_counter;
	perf_thread_field = get_thread_field(perf_field);
	return arch_read_perf_counter(perf_thread_field);
}

static
void perf_counter_record(struct lttng_ctx_field *field,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	uint64_t value;

	value = wrapper_perf_counter_read(field);
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(value));
	chan->ops->event_write(ctx, &value, sizeof(value));
}

static
void perf_counter_get_value(struct lttng_ctx_field *field,
		struct lttng_ctx_value *value)
{
	value->u.s64 = wrapper_perf_counter_read(field);
}

/* Called with UST lock held */
static
void lttng_destroy_perf_thread_field(
		struct lttng_perf_counter_thread_field *thread_field)
{
	close_perf_fd(thread_field->fd);
	unmap_perf_page(thread_field->pc);
	cds_list_del_rcu(&thread_field->rcu_field_node);
	cds_list_del(&thread_field->thread_field_node);
	free(thread_field);
}

static
void lttng_destroy_perf_thread_key(void *_key)
{
	struct lttng_perf_counter_thread *perf_thread = _key;
	struct lttng_perf_counter_thread_field *pos, *p;

	ust_lock_nocheck();
	cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
			rcu_field_node)
		lttng_destroy_perf_thread_field(pos);
	ust_unlock();
	free(perf_thread);
}

/* Called with UST lock held */
static
void lttng_destroy_perf_counter_field(struct lttng_ctx_field *field)
{
	struct lttng_perf_counter_field *perf_field;
	struct lttng_perf_counter_thread_field *pos, *p;

	free((char *) field->event_field.name);
	perf_field = field->u.perf_counter;
	/*
	 * This put is performed when no threads can concurrently
	 * perform a "get", thanks to the urcu-bp grace period.
	 */
	cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
			thread_field_node)
		lttng_destroy_perf_thread_field(pos);
	free(perf_field);
}

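/*
 * Do not request exclusion of kernel-mode events on ARMv7: setting
 * exclude_kernel is reported to make perf_event_open(2) fail on ARMv7
 * PMUs, so count in both user and kernel mode there. Everywhere else,
 * restrict counting to user space.
 */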
#ifdef __ARM_ARCH_7A__

static
int perf_get_exclude_kernel(void)
{
	return 0;
}

#else /* __ARM_ARCH_7A__ */

static
int perf_get_exclude_kernel(void)
{
	return 1;
}

#endif /* __ARM_ARCH_7A__ */

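/*
 * Append a perf counter context field to *ctx. A throwaway counter is
 * opened first to validate that this event type/config is usable in
 * the current process before the field is published.
 */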
/* Called with UST lock held */
int lttng_add_perf_counter_to_ctx(uint32_t type,
		uint64_t config,
		const char *name,
		struct lttng_ctx **ctx)
{
	struct lttng_ctx_field *field;
	struct lttng_perf_counter_field *perf_field;
	char *name_alloc;
	int ret;

	name_alloc = strdup(name);
	if (!name_alloc) {
		ret = -ENOMEM;
		goto name_alloc_error;
	}
	perf_field = zmalloc(sizeof(*perf_field));
	if (!perf_field) {
		ret = -ENOMEM;
		goto perf_field_alloc_error;
	}
	field = lttng_append_context(ctx);
	if (!field) {
		ret = -ENOMEM;
		goto append_context_error;
	}
	if (lttng_find_context(*ctx, name_alloc)) {
		ret = -EEXIST;
		goto find_error;
	}

	field->destroy = lttng_destroy_perf_counter_field;

	field->event_field.name = name_alloc;
	field->event_field.type.atype = atype_integer;
	field->event_field.type.u.basic.integer.size =
			sizeof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.alignment =
			lttng_alignof(uint64_t) * CHAR_BIT;
	field->event_field.type.u.basic.integer.signedness =
			lttng_is_signed_type(uint64_t);
	field->event_field.type.u.basic.integer.reverse_byte_order = 0;
	field->event_field.type.u.basic.integer.base = 10;
	field->event_field.type.u.basic.integer.encoding = lttng_encode_none;
	field->get_size = perf_counter_get_size;
	field->record = perf_counter_record;
	field->get_value = perf_counter_get_value;

	perf_field->attr.type = type;
	perf_field->attr.config = config;
	perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
	CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
	field->u.perf_counter = perf_field;

	/* Ensure that this perf counter can be used in this process. */
	ret = open_perf_fd(&perf_field->attr);
	if (ret < 0) {
		ret = -ENODEV;
		goto setup_error;
	}
	close_perf_fd(ret);

	/*
	 * Contexts can only be added before tracing is started, so we
	 * don't have to synchronize against concurrent threads using
	 * the field here.
	 */

	lttng_context_update(*ctx);
	return 0;

setup_error:
find_error:
	lttng_remove_context_field(ctx, field);
append_context_error:
	free(perf_field);
perf_field_alloc_error:
	free(name_alloc);
name_alloc_error:
	return ret;
}

int lttng_perf_counter_init(void)
{
	int ret;

	ret = pthread_key_create(&perf_counter_key,
			lttng_destroy_perf_thread_key);
	if (ret)
		ret = -ret;
	return ret;
}

void lttng_perf_counter_exit(void)
{
	int ret;

	ret = pthread_key_delete(perf_counter_key);
	if (ret) {
		errno = ret;
		PERROR("Error in pthread_key_delete");
	}
}