1 /* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/jhash.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/uuid.h>
31
32 #include <wrapper/uuid.h>
33 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
34 #include <wrapper/random.h>
35 #include <wrapper/tracepoint.h>
36 #include <wrapper/list.h>
37 #include <wrapper/types.h>
38 #include <lttng-kernel-version.h>
39 #include <lttng-events.h>
40 #include <lttng-tracer.h>
41 #include <lttng-abi-old.h>
42 #include <lttng-endian.h>
43 #include <lttng-string-utils.h>
44 #include <wrapper/vzalloc.h>
45 #include <wrapper/ringbuffer/backend.h>
46 #include <wrapper/ringbuffer/frontend.h>
47 #include <wrapper/time.h>
48
49 #define METADATA_CACHE_DEFAULT_SIZE 4096
50
51 static LIST_HEAD(sessions);
52 static LIST_HEAD(lttng_transport_list);
53 /*
54 * Protect the sessions and metadata caches.
55 */
56 static DEFINE_MUTEX(sessions_mutex);
57 static struct kmem_cache *event_cache;
58
59 static void lttng_session_lazy_sync_enablers(struct lttng_session *session);
60 static void lttng_session_sync_enablers(struct lttng_session *session);
61 static void lttng_enabler_destroy(struct lttng_enabler *enabler);
62
63 static void _lttng_event_destroy(struct lttng_event *event);
64 static void _lttng_channel_destroy(struct lttng_channel *chan);
65 static int _lttng_event_unregister(struct lttng_event *event);
66 static
67 int _lttng_event_metadata_statedump(struct lttng_session *session,
68 struct lttng_channel *chan,
69 struct lttng_event *event);
70 static
71 int _lttng_session_metadata_statedump(struct lttng_session *session);
72 static
73 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
74 static
75 int _lttng_field_statedump(struct lttng_session *session,
76 const struct lttng_event_field *field,
77 size_t nesting);
78
79 void synchronize_trace(void)
80 {
81 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,1,0) || \
82 LTTNG_RHEL_KERNEL_RANGE(4,18,0,193,0,0, 4,19,0,0,0,0))
83 synchronize_rcu();
84 #else
85 synchronize_sched();
86 #endif
87
88 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0))
89 #ifdef CONFIG_PREEMPT_RT_FULL
90 synchronize_rcu();
91 #endif
92 #else /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
93 #ifdef CONFIG_PREEMPT_RT
94 synchronize_rcu();
95 #endif
96 #endif /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
97 }
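
/*
 * Illustrative sketch (not part of the original source): the intended usage
 * pattern for synchronize_trace() is the one followed by
 * lttng_session_destroy() below: unregister the probes first, wait for
 * in-flight probe callbacks to drain, and only then free the data those
 * callbacks may still reference.
 *
 *	ret = _lttng_event_unregister(event);	// stop new callbacks
 *	synchronize_trace();			// wait for in-flight callbacks
 *	_lttng_event_destroy(event);		// now safe to free
 */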
98
99 void lttng_lock_sessions(void)
100 {
101 mutex_lock(&sessions_mutex);
102 }
103
104 void lttng_unlock_sessions(void)
105 {
106 mutex_unlock(&sessions_mutex);
107 }
108
109 /*
110 * Called with sessions lock held.
111 */
112 int lttng_session_active(void)
113 {
114 struct lttng_session *iter;
115
116 list_for_each_entry(iter, &sessions, list) {
117 if (iter->active)
118 return 1;
119 }
120 return 0;
121 }
122
123 struct lttng_session *lttng_session_create(void)
124 {
125 struct lttng_session *session;
126 struct lttng_metadata_cache *metadata_cache;
127 int i;
128
129 mutex_lock(&sessions_mutex);
130 session = lttng_kvzalloc(sizeof(struct lttng_session), GFP_KERNEL);
131 if (!session)
132 goto err;
133 INIT_LIST_HEAD(&session->chan);
134 INIT_LIST_HEAD(&session->events);
135 lttng_guid_gen(&session->uuid);
136
137 metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
138 GFP_KERNEL);
139 if (!metadata_cache)
140 goto err_free_session;
141 metadata_cache->data = lttng_vzalloc(METADATA_CACHE_DEFAULT_SIZE);
142 if (!metadata_cache->data)
143 goto err_free_cache;
144 metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
145 kref_init(&metadata_cache->refcount);
146 mutex_init(&metadata_cache->lock);
147 session->metadata_cache = metadata_cache;
148 INIT_LIST_HEAD(&metadata_cache->metadata_stream);
149 memcpy(&metadata_cache->uuid, &session->uuid,
150 sizeof(metadata_cache->uuid));
151 INIT_LIST_HEAD(&session->enablers_head);
152 for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
153 INIT_HLIST_HEAD(&session->events_ht.table[i]);
154 list_add(&session->list, &sessions);
155 mutex_unlock(&sessions_mutex);
156 return session;
157
158 err_free_cache:
159 kfree(metadata_cache);
160 err_free_session:
161 lttng_kvfree(session);
162 err:
163 mutex_unlock(&sessions_mutex);
164 return NULL;
165 }
166
167 void metadata_cache_destroy(struct kref *kref)
168 {
169 struct lttng_metadata_cache *cache =
170 container_of(kref, struct lttng_metadata_cache, refcount);
171 vfree(cache->data);
172 kfree(cache);
173 }
174
175 void lttng_session_destroy(struct lttng_session *session)
176 {
177 struct lttng_channel *chan, *tmpchan;
178 struct lttng_event *event, *tmpevent;
179 struct lttng_metadata_stream *metadata_stream;
180 struct lttng_enabler *enabler, *tmpenabler;
181 int ret;
182
183 mutex_lock(&sessions_mutex);
184 WRITE_ONCE(session->active, 0);
185 list_for_each_entry(chan, &session->chan, list) {
186 ret = lttng_syscalls_unregister(chan);
187 WARN_ON(ret);
188 }
189 list_for_each_entry(event, &session->events, list) {
190 ret = _lttng_event_unregister(event);
191 WARN_ON(ret);
192 }
193 synchronize_trace(); /* Wait for in-flight events to complete */
194 list_for_each_entry(chan, &session->chan, list) {
195 ret = lttng_syscalls_destroy(chan);
196 WARN_ON(ret);
197 }
198 list_for_each_entry_safe(enabler, tmpenabler,
199 &session->enablers_head, node)
200 lttng_enabler_destroy(enabler);
201 list_for_each_entry_safe(event, tmpevent, &session->events, list)
202 _lttng_event_destroy(event);
203 list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
204 BUG_ON(chan->channel_type == METADATA_CHANNEL);
205 _lttng_channel_destroy(chan);
206 }
207 mutex_lock(&session->metadata_cache->lock);
208 list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
209 _lttng_metadata_channel_hangup(metadata_stream);
210 mutex_unlock(&session->metadata_cache->lock);
211 if (session->pid_tracker)
212 lttng_pid_tracker_destroy(session->pid_tracker);
213 kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
214 list_del(&session->list);
215 mutex_unlock(&sessions_mutex);
216 lttng_kvfree(session);
217 }
218
219 int lttng_session_statedump(struct lttng_session *session)
220 {
221 int ret;
222
223 mutex_lock(&sessions_mutex);
224 ret = lttng_statedump_start(session);
225 mutex_unlock(&sessions_mutex);
226 return ret;
227 }
228
229 int lttng_session_enable(struct lttng_session *session)
230 {
231 int ret = 0;
232 struct lttng_channel *chan;
233
234 mutex_lock(&sessions_mutex);
235 if (session->active) {
236 ret = -EBUSY;
237 goto end;
238 }
239
240 /* Set transient enabler state to "enabled" */
241 session->tstate = 1;
242
243 /*
244 * Snapshot the number of events per channel to know the type of header
245 * we need to use (compact headers can only encode event IDs below 31).
246 */
247 list_for_each_entry(chan, &session->chan, list) {
248 if (chan->header_type)
249 continue; /* don't change it if session stop/restart */
250 if (chan->free_event_id < 31)
251 chan->header_type = 1; /* compact */
252 else
253 chan->header_type = 2; /* large */
254 }
255
256 /* We need to sync enablers with session before activation. */
257 lttng_session_sync_enablers(session);
258
259 /* Clear each stream's quiescent state. */
260 list_for_each_entry(chan, &session->chan, list) {
261 if (chan->channel_type != METADATA_CHANNEL)
262 lib_ring_buffer_clear_quiescent_channel(chan->chan);
263 }
264
265 WRITE_ONCE(session->active, 1);
266 WRITE_ONCE(session->been_active, 1);
267 ret = _lttng_session_metadata_statedump(session);
268 if (ret) {
269 WRITE_ONCE(session->active, 0);
270 goto end;
271 }
272 ret = lttng_statedump_start(session);
273 if (ret)
274 WRITE_ONCE(session->active, 0);
275 end:
276 mutex_unlock(&sessions_mutex);
277 return ret;
278 }
279
280 int lttng_session_disable(struct lttng_session *session)
281 {
282 int ret = 0;
283 struct lttng_channel *chan;
284
285 mutex_lock(&sessions_mutex);
286 if (!session->active) {
287 ret = -EBUSY;
288 goto end;
289 }
290 WRITE_ONCE(session->active, 0);
291
292 /* Set transient enabler state to "disabled" */
293 session->tstate = 0;
294 lttng_session_sync_enablers(session);
295
296 /* Set each stream's quiescent state. */
297 list_for_each_entry(chan, &session->chan, list) {
298 if (chan->channel_type != METADATA_CHANNEL)
299 lib_ring_buffer_set_quiescent_channel(chan->chan);
300 }
301 end:
302 mutex_unlock(&sessions_mutex);
303 return ret;
304 }
305
306 int lttng_session_metadata_regenerate(struct lttng_session *session)
307 {
308 int ret = 0;
309 struct lttng_channel *chan;
310 struct lttng_event *event;
311 struct lttng_metadata_cache *cache = session->metadata_cache;
312 struct lttng_metadata_stream *stream;
313
314 mutex_lock(&sessions_mutex);
315 if (!session->active) {
316 ret = -EBUSY;
317 goto end;
318 }
319
320 mutex_lock(&cache->lock);
321 memset(cache->data, 0, cache->cache_alloc);
322 cache->metadata_written = 0;
323 cache->version++;
324 list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list) {
325 stream->metadata_out = 0;
326 stream->metadata_in = 0;
327 }
328 mutex_unlock(&cache->lock);
329
330 session->metadata_dumped = 0;
331 list_for_each_entry(chan, &session->chan, list) {
332 chan->metadata_dumped = 0;
333 }
334
335 list_for_each_entry(event, &session->events, list) {
336 event->metadata_dumped = 0;
337 }
338
339 ret = _lttng_session_metadata_statedump(session);
340
341 end:
342 mutex_unlock(&sessions_mutex);
343 return ret;
344 }
345
346 int lttng_channel_enable(struct lttng_channel *channel)
347 {
348 int ret = 0;
349
350 mutex_lock(&sessions_mutex);
351 if (channel->channel_type == METADATA_CHANNEL) {
352 ret = -EPERM;
353 goto end;
354 }
355 if (channel->enabled) {
356 ret = -EEXIST;
357 goto end;
358 }
359 /* Set transient enabler state to "enabled" */
360 channel->tstate = 1;
361 lttng_session_sync_enablers(channel->session);
362 /* Set atomically the state to "enabled" */
363 WRITE_ONCE(channel->enabled, 1);
364 end:
365 mutex_unlock(&sessions_mutex);
366 return ret;
367 }
368
369 int lttng_channel_disable(struct lttng_channel *channel)
370 {
371 int ret = 0;
372
373 mutex_lock(&sessions_mutex);
374 if (channel->channel_type == METADATA_CHANNEL) {
375 ret = -EPERM;
376 goto end;
377 }
378 if (!channel->enabled) {
379 ret = -EEXIST;
380 goto end;
381 }
382 /* Set atomically the state to "disabled" */
383 WRITE_ONCE(channel->enabled, 0);
384 /* Set transient enabler state to "disabled" */
385 channel->tstate = 0;
386 lttng_session_sync_enablers(channel->session);
387 end:
388 mutex_unlock(&sessions_mutex);
389 return ret;
390 }
391
392 int lttng_event_enable(struct lttng_event *event)
393 {
394 int ret = 0;
395
396 mutex_lock(&sessions_mutex);
397 if (event->chan->channel_type == METADATA_CHANNEL) {
398 ret = -EPERM;
399 goto end;
400 }
401 if (event->enabled) {
402 ret = -EEXIST;
403 goto end;
404 }
405 switch (event->instrumentation) {
406 case LTTNG_KERNEL_TRACEPOINT:
407 case LTTNG_KERNEL_SYSCALL:
408 ret = -EINVAL;
409 break;
410 case LTTNG_KERNEL_KPROBE:
411 case LTTNG_KERNEL_UPROBE:
412 case LTTNG_KERNEL_NOOP:
413 WRITE_ONCE(event->enabled, 1);
414 break;
415 case LTTNG_KERNEL_KRETPROBE:
416 ret = lttng_kretprobes_event_enable_state(event, 1);
417 break;
418 case LTTNG_KERNEL_FUNCTION: /* Fall-through. */
419 default:
420 WARN_ON_ONCE(1);
421 ret = -EINVAL;
422 }
423 end:
424 mutex_unlock(&sessions_mutex);
425 return ret;
426 }
427
428 int lttng_event_disable(struct lttng_event *event)
429 {
430 int ret = 0;
431
432 mutex_lock(&sessions_mutex);
433 if (event->chan->channel_type == METADATA_CHANNEL) {
434 ret = -EPERM;
435 goto end;
436 }
437 if (!event->enabled) {
438 ret = -EEXIST;
439 goto end;
440 }
441 switch (event->instrumentation) {
442 case LTTNG_KERNEL_TRACEPOINT:
443 case LTTNG_KERNEL_SYSCALL:
444 ret = -EINVAL;
445 break;
446 case LTTNG_KERNEL_KPROBE:
447 case LTTNG_KERNEL_UPROBE:
448 case LTTNG_KERNEL_NOOP:
449 WRITE_ONCE(event->enabled, 0);
450 break;
451 case LTTNG_KERNEL_KRETPROBE:
452 ret = lttng_kretprobes_event_enable_state(event, 0);
453 break;
454 case LTTNG_KERNEL_FUNCTION: /* Fall-through. */
455 default:
456 WARN_ON_ONCE(1);
457 ret = -EINVAL;
458 }
459 end:
460 mutex_unlock(&sessions_mutex);
461 return ret;
462 }
463
464 static struct lttng_transport *lttng_transport_find(const char *name)
465 {
466 struct lttng_transport *transport;
467
468 list_for_each_entry(transport, &lttng_transport_list, node) {
469 if (!strcmp(transport->name, name))
470 return transport;
471 }
472 return NULL;
473 }
474
475 struct lttng_channel *lttng_channel_create(struct lttng_session *session,
476 const char *transport_name,
477 void *buf_addr,
478 size_t subbuf_size, size_t num_subbuf,
479 unsigned int switch_timer_interval,
480 unsigned int read_timer_interval,
481 enum channel_type channel_type)
482 {
483 struct lttng_channel *chan;
484 struct lttng_transport *transport = NULL;
485
486 mutex_lock(&sessions_mutex);
487 if (session->been_active && channel_type != METADATA_CHANNEL)
488 goto active; /* Refuse to add channel to active session */
489 transport = lttng_transport_find(transport_name);
490 if (!transport) {
491 printk(KERN_WARNING "LTTng transport %s not found\n",
492 transport_name);
493 goto notransport;
494 }
495 if (!try_module_get(transport->owner)) {
496 printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
497 goto notransport;
498 }
499 chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
500 if (!chan)
501 goto nomem;
502 chan->session = session;
503 chan->id = session->free_chan_id++;
504 chan->ops = &transport->ops;
505 /*
506 * Note: the channel creation op already writes into the packet
507 * headers. Therefore the "chan" information used as input
508 * should be already accessible.
509 */
510 chan->chan = transport->ops.channel_create(transport_name,
511 chan, buf_addr, subbuf_size, num_subbuf,
512 switch_timer_interval, read_timer_interval);
513 if (!chan->chan)
514 goto create_error;
515 chan->tstate = 1;
516 chan->enabled = 1;
517 chan->transport = transport;
518 chan->channel_type = channel_type;
519 list_add(&chan->list, &session->chan);
520 mutex_unlock(&sessions_mutex);
521 return chan;
522
523 create_error:
524 kfree(chan);
525 nomem:
526 if (transport)
527 module_put(transport->owner);
528 notransport:
529 active:
530 mutex_unlock(&sessions_mutex);
531 return NULL;
532 }
533
534 /*
535 * Only used internally at session destruction for per-cpu channels, and
536 * when the metadata channel is released.
537 * Needs to be called with sessions mutex held.
538 */
539 static
540 void _lttng_channel_destroy(struct lttng_channel *chan)
541 {
542 chan->ops->channel_destroy(chan->chan);
543 module_put(chan->transport->owner);
544 list_del(&chan->list);
545 lttng_destroy_context(chan->ctx);
546 kfree(chan);
547 }
548
549 void lttng_metadata_channel_destroy(struct lttng_channel *chan)
550 {
551 BUG_ON(chan->channel_type != METADATA_CHANNEL);
552
553 /* Protect the metadata cache with the sessions_mutex. */
554 mutex_lock(&sessions_mutex);
555 _lttng_channel_destroy(chan);
556 mutex_unlock(&sessions_mutex);
557 }
558 EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
559
560 static
561 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
562 {
563 stream->finalized = 1;
564 wake_up_interruptible(&stream->read_wait);
565 }
566
567 /*
568 * Supports event creation while tracing session is active.
569 * Needs to be called with sessions mutex held.
570 */
571 struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
572 struct lttng_kernel_event *event_param,
573 void *filter,
574 const struct lttng_event_desc *event_desc,
575 enum lttng_kernel_instrumentation itype)
576 {
577 struct lttng_session *session = chan->session;
578 struct lttng_event *event;
579 const char *event_name;
580 struct hlist_head *head;
581 size_t name_len;
582 uint32_t hash;
583 int ret;
584
585 if (chan->free_event_id == -1U) {
586 ret = -EMFILE;
587 goto full;
588 }
589
590 switch (itype) {
591 case LTTNG_KERNEL_TRACEPOINT:
592 event_name = event_desc->name;
593 break;
594 case LTTNG_KERNEL_KPROBE:
595 case LTTNG_KERNEL_UPROBE:
596 case LTTNG_KERNEL_KRETPROBE:
597 case LTTNG_KERNEL_NOOP:
598 case LTTNG_KERNEL_SYSCALL:
599 event_name = event_param->name;
600 break;
601 case LTTNG_KERNEL_FUNCTION: /* Fall-through. */
602 default:
603 WARN_ON_ONCE(1);
604 ret = -EINVAL;
605 goto type_error;
606 }
607 name_len = strlen(event_name);
608 hash = jhash(event_name, name_len, 0);
609 head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
610 lttng_hlist_for_each_entry(event, head, hlist) {
611 WARN_ON_ONCE(!event->desc);
612 if (!strncmp(event->desc->name, event_name,
613 LTTNG_KERNEL_SYM_NAME_LEN - 1)
614 && chan == event->chan) {
615 ret = -EEXIST;
616 goto exist;
617 }
618 }
619
620 event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
621 if (!event) {
622 ret = -ENOMEM;
623 goto cache_error;
624 }
625 event->chan = chan;
626 event->filter = filter;
627 event->id = chan->free_event_id++;
628 event->instrumentation = itype;
629 event->evtype = LTTNG_TYPE_EVENT;
630 INIT_LIST_HEAD(&event->bytecode_runtime_head);
631 INIT_LIST_HEAD(&event->enablers_ref_head);
632
633 switch (itype) {
634 case LTTNG_KERNEL_TRACEPOINT:
635 /* Event will be enabled by enabler sync. */
636 event->enabled = 0;
637 event->registered = 0;
638 event->desc = lttng_event_get(event_name);
639 if (!event->desc) {
640 ret = -ENOENT;
641 goto register_error;
642 }
643 /* Populate lttng_event structure before event registration. */
644 smp_wmb();
645 break;
646 case LTTNG_KERNEL_KPROBE:
647 /*
648 * Needs to be explicitly enabled after creation, since
649 * we may want to apply filters.
650 */
651 event->enabled = 0;
652 event->registered = 1;
653 /*
654 * Populate lttng_event structure before event
655 * registration.
656 */
657 smp_wmb();
658 ret = lttng_kprobes_register(event_name,
659 event_param->u.kprobe.symbol_name,
660 event_param->u.kprobe.offset,
661 event_param->u.kprobe.addr,
662 event);
663 if (ret) {
664 ret = -EINVAL;
665 goto register_error;
666 }
667 ret = try_module_get(event->desc->owner);
668 WARN_ON_ONCE(!ret);
669 break;
670 case LTTNG_KERNEL_KRETPROBE:
671 {
672 struct lttng_event *event_return;
673
674 /* kretprobe defines 2 events */
675 /*
676 * Needs to be explicitly enabled after creation, since
677 * we may want to apply filters.
678 */
679 event->enabled = 0;
680 event->registered = 1;
681 event_return =
682 kmem_cache_zalloc(event_cache, GFP_KERNEL);
683 if (!event_return) {
684 ret = -ENOMEM;
685 goto register_error;
686 }
687 event_return->chan = chan;
688 event_return->filter = filter;
689 event_return->id = chan->free_event_id++;
690 event_return->enabled = 0;
691 event_return->registered = 1;
692 event_return->instrumentation = itype;
693 INIT_LIST_HEAD(&event_return->bytecode_runtime_head);
694 INIT_LIST_HEAD(&event_return->enablers_ref_head);
695 /*
696 * Populate lttng_event structure before kretprobe registration.
697 */
698 smp_wmb();
699 ret = lttng_kretprobes_register(event_name,
700 event_param->u.kretprobe.symbol_name,
701 event_param->u.kretprobe.offset,
702 event_param->u.kretprobe.addr,
703 event, event_return);
704 if (ret) {
705 kmem_cache_free(event_cache, event_return);
706 ret = -EINVAL;
707 goto register_error;
708 }
709 /* Take 2 refs on the module: one per event. */
710 ret = try_module_get(event->desc->owner);
711 WARN_ON_ONCE(!ret);
712 ret = try_module_get(event->desc->owner);
713 WARN_ON_ONCE(!ret);
714 ret = _lttng_event_metadata_statedump(chan->session, chan,
715 event_return);
716 WARN_ON_ONCE(ret > 0);
717 if (ret) {
718 kmem_cache_free(event_cache, event_return);
719 module_put(event->desc->owner);
720 module_put(event->desc->owner);
721 goto statedump_error;
722 }
723 list_add(&event_return->list, &chan->session->events);
724 break;
725 }
726 case LTTNG_KERNEL_NOOP:
727 case LTTNG_KERNEL_SYSCALL:
728 /*
729 * Needs to be explicitly enabled after creation, since
730 * we may want to apply filters.
731 */
732 event->enabled = 0;
733 event->registered = 0;
734 event->desc = event_desc;
735 switch (event_param->u.syscall.entryexit) {
736 case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
737 ret = -EINVAL;
738 goto register_error;
739 case LTTNG_KERNEL_SYSCALL_ENTRY:
740 event->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
741 break;
742 case LTTNG_KERNEL_SYSCALL_EXIT:
743 event->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
744 break;
745 }
746 switch (event_param->u.syscall.abi) {
747 case LTTNG_KERNEL_SYSCALL_ABI_ALL:
748 ret = -EINVAL;
749 goto register_error;
750 case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
751 event->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
752 break;
753 case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
754 event->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
755 break;
756 }
757 if (!event->desc) {
758 ret = -EINVAL;
759 goto register_error;
760 }
761 break;
762 case LTTNG_KERNEL_UPROBE:
763 /*
764 * Needs to be explicitly enabled after creation, since
765 * we may want to apply filters.
766 */
767 event->enabled = 0;
768 event->registered = 1;
769
770 /*
771 * Populate lttng_event structure before event
772 * registration.
773 */
774 smp_wmb();
775
776 ret = lttng_uprobes_register(event_param->name,
777 event_param->u.uprobe.fd,
778 event);
779 if (ret)
780 goto register_error;
781 ret = try_module_get(event->desc->owner);
782 WARN_ON_ONCE(!ret);
783 break;
784 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
785 default:
786 WARN_ON_ONCE(1);
787 ret = -EINVAL;
788 goto register_error;
789 }
790 ret = _lttng_event_metadata_statedump(chan->session, chan, event);
791 WARN_ON_ONCE(ret > 0);
792 if (ret) {
793 goto statedump_error;
794 }
795 hlist_add_head(&event->hlist, head);
796 list_add(&event->list, &chan->session->events);
797 return event;
798
799 statedump_error:
800 /* If a statedump error occurs, events will not be readable. */
801 register_error:
802 kmem_cache_free(event_cache, event);
803 cache_error:
804 exist:
805 type_error:
806 full:
807 return ERR_PTR(ret);
808 }
809
810 struct lttng_event *lttng_event_create(struct lttng_channel *chan,
811 struct lttng_kernel_event *event_param,
812 void *filter,
813 const struct lttng_event_desc *event_desc,
814 enum lttng_kernel_instrumentation itype)
815 {
816 struct lttng_event *event;
817
818 mutex_lock(&sessions_mutex);
819 event = _lttng_event_create(chan, event_param, filter, event_desc,
820 itype);
821 mutex_unlock(&sessions_mutex);
822 return event;
823 }
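
/*
 * Illustrative sketch (not part of the original source): a possible in-kernel
 * sequence using the helpers above to trace a kprobe event. The transport
 * name "relay-discard", the PER_CPU_CHANNEL type and the probed symbol are
 * assumptions for the example; they depend on the ring-buffer client modules
 * loaded and on the traced kernel. Error handling is omitted.
 *
 *	struct lttng_session *session = lttng_session_create();
 *	struct lttng_channel *chan;
 *	struct lttng_event *event;
 *	struct lttng_kernel_event ev_param;
 *
 *	chan = lttng_channel_create(session, "relay-discard", NULL,
 *			262144, 4, 0, 0, PER_CPU_CHANNEL);
 *	memset(&ev_param, 0, sizeof(ev_param));
 *	strncpy(ev_param.name, "do_sys_open", LTTNG_KERNEL_SYM_NAME_LEN - 1);
 *	ev_param.instrumentation = LTTNG_KERNEL_KPROBE;
 *	strncpy(ev_param.u.kprobe.symbol_name, "do_sys_open",
 *			LTTNG_KERNEL_SYM_NAME_LEN - 1);
 *	event = lttng_event_create(chan, &ev_param, NULL, NULL,
 *			LTTNG_KERNEL_KPROBE);
 *	lttng_event_enable(event);	// kprobes start disabled (see above)
 *	lttng_session_enable(session);
 */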
824
825 /* Only used for tracepoints for now. */
826 static
827 void register_event(struct lttng_event *event)
828 {
829 const struct lttng_event_desc *desc;
830 int ret = -EINVAL;
831
832 if (event->registered)
833 return;
834
835 desc = event->desc;
836 switch (event->instrumentation) {
837 case LTTNG_KERNEL_TRACEPOINT:
838 ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
839 desc->probe_callback,
840 event);
841 break;
842 case LTTNG_KERNEL_SYSCALL:
843 ret = lttng_syscall_filter_enable(event->chan, event);
844 break;
845 case LTTNG_KERNEL_KPROBE:
846 case LTTNG_KERNEL_UPROBE:
847 case LTTNG_KERNEL_KRETPROBE:
848 case LTTNG_KERNEL_NOOP:
849 ret = 0;
850 break;
851 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
852 default:
853 WARN_ON_ONCE(1);
854 }
855 if (!ret)
856 event->registered = 1;
857 }
858
859 /*
860 * Only used internally at session destruction.
861 */
862 int _lttng_event_unregister(struct lttng_event *event)
863 {
864 const struct lttng_event_desc *desc;
865 int ret = -EINVAL;
866
867 if (!event->registered)
868 return 0;
869
870 desc = event->desc;
871 switch (event->instrumentation) {
872 case LTTNG_KERNEL_TRACEPOINT:
873 ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
874 event->desc->probe_callback,
875 event);
876 break;
877 case LTTNG_KERNEL_KPROBE:
878 lttng_kprobes_unregister(event);
879 ret = 0;
880 break;
881 case LTTNG_KERNEL_KRETPROBE:
882 lttng_kretprobes_unregister(event);
883 ret = 0;
884 break;
885 case LTTNG_KERNEL_SYSCALL:
886 ret = lttng_syscall_filter_disable(event->chan, event);
887 break;
888 case LTTNG_KERNEL_NOOP:
889 ret = 0;
890 break;
891 case LTTNG_KERNEL_UPROBE:
892 lttng_uprobes_unregister(event);
893 ret = 0;
894 break;
895 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
896 default:
897 WARN_ON_ONCE(1);
898 }
899 if (!ret)
900 event->registered = 0;
901 return ret;
902 }
903
904 /*
905 * Only used internally at session destruction.
906 */
907 static
908 void _lttng_event_destroy(struct lttng_event *event)
909 {
910 struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
911
912 switch (event->instrumentation) {
913 case LTTNG_KERNEL_TRACEPOINT:
914 lttng_event_put(event->desc);
915 break;
916 case LTTNG_KERNEL_KPROBE:
917 module_put(event->desc->owner);
918 lttng_kprobes_destroy_private(event);
919 break;
920 case LTTNG_KERNEL_KRETPROBE:
921 module_put(event->desc->owner);
922 lttng_kretprobes_destroy_private(event);
923 break;
924 case LTTNG_KERNEL_NOOP:
925 case LTTNG_KERNEL_SYSCALL:
926 break;
927 case LTTNG_KERNEL_UPROBE:
928 module_put(event->desc->owner);
929 lttng_uprobes_destroy_private(event);
930 break;
931 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
932 default:
933 WARN_ON_ONCE(1);
934 }
935 list_del(&event->list);
936 lttng_destroy_context(event->ctx);
937 lttng_free_event_filter_runtime(event);
938 /* Free event enabler refs */
939 list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
940 &event->enablers_ref_head, node)
941 kfree(enabler_ref);
942 kmem_cache_free(event_cache, event);
943 }
944
945 int lttng_session_track_pid(struct lttng_session *session, int pid)
946 {
947 int ret;
948
949 if (pid < -1)
950 return -EINVAL;
951 mutex_lock(&sessions_mutex);
952 if (pid == -1) {
953 /* track all pids: destroy tracker. */
954 if (session->pid_tracker) {
955 struct lttng_pid_tracker *lpf;
956
957 lpf = session->pid_tracker;
958 rcu_assign_pointer(session->pid_tracker, NULL);
959 synchronize_trace();
960 lttng_pid_tracker_destroy(lpf);
961 }
962 ret = 0;
963 } else {
964 if (!session->pid_tracker) {
965 struct lttng_pid_tracker *lpf;
966
967 lpf = lttng_pid_tracker_create();
968 if (!lpf) {
969 ret = -ENOMEM;
970 goto unlock;
971 }
972 ret = lttng_pid_tracker_add(lpf, pid);
973 rcu_assign_pointer(session->pid_tracker, lpf);
974 } else {
975 ret = lttng_pid_tracker_add(session->pid_tracker, pid);
976 }
977 }
978 unlock:
979 mutex_unlock(&sessions_mutex);
980 return ret;
981 }
982
983 int lttng_session_untrack_pid(struct lttng_session *session, int pid)
984 {
985 int ret;
986
987 if (pid < -1)
988 return -EINVAL;
989 mutex_lock(&sessions_mutex);
990 if (pid == -1) {
991 /* untrack all pids: replace by empty tracker. */
992 struct lttng_pid_tracker *old_lpf = session->pid_tracker;
993 struct lttng_pid_tracker *lpf;
994
995 lpf = lttng_pid_tracker_create();
996 if (!lpf) {
997 ret = -ENOMEM;
998 goto unlock;
999 }
1000 rcu_assign_pointer(session->pid_tracker, lpf);
1001 synchronize_trace();
1002 if (old_lpf)
1003 lttng_pid_tracker_destroy(old_lpf);
1004 ret = 0;
1005 } else {
1006 if (!session->pid_tracker) {
1007 ret = -ENOENT;
1008 goto unlock;
1009 }
1010 ret = lttng_pid_tracker_del(session->pid_tracker, pid);
1011 }
1012 unlock:
1013 mutex_unlock(&sessions_mutex);
1014 return ret;
1015 }
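
/*
 * Illustrative sketch (not part of the original source): PID tracker
 * semantics as implemented above. Tracking pid -1 destroys the tracker
 * (all PIDs are traced); untracking pid -1 installs an empty tracker
 * (no PID is traced), after which individual PIDs can be added back:
 *
 *	lttng_session_untrack_pid(session, -1);	// trace no PID at all
 *	lttng_session_track_pid(session, 1234);	// ...except PID 1234
 *	lttng_session_track_pid(session, -1);	// back to tracing all PIDs
 */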
1016
1017 static
1018 void *pid_list_start(struct seq_file *m, loff_t *pos)
1019 {
1020 struct lttng_session *session = m->private;
1021 struct lttng_pid_tracker *lpf;
1022 struct lttng_pid_hash_node *e;
1023 int iter = 0, i;
1024
1025 mutex_lock(&sessions_mutex);
1026 lpf = session->pid_tracker;
1027 if (lpf) {
1028 for (i = 0; i < LTTNG_PID_TABLE_SIZE; i++) {
1029 struct hlist_head *head = &lpf->pid_hash[i];
1030
1031 lttng_hlist_for_each_entry(e, head, hlist) {
1032 if (iter++ >= *pos)
1033 return e;
1034 }
1035 }
1036 } else {
1037 /* PID tracker disabled. */
1038 if (iter >= *pos && iter == 0) {
1039 return session; /* empty tracker */
1040 }
1041 iter++;
1042 }
1043 /* End of list */
1044 return NULL;
1045 }
1046
1047 /* Called with sessions_mutex held. */
1048 static
1049 void *pid_list_next(struct seq_file *m, void *p, loff_t *ppos)
1050 {
1051 struct lttng_session *session = m->private;
1052 struct lttng_pid_tracker *lpf;
1053 struct lttng_pid_hash_node *e;
1054 int iter = 0, i;
1055
1056 (*ppos)++;
1057 lpf = session->pid_tracker;
1058 if (lpf) {
1059 for (i = 0; i < LTTNG_PID_TABLE_SIZE; i++) {
1060 struct hlist_head *head = &lpf->pid_hash[i];
1061
1062 lttng_hlist_for_each_entry(e, head, hlist) {
1063 if (iter++ >= *ppos)
1064 return e;
1065 }
1066 }
1067 } else {
1068 /* PID tracker disabled. */
1069 if (iter >= *ppos && iter == 0)
1070 return session; /* empty tracker */
1071 iter++;
1072 }
1073
1074 /* End of list */
1075 return NULL;
1076 }
1077
1078 static
1079 void pid_list_stop(struct seq_file *m, void *p)
1080 {
1081 mutex_unlock(&sessions_mutex);
1082 }
1083
1084 static
1085 int pid_list_show(struct seq_file *m, void *p)
1086 {
1087 int pid;
1088
1089 if (p == m->private) {
1090 /* Tracker disabled. */
1091 pid = -1;
1092 } else {
1093 const struct lttng_pid_hash_node *e = p;
1094
1095 pid = lttng_pid_tracker_get_node_pid(e);
1096 }
1097 seq_printf(m, "process { pid = %d; };\n", pid);
1098 return 0;
1099 }
1100
1101 static
1102 const struct seq_operations lttng_tracker_pids_list_seq_ops = {
1103 .start = pid_list_start,
1104 .next = pid_list_next,
1105 .stop = pid_list_stop,
1106 .show = pid_list_show,
1107 };
1108
1109 static
1110 int lttng_tracker_pids_list_open(struct inode *inode, struct file *file)
1111 {
1112 return seq_open(file, &lttng_tracker_pids_list_seq_ops);
1113 }
1114
1115 static
1116 int lttng_tracker_pids_list_release(struct inode *inode, struct file *file)
1117 {
1118 struct seq_file *m = file->private_data;
1119 struct lttng_session *session = m->private;
1120 int ret;
1121
1122 WARN_ON_ONCE(!session);
1123 ret = seq_release(inode, file);
1124 if (!ret && session)
1125 fput(session->file);
1126 return ret;
1127 }
1128
1129 const struct file_operations lttng_tracker_pids_list_fops = {
1130 .owner = THIS_MODULE,
1131 .open = lttng_tracker_pids_list_open,
1132 .read = seq_read,
1133 .llseek = seq_lseek,
1134 .release = lttng_tracker_pids_list_release,
1135 };
1136
1137 int lttng_session_list_tracker_pids(struct lttng_session *session)
1138 {
1139 struct file *tracker_pids_list_file;
1140 struct seq_file *m;
1141 int file_fd, ret;
1142
1143 file_fd = lttng_get_unused_fd();
1144 if (file_fd < 0) {
1145 ret = file_fd;
1146 goto fd_error;
1147 }
1148
1149 tracker_pids_list_file = anon_inode_getfile("[lttng_tracker_pids_list]",
1150 &lttng_tracker_pids_list_fops,
1151 NULL, O_RDWR);
1152 if (IS_ERR(tracker_pids_list_file)) {
1153 ret = PTR_ERR(tracker_pids_list_file);
1154 goto file_error;
1155 }
1156 if (!atomic_long_add_unless(&session->file->f_count, 1, LONG_MAX)) {
1157 ret = -EOVERFLOW;
1158 goto refcount_error;
1159 }
1160 ret = lttng_tracker_pids_list_fops.open(NULL, tracker_pids_list_file);
1161 if (ret < 0)
1162 goto open_error;
1163 m = tracker_pids_list_file->private_data;
1164 m->private = session;
1165 fd_install(file_fd, tracker_pids_list_file);
1166
1167 return file_fd;
1168
1169 open_error:
1170 atomic_long_dec(&session->file->f_count);
1171 refcount_error:
1172 fput(tracker_pids_list_file);
1173 file_error:
1174 put_unused_fd(file_fd);
1175 fd_error:
1176 return ret;
1177 }
1178
1179 /*
1180 * Enabler management.
1181 */
1182 static
1183 int lttng_match_enabler_star_glob(const char *desc_name,
1184 const char *pattern)
1185 {
1186 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1187 desc_name, LTTNG_SIZE_MAX))
1188 return 0;
1189 return 1;
1190 }
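
/*
 * Example (not part of the original source): with an LTTNG_ENABLER_STAR_GLOB
 * enabler, a pattern such as "sched_*" matches "sched_switch" as well as
 * "sched_wakeup", whereas LTTNG_ENABLER_NAME requires an exact match:
 *
 *	lttng_match_enabler_star_glob("sched_switch", "sched_*");	// returns 1
 *	lttng_match_enabler_name("sched_switch", "sched_*");		// returns 0
 */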
1191
1192 static
1193 int lttng_match_enabler_name(const char *desc_name,
1194 const char *name)
1195 {
1196 if (strcmp(desc_name, name))
1197 return 0;
1198 return 1;
1199 }
1200
1201 static
1202 int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
1203 struct lttng_enabler *enabler)
1204 {
1205 const char *desc_name, *enabler_name;
1206 bool compat = false, entry = false;
1207
1208 enabler_name = enabler->event_param.name;
1209 switch (enabler->event_param.instrumentation) {
1210 case LTTNG_KERNEL_TRACEPOINT:
1211 desc_name = desc->name;
1212 switch (enabler->type) {
1213 case LTTNG_ENABLER_STAR_GLOB:
1214 return lttng_match_enabler_star_glob(desc_name, enabler_name);
1215 case LTTNG_ENABLER_NAME:
1216 return lttng_match_enabler_name(desc_name, enabler_name);
1217 default:
1218 return -EINVAL;
1219 }
1220 break;
1221 case LTTNG_KERNEL_SYSCALL:
1222 desc_name = desc->name;
1223 if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
1224 desc_name += strlen("compat_");
1225 compat = true;
1226 }
1227 if (!strncmp(desc_name, "syscall_exit_",
1228 strlen("syscall_exit_"))) {
1229 desc_name += strlen("syscall_exit_");
1230 } else if (!strncmp(desc_name, "syscall_entry_",
1231 strlen("syscall_entry_"))) {
1232 desc_name += strlen("syscall_entry_");
1233 entry = true;
1234 } else {
1235 WARN_ON_ONCE(1);
1236 return -EINVAL;
1237 }
1238 switch (enabler->event_param.u.syscall.entryexit) {
1239 case LTTNG_KERNEL_SYSCALL_ENTRYEXIT:
1240 break;
1241 case LTTNG_KERNEL_SYSCALL_ENTRY:
1242 if (!entry)
1243 return 0;
1244 break;
1245 case LTTNG_KERNEL_SYSCALL_EXIT:
1246 if (entry)
1247 return 0;
1248 break;
1249 default:
1250 return -EINVAL;
1251 }
1252 switch (enabler->event_param.u.syscall.abi) {
1253 case LTTNG_KERNEL_SYSCALL_ABI_ALL:
1254 break;
1255 case LTTNG_KERNEL_SYSCALL_ABI_NATIVE:
1256 if (compat)
1257 return 0;
1258 break;
1259 case LTTNG_KERNEL_SYSCALL_ABI_COMPAT:
1260 if (!compat)
1261 return 0;
1262 break;
1263 default:
1264 return -EINVAL;
1265 }
1266 switch (enabler->event_param.u.syscall.match) {
1267 case LTTNG_SYSCALL_MATCH_NAME:
1268 switch (enabler->type) {
1269 case LTTNG_ENABLER_STAR_GLOB:
1270 return lttng_match_enabler_star_glob(desc_name, enabler_name);
1271 case LTTNG_ENABLER_NAME:
1272 return lttng_match_enabler_name(desc_name, enabler_name);
1273 default:
1274 return -EINVAL;
1275 }
1276 break;
1277 case LTTNG_SYSCALL_MATCH_NR:
1278 return -EINVAL; /* Not implemented. */
1279 default:
1280 return -EINVAL;
1281 }
1282 break;
1283 default:
1284 WARN_ON_ONCE(1);
1285 return -EINVAL;
1286 }
1287 }
1288
1289 static
1290 int lttng_event_match_enabler(struct lttng_event *event,
1291 struct lttng_enabler *enabler)
1292 {
1293 if (enabler->event_param.instrumentation != event->instrumentation)
1294 return 0;
1295 if (lttng_desc_match_enabler(event->desc, enabler)
1296 && event->chan == enabler->chan)
1297 return 1;
1298 else
1299 return 0;
1300 }
1301
1302 static
1303 struct lttng_enabler_ref *lttng_event_enabler_ref(struct lttng_event *event,
1304 struct lttng_enabler *enabler)
1305 {
1306 struct lttng_enabler_ref *enabler_ref;
1307
1308 list_for_each_entry(enabler_ref,
1309 &event->enablers_ref_head, node) {
1310 if (enabler_ref->ref == enabler)
1311 return enabler_ref;
1312 }
1313 return NULL;
1314 }
1315
1316 static
1317 void lttng_create_tracepoint_if_missing(struct lttng_enabler *enabler)
1318 {
1319 struct lttng_session *session = enabler->chan->session;
1320 struct lttng_probe_desc *probe_desc;
1321 const struct lttng_event_desc *desc;
1322 int i;
1323 struct list_head *probe_list;
1324
1325 probe_list = lttng_get_probe_list_head();
1326 /*
1327 * For each probe event, if it matches our enabler,
1328 * create an associated lttng_event if not
1329 * already present.
1330 */
1331 list_for_each_entry(probe_desc, probe_list, head) {
1332 for (i = 0; i < probe_desc->nr_events; i++) {
1333 int found = 0;
1334 struct hlist_head *head;
1335 const char *event_name;
1336 size_t name_len;
1337 uint32_t hash;
1338 struct lttng_event *event;
1339
1340 desc = probe_desc->event_desc[i];
1341 if (!lttng_desc_match_enabler(desc, enabler))
1342 continue;
1343 event_name = desc->name;
1344 name_len = strlen(event_name);
1345
1346 /*
1347 * Check if already created.
1348 */
1349 hash = jhash(event_name, name_len, 0);
1350 head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
1351 lttng_hlist_for_each_entry(event, head, hlist) {
1352 if (event->desc == desc
1353 && event->chan == enabler->chan)
1354 found = 1;
1355 }
1356 if (found)
1357 continue;
1358
1359 /*
1360 * We need to create an event for this
1361 * event probe.
1362 */
1363 event = _lttng_event_create(enabler->chan,
1364 NULL, NULL, desc,
1365 LTTNG_KERNEL_TRACEPOINT);
1366 if (!event) {
1367 printk(KERN_INFO "Unable to create event %s\n",
1368 probe_desc->event_desc[i]->name);
1369 }
1370 }
1371 }
1372 }
1373
1374 static
1375 void lttng_create_syscall_if_missing(struct lttng_enabler *enabler)
1376 {
1377 int ret;
1378
1379 ret = lttng_syscalls_register(enabler->chan, NULL);
1380 WARN_ON_ONCE(ret);
1381 }
1382
1383 /*
1384 * Create struct lttng_event if it is missing but present in the list of
1385 * tracepoint probes.
1386 * Should be called with sessions mutex held.
1387 */
1388 static
1389 void lttng_create_event_if_missing(struct lttng_enabler *enabler)
1390 {
1391 switch (enabler->event_param.instrumentation) {
1392 case LTTNG_KERNEL_TRACEPOINT:
1393 lttng_create_tracepoint_if_missing(enabler);
1394 break;
1395 case LTTNG_KERNEL_SYSCALL:
1396 lttng_create_syscall_if_missing(enabler);
1397 break;
1398 default:
1399 WARN_ON_ONCE(1);
1400 break;
1401 }
1402 }
1403
1404 /*
1405 * Create events associated with an enabler (if not already present),
1406 * and add backward reference from the event to the enabler.
1407 * Should be called with sessions mutex held.
1408 */
1409 static
1410 int lttng_enabler_ref_events(struct lttng_enabler *enabler)
1411 {
1412 struct lttng_channel *chan = enabler->chan;
1413 struct lttng_session *session = chan->session;
1414 struct lttng_event *event;
1415
1416 if (enabler->event_param.instrumentation == LTTNG_KERNEL_SYSCALL &&
1417 enabler->event_param.u.syscall.entryexit == LTTNG_KERNEL_SYSCALL_ENTRYEXIT &&
1418 enabler->event_param.u.syscall.abi == LTTNG_KERNEL_SYSCALL_ABI_ALL &&
1419 enabler->event_param.u.syscall.match == LTTNG_SYSCALL_MATCH_NAME &&
1420 !strcmp(enabler->event_param.name, "*")) {
1421 if (enabler->enabled)
1422 WRITE_ONCE(chan->syscall_all, 1);
1423 else
1424 WRITE_ONCE(chan->syscall_all, 0);
1425 }
1426
1427 /* First ensure that probe events are created for this enabler. */
1428 lttng_create_event_if_missing(enabler);
1429
1430 /* For each event matching enabler in session event list. */
1431 list_for_each_entry(event, &session->events, list) {
1432 struct lttng_enabler_ref *enabler_ref;
1433
1434 if (!lttng_event_match_enabler(event, enabler))
1435 continue;
1436 enabler_ref = lttng_event_enabler_ref(event, enabler);
1437 if (!enabler_ref) {
1438 /*
1439 * If no backward ref, create it.
1440 * Add backward ref from event to enabler.
1441 */
1442 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
1443 if (!enabler_ref)
1444 return -ENOMEM;
1445 enabler_ref->ref = enabler;
1446 list_add(&enabler_ref->node,
1447 &event->enablers_ref_head);
1448 }
1449
1450 /*
1451 * Link filter bytecodes if not linked yet.
1452 */
1453 lttng_enabler_event_link_bytecode(event, enabler);
1454
1455 /* TODO: merge event context. */
1456 }
1457 return 0;
1458 }
1459
1460 /*
1461 * Called at module load: connect the probe on all enablers matching
1462 * this event.
1463 * Called with sessions lock held.
1464 */
1465 int lttng_fix_pending_events(void)
1466 {
1467 struct lttng_session *session;
1468
1469 list_for_each_entry(session, &sessions, list)
1470 lttng_session_lazy_sync_enablers(session);
1471 return 0;
1472 }
1473
1474 struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
1475 struct lttng_kernel_event *event_param,
1476 struct lttng_channel *chan)
1477 {
1478 struct lttng_enabler *enabler;
1479
1480 enabler = kzalloc(sizeof(*enabler), GFP_KERNEL);
1481 if (!enabler)
1482 return NULL;
1483 enabler->type = type;
1484 INIT_LIST_HEAD(&enabler->filter_bytecode_head);
1485 memcpy(&enabler->event_param, event_param,
1486 sizeof(enabler->event_param));
1487 enabler->chan = chan;
1488 /* ctx left NULL */
1489 enabler->enabled = 0;
1490 enabler->evtype = LTTNG_TYPE_ENABLER;
1491 mutex_lock(&sessions_mutex);
1492 list_add(&enabler->node, &enabler->chan->session->enablers_head);
1493 lttng_session_lazy_sync_enablers(enabler->chan->session);
1494 mutex_unlock(&sessions_mutex);
1495 return enabler;
1496 }
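
/*
 * Illustrative sketch (not part of the original source): typical enabler
 * usage. The "sched_*" pattern is an assumption for the example; any
 * star-glob over tracepoint names works. Matching events are created and
 * enabled lazily by the enabler sync performed on session start.
 *
 *	struct lttng_kernel_event ev_param;
 *	struct lttng_enabler *enabler;
 *
 *	memset(&ev_param, 0, sizeof(ev_param));
 *	strncpy(ev_param.name, "sched_*", LTTNG_KERNEL_SYM_NAME_LEN - 1);
 *	ev_param.instrumentation = LTTNG_KERNEL_TRACEPOINT;
 *	enabler = lttng_enabler_create(LTTNG_ENABLER_STAR_GLOB, &ev_param, chan);
 *	lttng_enabler_enable(enabler);
 */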
1497
1498 int lttng_enabler_enable(struct lttng_enabler *enabler)
1499 {
1500 mutex_lock(&sessions_mutex);
1501 enabler->enabled = 1;
1502 lttng_session_lazy_sync_enablers(enabler->chan->session);
1503 mutex_unlock(&sessions_mutex);
1504 return 0;
1505 }
1506
1507 int lttng_enabler_disable(struct lttng_enabler *enabler)
1508 {
1509 mutex_lock(&sessions_mutex);
1510 enabler->enabled = 0;
1511 lttng_session_lazy_sync_enablers(enabler->chan->session);
1512 mutex_unlock(&sessions_mutex);
1513 return 0;
1514 }
1515
1516 int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
1517 struct lttng_kernel_filter_bytecode __user *bytecode)
1518 {
1519 struct lttng_filter_bytecode_node *bytecode_node;
1520 uint32_t bytecode_len;
1521 int ret;
1522
1523 ret = get_user(bytecode_len, &bytecode->len);
1524 if (ret)
1525 return ret;
1526 bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
1527 GFP_KERNEL);
1528 if (!bytecode_node)
1529 return -ENOMEM;
1530 ret = copy_from_user(&bytecode_node->bc, bytecode,
1531 sizeof(*bytecode) + bytecode_len);
1532 if (ret)
1533 goto error_free;
1534 bytecode_node->enabler = enabler;
1535 /* Enforce length based on allocated size */
1536 bytecode_node->bc.len = bytecode_len;
1537 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
1538 lttng_session_lazy_sync_enablers(enabler->chan->session);
1539 return 0;
1540
1541 error_free:
1542 kfree(bytecode_node);
1543 return ret;
1544 }
1545
1546 int lttng_event_add_callsite(struct lttng_event *event,
1547 struct lttng_kernel_event_callsite __user *callsite)
1548 {
1549
1550 switch (event->instrumentation) {
1551 case LTTNG_KERNEL_UPROBE:
1552 return lttng_uprobes_add_callsite(event, callsite);
1553 default:
1554 return -EINVAL;
1555 }
1556 }
1557
1558 int lttng_enabler_attach_context(struct lttng_enabler *enabler,
1559 struct lttng_kernel_context *context_param)
1560 {
1561 return -ENOSYS;
1562 }
1563
1564 static
1565 void lttng_enabler_destroy(struct lttng_enabler *enabler)
1566 {
1567 struct lttng_filter_bytecode_node *filter_node, *tmp_filter_node;
1568
1569 /* Destroy filter bytecode */
1570 list_for_each_entry_safe(filter_node, tmp_filter_node,
1571 &enabler->filter_bytecode_head, node) {
1572 kfree(filter_node);
1573 }
1574
1575 /* Destroy contexts */
1576 lttng_destroy_context(enabler->ctx);
1577
1578 list_del(&enabler->node);
1579 kfree(enabler);
1580 }
1581
1582 /*
1583 * lttng_session_sync_enablers should be called just before starting a
1584 * session.
1585 * Should be called with sessions mutex held.
1586 */
1587 static
1588 void lttng_session_sync_enablers(struct lttng_session *session)
1589 {
1590 struct lttng_enabler *enabler;
1591 struct lttng_event *event;
1592
1593 list_for_each_entry(enabler, &session->enablers_head, node)
1594 lttng_enabler_ref_events(enabler);
1595 /*
1596 * For each event, if at least one of its enablers is enabled,
1597 * and its channel and session transient states are enabled, we
1598 * enable the event, else we disable it.
1599 */
1600 list_for_each_entry(event, &session->events, list) {
1601 struct lttng_enabler_ref *enabler_ref;
1602 struct lttng_bytecode_runtime *runtime;
1603 int enabled = 0, has_enablers_without_bytecode = 0;
1604
1605 switch (event->instrumentation) {
1606 case LTTNG_KERNEL_TRACEPOINT:
1607 case LTTNG_KERNEL_SYSCALL:
1608 /* Enable events */
1609 list_for_each_entry(enabler_ref,
1610 &event->enablers_ref_head, node) {
1611 if (enabler_ref->ref->enabled) {
1612 enabled = 1;
1613 break;
1614 }
1615 }
1616 break;
1617 default:
1618 /* Not handled with lazy sync. */
1619 continue;
1620 }
1621 /*
1622 * Enabled state is based on union of enablers, with
1623 * intersection of session and channel transient enable
1624 * states.
1625 */
1626 enabled = enabled && session->tstate && event->chan->tstate;
1627
1628 WRITE_ONCE(event->enabled, enabled);
1629 /*
1630 * Sync tracepoint registration with event enabled
1631 * state.
1632 */
1633 if (enabled) {
1634 register_event(event);
1635 } else {
1636 _lttng_event_unregister(event);
1637 }
1638
1639 /* Check if has enablers without bytecode enabled */
1640 list_for_each_entry(enabler_ref,
1641 &event->enablers_ref_head, node) {
1642 if (enabler_ref->ref->enabled
1643 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
1644 has_enablers_without_bytecode = 1;
1645 break;
1646 }
1647 }
1648 event->has_enablers_without_bytecode =
1649 has_enablers_without_bytecode;
1650
1651 /* Enable filters */
1652 list_for_each_entry(runtime,
1653 &event->bytecode_runtime_head, node)
1654 lttng_filter_sync_state(runtime);
1655 }
1656 }
1657
1658 /*
1659 * Apply enablers to session events, adding events to session if need
1660 * be. It is required after each modification applied to an active
1661 * session, and right before session "start".
1662 * "lazy" sync means we only sync if required.
1663 * Should be called with sessions mutex held.
1664 */
1665 static
1666 void lttng_session_lazy_sync_enablers(struct lttng_session *session)
1667 {
1668 /* We can skip if session is not active */
1669 if (!session->active)
1670 return;
1671 lttng_session_sync_enablers(session);
1672 }
1673
1674 /*
1675 * Serialize at most one packet worth of metadata into a metadata
1676 * channel.
1677 * We grab the metadata cache mutex to get exclusive access to our metadata
1678 * buffer and to the metadata cache. Exclusive access to the metadata buffer
1679 * allows us to do racy operations such as checking for the remaining space
1680 * left in the packet and writing, since mutual exclusion protects us from concurrent writes.
1681 * Mutual exclusion on the metadata cache allows us to read the cache content
1682 * without racing against reallocation of the cache by updates.
1683 * Returns the number of bytes written in the channel, 0 if no data
1684 * was written and a negative value on error.
1685 */
1686 int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
1687 struct channel *chan, bool *coherent)
1688 {
1689 struct lib_ring_buffer_ctx ctx;
1690 int ret = 0;
1691 size_t len, reserve_len;
1692
1693 /*
1694 * Ensure we support multiple get_next / put sequences followed by
1695 * put_next. The metadata cache lock protects reading the metadata
1696 * cache. It can indeed be read concurrently by "get_next_subbuf" and
1697 * "flush" operations on the buffer invoked by different processes.
1698 * Moreover, since the metadata cache memory can be reallocated, we
1699 * need to have exclusive access against updates even though we only
1700 * read it.
1701 */
1702 mutex_lock(&stream->metadata_cache->lock);
1703 WARN_ON(stream->metadata_in < stream->metadata_out);
1704 if (stream->metadata_in != stream->metadata_out)
1705 goto end;
1706
1707 /* Metadata regenerated, change the version. */
1708 if (stream->metadata_cache->version != stream->version)
1709 stream->version = stream->metadata_cache->version;
1710
1711 len = stream->metadata_cache->metadata_written -
1712 stream->metadata_in;
1713 if (!len)
1714 goto end;
1715 reserve_len = min_t(size_t,
1716 stream->transport->ops.packet_avail_size(chan),
1717 len);
1718 lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
1719 sizeof(char), -1);
1720 /*
1721 * If reservation failed, return an error to the caller.
1722 */
1723 ret = stream->transport->ops.event_reserve(&ctx, 0);
1724 if (ret != 0) {
1725 printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
1726 stream->coherent = false;
1727 goto end;
1728 }
1729 stream->transport->ops.event_write(&ctx,
1730 stream->metadata_cache->data + stream->metadata_in,
1731 reserve_len);
1732 stream->transport->ops.event_commit(&ctx);
1733 stream->metadata_in += reserve_len;
1734 if (reserve_len < len)
1735 stream->coherent = false;
1736 else
1737 stream->coherent = true;
1738 ret = reserve_len;
1739
1740 end:
1741 if (coherent)
1742 *coherent = stream->coherent;
1743 mutex_unlock(&stream->metadata_cache->lock);
1744 return ret;
1745 }
1746
1747 static
1748 void lttng_metadata_begin(struct lttng_session *session)
1749 {
1750 if (atomic_inc_return(&session->metadata_cache->producing) == 1)
1751 mutex_lock(&session->metadata_cache->lock);
1752 }
1753
1754 static
1755 void lttng_metadata_end(struct lttng_session *session)
1756 {
1757 WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
1758 if (atomic_dec_return(&session->metadata_cache->producing) == 0) {
1759 struct lttng_metadata_stream *stream;
1760
1761 list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
1762 wake_up_interruptible(&stream->read_wait);
1763 mutex_unlock(&session->metadata_cache->lock);
1764 }
1765 }
1766
1767 /*
1768 * Write the metadata to the metadata cache.
1769 * Must be called with sessions_mutex held.
1770 * The metadata cache lock protects us from concurrent read access from
1771 * the thread outputting metadata content to the ring buffer.
1772 * The content of the printf is printed as a single atomic metadata
1773 * transaction.
1774 */
1775 int lttng_metadata_printf(struct lttng_session *session,
1776 const char *fmt, ...)
1777 {
1778 char *str;
1779 size_t len;
1780 va_list ap;
1781
1782 WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));
1783
1784 va_start(ap, fmt);
1785 str = kvasprintf(GFP_KERNEL, fmt, ap);
1786 va_end(ap);
1787 if (!str)
1788 return -ENOMEM;
1789
1790 len = strlen(str);
1791 WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
1792 if (session->metadata_cache->metadata_written + len >
1793 session->metadata_cache->cache_alloc) {
1794 char *tmp_cache_realloc;
1795 unsigned int tmp_cache_alloc_size;
1796
1797 tmp_cache_alloc_size = max_t(unsigned int,
1798 session->metadata_cache->cache_alloc + len,
1799 session->metadata_cache->cache_alloc << 1);
1800 tmp_cache_realloc = lttng_vzalloc(tmp_cache_alloc_size);
1801 if (!tmp_cache_realloc)
1802 goto err;
1803 if (session->metadata_cache->data) {
1804 memcpy(tmp_cache_realloc,
1805 session->metadata_cache->data,
1806 session->metadata_cache->cache_alloc);
1807 vfree(session->metadata_cache->data);
1808 }
1809
1810 session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
1811 session->metadata_cache->data = tmp_cache_realloc;
1812 }
1813 memcpy(session->metadata_cache->data +
1814 session->metadata_cache->metadata_written,
1815 str, len);
1816 session->metadata_cache->metadata_written += len;
1817 kfree(str);
1818
1819 return 0;
1820
1821 err:
1822 kfree(str);
1823 return -ENOMEM;
1824 }
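
/*
 * Illustrative sketch (not part of the original source): metadata producers
 * bracket their lttng_metadata_printf() calls with lttng_metadata_begin()
 * and lttng_metadata_end() (see the "producing" check above) so the whole
 * burst is exposed to readers as a single coherent transaction:
 *
 *	lttng_metadata_begin(session);
 *	ret = lttng_metadata_printf(session,
 *		"event {\n"
 *		"	name = \"%s\";\n"
 *		"	id = %u;\n",
 *		event_name, event_id);
 *	...
 *	lttng_metadata_end(session);
 */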
1825
1826 static
1827 int print_tabs(struct lttng_session *session, size_t nesting)
1828 {
1829 size_t i;
1830
1831 for (i = 0; i < nesting; i++) {
1832 int ret;
1833
1834 ret = lttng_metadata_printf(session, " ");
1835 if (ret) {
1836 return ret;
1837 }
1838 }
1839 return 0;
1840 }
1841
1842 /*
1843 * Must be called with sessions_mutex held.
1844 */
1845 static
1846 int _lttng_struct_type_statedump(struct lttng_session *session,
1847 const struct lttng_type *type,
1848 size_t nesting)
1849 {
1850 int ret;
1851 uint32_t i, nr_fields;
1852
1853 ret = print_tabs(session, nesting);
1854 if (ret)
1855 return ret;
1856 ret = lttng_metadata_printf(session,
1857 "struct {\n");
1858 if (ret)
1859 return ret;
1860 nr_fields = type->u._struct.nr_fields;
1861 for (i = 0; i < nr_fields; i++) {
1862 const struct lttng_event_field *iter_field;
1863
1864 iter_field = &type->u._struct.fields[i];
1865 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
1866 if (ret)
1867 return ret;
1868 }
1869 ret = print_tabs(session, nesting);
1870 if (ret)
1871 return ret;
1872 ret = lttng_metadata_printf(session,
1873 "}");
1874 return ret;
1875 }
1876
1877 /*
1878 * Must be called with sessions_mutex held.
1879 */
1880 static
1881 int _lttng_struct_statedump(struct lttng_session *session,
1882 const struct lttng_event_field *field,
1883 size_t nesting)
1884 {
1885 int ret;
1886
1887 ret = _lttng_struct_type_statedump(session,
1888 &field->type, nesting);
1889 if (ret)
1890 return ret;
1891 ret = lttng_metadata_printf(session,
1892 "_%s;\n",
1893 field->name);
1894 return ret;
1895 }
1896
1897 /*
1898 * Must be called with sessions_mutex held.
1899 */
1900 static
1901 int _lttng_variant_type_statedump(struct lttng_session *session,
1902 const struct lttng_type *type,
1903 size_t nesting)
1904 {
1905 int ret;
1906 uint32_t i, nr_choices;
1907
1908 ret = print_tabs(session, nesting);
1909 if (ret)
1910 return ret;
1911 ret = lttng_metadata_printf(session,
1912 "variant <_%s> {\n",
1913 type->u.variant.tag_name);
1914 if (ret)
1915 return ret;
1916 nr_choices = type->u.variant.nr_choices;
1917 for (i = 0; i < nr_choices; i++) {
1918 const struct lttng_event_field *iter_field;
1919
1920 iter_field = &type->u.variant.choices[i];
1921 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
1922 if (ret)
1923 return ret;
1924 }
1925 ret = print_tabs(session, nesting);
1926 if (ret)
1927 return ret;
1928 ret = lttng_metadata_printf(session,
1929 "}");
1930 return ret;
1931 }
1932
1933 /*
1934 * Must be called with sessions_mutex held.
1935 */
1936 static
1937 int _lttng_variant_statedump(struct lttng_session *session,
1938 const struct lttng_event_field *field,
1939 size_t nesting)
1940 {
1941 int ret;
1942
1943 ret = _lttng_variant_type_statedump(session,
1944 &field->type, nesting);
1945 if (ret)
1946 return ret;
1947 ret = lttng_metadata_printf(session,
1948 "_%s;\n",
1949 field->name);
1950 return ret;
1951 }
1952
1953 /*
1954 * Must be called with sessions_mutex held.
1955 */
1956 static
1957 int _lttng_array_compound_statedump(struct lttng_session *session,
1958 const struct lttng_event_field *field,
1959 size_t nesting)
1960 {
1961 int ret;
1962 const struct lttng_type *elem_type;
1963
1964 /* Only arrays of structures and variants are currently supported. */
1965 elem_type = field->type.u.array_compound.elem_type;
1966 switch (elem_type->atype) {
1967 case atype_struct:
1968 ret = _lttng_struct_type_statedump(session, elem_type, nesting);
1969 if (ret)
1970 return ret;
1971 break;
1972 case atype_variant:
1973 ret = _lttng_variant_type_statedump(session, elem_type, nesting);
1974 if (ret)
1975 return ret;
1976 break;
1977 default:
1978 return -EINVAL;
1979 }
1980 ret = lttng_metadata_printf(session,
1981 " _%s[%u];\n",
1982 field->name,
1983 field->type.u.array_compound.length);
1984 return ret;
1985 }
1986
1987 /*
1988 * Must be called with sessions_mutex held.
1989 */
1990 static
1991 int _lttng_sequence_compound_statedump(struct lttng_session *session,
1992 const struct lttng_event_field *field,
1993 size_t nesting)
1994 {
1995 int ret;
1996 const char *length_name;
1997 const struct lttng_type *elem_type;
1998
1999 length_name = field->type.u.sequence_compound.length_name;
2000
2001 /* Only arrays of structures and variants are currently supported. */
2002 elem_type = field->type.u.sequence_compound.elem_type;
2003 switch (elem_type->atype) {
2004 case atype_struct:
2005 ret = _lttng_struct_type_statedump(session, elem_type, nesting);
2006 if (ret)
2007 return ret;
2008 break;
2009 case atype_variant:
2010 ret = _lttng_variant_type_statedump(session, elem_type, nesting);
2011 if (ret)
2012 return ret;
2013 break;
2014 default:
2015 return -EINVAL;
2016 }
2017 ret = lttng_metadata_printf(session,
2018 " _%s[ _%s ];\n",
2019 field->name,
2020 length_name);
2021 return ret;
2022 }
2023
2024 /*
2025 * Must be called with sessions_mutex held.
2026 */
2027 static
2028 int _lttng_enum_statedump(struct lttng_session *session,
2029 const struct lttng_event_field *field,
2030 size_t nesting)
2031 {
2032 const struct lttng_enum_desc *enum_desc;
2033 const struct lttng_integer_type *container_type;
2034 int ret;
2035 unsigned int i, nr_entries;
2036
2037 enum_desc = field->type.u.basic.enumeration.desc;
2038 container_type = &field->type.u.basic.enumeration.container_type;
2039 nr_entries = enum_desc->nr_entries;
2040
2041 ret = print_tabs(session, nesting);
2042 if (ret)
2043 goto end;
2044 ret = lttng_metadata_printf(session,
2045 "enum : integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } {\n",
2046 container_type->size,
2047 container_type->alignment,
2048 container_type->signedness,
2049 (container_type->encoding == lttng_encode_none)
2050 ? "none"
2051 : (container_type->encoding == lttng_encode_UTF8)
2052 ? "UTF8"
2053 : "ASCII",
2054 container_type->base,
2055 #if __BYTE_ORDER == __BIG_ENDIAN
2056 container_type->reverse_byte_order ? " byte_order = le;" : ""
2057 #else
2058 container_type->reverse_byte_order ? " byte_order = be;" : ""
2059 #endif
2060 );
2061 if (ret)
2062 goto end;
2063 /* Dump all entries */
2064 for (i = 0; i < nr_entries; i++) {
2065 const struct lttng_enum_entry *entry = &enum_desc->entries[i];
2066 int j, len;
2067
2068 ret = print_tabs(session, nesting + 1);
2069 if (ret)
2070 goto end;
2071 ret = lttng_metadata_printf(session,
2072 "\"");
2073 if (ret)
2074 goto end;
2075 len = strlen(entry->string);
2076 		/* Escape the characters '"' and '\' */
2077 for (j = 0; j < len; j++) {
2078 char c = entry->string[j];
2079
2080 switch (c) {
2081 case '"':
2082 ret = lttng_metadata_printf(session,
2083 "\\\"");
2084 break;
2085 case '\\':
2086 ret = lttng_metadata_printf(session,
2087 "\\\\");
2088 break;
2089 default:
2090 ret = lttng_metadata_printf(session,
2091 "%c", c);
2092 break;
2093 }
2094 if (ret)
2095 goto end;
2096 }
2097 ret = lttng_metadata_printf(session, "\"");
2098 if (ret)
2099 goto end;
2100
2101 if (entry->options.is_auto) {
2102 ret = lttng_metadata_printf(session, ",\n");
2103 if (ret)
2104 goto end;
2105 } else {
2106 ret = lttng_metadata_printf(session,
2107 " = ");
2108 if (ret)
2109 goto end;
2110 if (entry->start.signedness)
2111 ret = lttng_metadata_printf(session,
2112 "%lld", (long long) entry->start.value);
2113 else
2114 ret = lttng_metadata_printf(session,
2115 "%llu", entry->start.value);
2116 if (ret)
2117 goto end;
2118 if (entry->start.signedness == entry->end.signedness &&
2119 entry->start.value
2120 == entry->end.value) {
2121 ret = lttng_metadata_printf(session,
2122 ",\n");
2123 } else {
2124 if (entry->end.signedness) {
2125 ret = lttng_metadata_printf(session,
2126 " ... %lld,\n",
2127 (long long) entry->end.value);
2128 } else {
2129 ret = lttng_metadata_printf(session,
2130 " ... %llu,\n",
2131 entry->end.value);
2132 }
2133 }
2134 if (ret)
2135 goto end;
2136 }
2137 }
2138 ret = print_tabs(session, nesting);
2139 if (ret)
2140 goto end;
2141 ret = lttng_metadata_printf(session, "} _%s;\n",
2142 field->name);
2143 end:
2144 return ret;
2145 }
2146
2147 /*
2148 * Must be called with sessions_mutex held.
2149 */
2150 static
2151 int _lttng_field_statedump(struct lttng_session *session,
2152 const struct lttng_event_field *field,
2153 size_t nesting)
2154 {
2155 int ret = 0;
2156
2157 switch (field->type.atype) {
2158 case atype_integer:
2159 ret = print_tabs(session, nesting);
2160 if (ret)
2161 return ret;
2162 ret = lttng_metadata_printf(session,
2163 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s;\n",
2164 field->type.u.basic.integer.size,
2165 field->type.u.basic.integer.alignment,
2166 field->type.u.basic.integer.signedness,
2167 (field->type.u.basic.integer.encoding == lttng_encode_none)
2168 ? "none"
2169 : (field->type.u.basic.integer.encoding == lttng_encode_UTF8)
2170 ? "UTF8"
2171 : "ASCII",
2172 field->type.u.basic.integer.base,
2173 #if __BYTE_ORDER == __BIG_ENDIAN
2174 field->type.u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
2175 #else
2176 field->type.u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
2177 #endif
2178 field->name);
2179 break;
2180 case atype_enum:
2181 ret = _lttng_enum_statedump(session, field, nesting);
2182 break;
2183 case atype_array:
2184 case atype_array_bitfield:
2185 {
2186 const struct lttng_basic_type *elem_type;
2187
2188 elem_type = &field->type.u.array.elem_type;
2189 if (field->type.u.array.elem_alignment) {
2190 ret = print_tabs(session, nesting);
2191 if (ret)
2192 return ret;
2193 ret = lttng_metadata_printf(session,
2194 "struct { } align(%u) _%s_padding;\n",
2195 field->type.u.array.elem_alignment * CHAR_BIT,
2196 field->name);
2197 if (ret)
2198 return ret;
2199 }
2200 ret = print_tabs(session, nesting);
2201 if (ret)
2202 return ret;
2203 ret = lttng_metadata_printf(session,
2204 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[%u];\n",
2205 elem_type->u.basic.integer.size,
2206 elem_type->u.basic.integer.alignment,
2207 elem_type->u.basic.integer.signedness,
2208 (elem_type->u.basic.integer.encoding == lttng_encode_none)
2209 ? "none"
2210 : (elem_type->u.basic.integer.encoding == lttng_encode_UTF8)
2211 ? "UTF8"
2212 : "ASCII",
2213 elem_type->u.basic.integer.base,
2214 #if __BYTE_ORDER == __BIG_ENDIAN
2215 elem_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
2216 #else
2217 elem_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
2218 #endif
2219 field->name, field->type.u.array.length);
2220 break;
2221 }
2222 case atype_sequence:
2223 case atype_sequence_bitfield:
2224 {
2225 const struct lttng_basic_type *elem_type;
2226 const struct lttng_basic_type *length_type;
2227
2228 elem_type = &field->type.u.sequence.elem_type;
2229 length_type = &field->type.u.sequence.length_type;
2230 ret = print_tabs(session, nesting);
2231 if (ret)
2232 return ret;
2233 ret = lttng_metadata_printf(session,
2234 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } __%s_length;\n",
2235 length_type->u.basic.integer.size,
2236 (unsigned int) length_type->u.basic.integer.alignment,
2237 length_type->u.basic.integer.signedness,
2238 (length_type->u.basic.integer.encoding == lttng_encode_none)
2239 ? "none"
2240 : ((length_type->u.basic.integer.encoding == lttng_encode_UTF8)
2241 ? "UTF8"
2242 : "ASCII"),
2243 length_type->u.basic.integer.base,
2244 #if __BYTE_ORDER == __BIG_ENDIAN
2245 length_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
2246 #else
2247 length_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
2248 #endif
2249 field->name);
2250 if (ret)
2251 return ret;
2252
2253 if (field->type.u.sequence.elem_alignment) {
2254 ret = print_tabs(session, nesting);
2255 if (ret)
2256 return ret;
2257 ret = lttng_metadata_printf(session,
2258 "struct { } align(%u) _%s_padding;\n",
2259 field->type.u.sequence.elem_alignment * CHAR_BIT,
2260 field->name);
2261 if (ret)
2262 return ret;
2263 }
2264 ret = print_tabs(session, nesting);
2265 if (ret)
2266 return ret;
2267 ret = lttng_metadata_printf(session,
2268 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[ __%s_length ];\n",
2269 elem_type->u.basic.integer.size,
2270 (unsigned int) elem_type->u.basic.integer.alignment,
2271 elem_type->u.basic.integer.signedness,
2272 (elem_type->u.basic.integer.encoding == lttng_encode_none)
2273 ? "none"
2274 : ((elem_type->u.basic.integer.encoding == lttng_encode_UTF8)
2275 ? "UTF8"
2276 : "ASCII"),
2277 elem_type->u.basic.integer.base,
2278 #if __BYTE_ORDER == __BIG_ENDIAN
2279 elem_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
2280 #else
2281 elem_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
2282 #endif
2283 field->name,
2284 field->name);
2285 break;
2286 }
2287
2288 case atype_string:
2289 /* Default encoding is UTF8 */
2290 ret = print_tabs(session, nesting);
2291 if (ret)
2292 return ret;
2293 ret = lttng_metadata_printf(session,
2294 "string%s _%s;\n",
2295 field->type.u.basic.string.encoding == lttng_encode_ASCII ?
2296 " { encoding = ASCII; }" : "",
2297 field->name);
2298 break;
2299 case atype_struct:
2300 ret = _lttng_struct_statedump(session, field, nesting);
2301 break;
2302 case atype_array_compound:
2303 ret = _lttng_array_compound_statedump(session, field, nesting);
2304 break;
2305 case atype_sequence_compound:
2306 ret = _lttng_sequence_compound_statedump(session, field, nesting);
2307 break;
2308 case atype_variant:
2309 ret = _lttng_variant_statedump(session, field, nesting);
2310 break;
2311
2312 default:
2313 WARN_ON_ONCE(1);
2314 return -EINVAL;
2315 }
2316 return ret;
2317 }
2318
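/*
 * Dump the metadata for each field of an event or channel context, at
 * nesting level 2. Must be called with sessions_mutex held.
 */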
2319 static
2320 int _lttng_context_metadata_statedump(struct lttng_session *session,
2321 struct lttng_ctx *ctx)
2322 {
2323 int ret = 0;
2324 int i;
2325
2326 if (!ctx)
2327 return 0;
2328 for (i = 0; i < ctx->nr_fields; i++) {
2329 const struct lttng_ctx_field *field = &ctx->fields[i];
2330
2331 ret = _lttng_field_statedump(session, &field->event_field, 2);
2332 if (ret)
2333 return ret;
2334 }
2335 return ret;
2336 }
2337
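/*
 * Dump the metadata for each payload field listed in the event
 * descriptor. Must be called with sessions_mutex held.
 */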
2338 static
2339 int _lttng_fields_metadata_statedump(struct lttng_session *session,
2340 struct lttng_event *event)
2341 {
2342 const struct lttng_event_desc *desc = event->desc;
2343 int ret = 0;
2344 int i;
2345
2346 for (i = 0; i < desc->nr_fields; i++) {
2347 const struct lttng_event_field *field = &desc->fields[i];
2348
2349 ret = _lttng_field_statedump(session, field, 2);
2350 if (ret)
2351 return ret;
2352 }
2353 return ret;
2354 }
2355
2356 /*
2357 * Must be called with sessions_mutex held.
2358 * The entire event metadata is printed as a single atomic metadata
2359 * transaction.
2360 */
2361 static
2362 int _lttng_event_metadata_statedump(struct lttng_session *session,
2363 struct lttng_channel *chan,
2364 struct lttng_event *event)
2365 {
2366 int ret = 0;
2367
2368 if (event->metadata_dumped || !LTTNG_READ_ONCE(session->active))
2369 return 0;
2370 if (chan->channel_type == METADATA_CHANNEL)
2371 return 0;
2372
2373 lttng_metadata_begin(session);
2374
2375 ret = lttng_metadata_printf(session,
2376 "event {\n"
2377 " name = \"%s\";\n"
2378 " id = %u;\n"
2379 " stream_id = %u;\n",
2380 event->desc->name,
2381 event->id,
2382 event->chan->id);
2383 if (ret)
2384 goto end;
2385
2386 if (event->ctx) {
2387 ret = lttng_metadata_printf(session,
2388 " context := struct {\n");
2389 if (ret)
2390 goto end;
2391 }
2392 ret = _lttng_context_metadata_statedump(session, event->ctx);
2393 if (ret)
2394 goto end;
2395 if (event->ctx) {
2396 ret = lttng_metadata_printf(session,
2397 " };\n");
2398 if (ret)
2399 goto end;
2400 }
2401
2402 ret = lttng_metadata_printf(session,
2403 " fields := struct {\n"
2404 );
2405 if (ret)
2406 goto end;
2407
2408 ret = _lttng_fields_metadata_statedump(session, event);
2409 if (ret)
2410 goto end;
2411
2412 /*
2413 * LTTng space reservation can only reserve multiples of the
2414 * byte size.
2415 */
2416 ret = lttng_metadata_printf(session,
2417 " };\n"
2418 "};\n\n");
2419 if (ret)
2420 goto end;
2421
2422 event->metadata_dumped = 1;
2423 end:
2424 lttng_metadata_end(session);
2425 return ret;
2426
2427 }
2428
2429 /*
2430 * Must be called with sessions_mutex held.
2431 * The entire channel metadata is printed as a single atomic metadata
2432 * transaction.
2433 */
2434 static
2435 int _lttng_channel_metadata_statedump(struct lttng_session *session,
2436 struct lttng_channel *chan)
2437 {
2438 int ret = 0;
2439
2440 if (chan->metadata_dumped || !LTTNG_READ_ONCE(session->active))
2441 return 0;
2442
2443 if (chan->channel_type == METADATA_CHANNEL)
2444 return 0;
2445
2446 lttng_metadata_begin(session);
2447
2448 WARN_ON_ONCE(!chan->header_type);
2449 ret = lttng_metadata_printf(session,
2450 "stream {\n"
2451 " id = %u;\n"
2452 " event.header := %s;\n"
2453 " packet.context := struct packet_context;\n",
2454 chan->id,
2455 chan->header_type == 1 ? "struct event_header_compact" :
2456 "struct event_header_large");
2457 if (ret)
2458 goto end;
2459
2460 if (chan->ctx) {
2461 ret = lttng_metadata_printf(session,
2462 " event.context := struct {\n");
2463 if (ret)
2464 goto end;
2465 }
2466 ret = _lttng_context_metadata_statedump(session, chan->ctx);
2467 if (ret)
2468 goto end;
2469 if (chan->ctx) {
2470 ret = lttng_metadata_printf(session,
2471 " };\n");
2472 if (ret)
2473 goto end;
2474 }
2475
2476 ret = lttng_metadata_printf(session,
2477 "};\n\n");
2478
2479 chan->metadata_dumped = 1;
2480 end:
2481 lttng_metadata_end(session);
2482 return ret;
2483 }
2484
2485 /*
2486 * Must be called with sessions_mutex held.
2487 */
2488 static
2489 int _lttng_stream_packet_context_declare(struct lttng_session *session)
2490 {
2491 return lttng_metadata_printf(session,
2492 "struct packet_context {\n"
2493 " uint64_clock_monotonic_t timestamp_begin;\n"
2494 " uint64_clock_monotonic_t timestamp_end;\n"
2495 " uint64_t content_size;\n"
2496 " uint64_t packet_size;\n"
2497 " uint64_t packet_seq_num;\n"
2498 " unsigned long events_discarded;\n"
2499 " uint32_t cpu_id;\n"
2500 "};\n\n"
2501 );
2502 }
2503
2504 /*
2505 * Compact header:
2506 * id: range: 0 - 30.
2507 * id 31 is reserved to indicate an extended header.
2508 *
2509 * Large header:
2510 * id: range: 0 - 65534.
2511 * id 65535 is reserved to indicate an extended header.
2512 *
2513 * Must be called with sessions_mutex held.
2514 */
2515 static
2516 int _lttng_event_header_declare(struct lttng_session *session)
2517 {
2518 return lttng_metadata_printf(session,
2519 "struct event_header_compact {\n"
2520 " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
2521 " variant <id> {\n"
2522 " struct {\n"
2523 " uint27_clock_monotonic_t timestamp;\n"
2524 " } compact;\n"
2525 " struct {\n"
2526 " uint32_t id;\n"
2527 " uint64_clock_monotonic_t timestamp;\n"
2528 " } extended;\n"
2529 " } v;\n"
2530 "} align(%u);\n"
2531 "\n"
2532 "struct event_header_large {\n"
2533 " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
2534 " variant <id> {\n"
2535 " struct {\n"
2536 " uint32_clock_monotonic_t timestamp;\n"
2537 " } compact;\n"
2538 " struct {\n"
2539 " uint32_t id;\n"
2540 " uint64_clock_monotonic_t timestamp;\n"
2541 " } extended;\n"
2542 " } v;\n"
2543 "} align(%u);\n\n",
2544 lttng_alignof(uint32_t) * CHAR_BIT,
2545 lttng_alignof(uint16_t) * CHAR_BIT
2546 );
2547 }
2548
2549 /*
2550 * Approximation of NTP time of day to clock monotonic correlation,
2551 * taken at start of trace.
2552 * Yes, this is only an approximation. Yes, we can (and will) do better
2553 * in future versions.
2554 * This function may return a negative offset. It may happen if the
2555 * system sets the REALTIME clock to 0 after boot.
2556 *
2557  * Use a 64-bit timespec on kernels that have it; this makes 32-bit
2558  * architectures y2038 compliant.
2559 */
2560 static
2561 int64_t measure_clock_offset(void)
2562 {
2563 uint64_t monotonic_avg, monotonic[2], realtime;
2564 uint64_t tcf = trace_clock_freq();
2565 int64_t offset;
2566 unsigned long flags;
2567 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
2568 struct timespec64 rts = { 0, 0 };
2569 #else
2570 struct timespec rts = { 0, 0 };
2571 #endif
2572
2573 /* Disable interrupts to increase correlation precision. */
2574 local_irq_save(flags);
2575 monotonic[0] = trace_clock_read64();
2576 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
2577 ktime_get_real_ts64(&rts);
2578 #else
2579 getnstimeofday(&rts);
2580 #endif
2581 monotonic[1] = trace_clock_read64();
2582 local_irq_restore(flags);
2583
2584 monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
2585 realtime = (uint64_t) rts.tv_sec * tcf;
2586 if (tcf == NSEC_PER_SEC) {
2587 realtime += rts.tv_nsec;
2588 } else {
2589 uint64_t n = rts.tv_nsec * tcf;
2590
2591 do_div(n, NSEC_PER_SEC);
2592 realtime += n;
2593 }
2594 offset = (int64_t) realtime - monotonic_avg;
2595 return offset;
2596 }
2597
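/*
 * Print a string to the metadata stream as the body of a CTF string
 * literal, escaping newline, backslash and double-quote characters.
 */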
2598 static
2599 int print_escaped_ctf_string(struct lttng_session *session, const char *string)
2600 {
2601 	int ret = 0;
2602 size_t i;
2603 char cur;
2604
2605 i = 0;
2606 cur = string[i];
2607 while (cur != '\0') {
2608 switch (cur) {
2609 case '\n':
2610 ret = lttng_metadata_printf(session, "%s", "\\n");
2611 break;
2612 case '\\':
2613 case '"':
2614 ret = lttng_metadata_printf(session, "%c", '\\');
2615 if (ret)
2616 goto error;
2617 /* We still print the current char */
2618 /* Fallthrough */
2619 default:
2620 ret = lttng_metadata_printf(session, "%c", cur);
2621 break;
2622 }
2623
2624 if (ret)
2625 goto error;
2626
2627 cur = string[++i];
2628 }
2629 error:
2630 return ret;
2631 }
2632
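/*
 * Print a 'field = "value";' metadata environment entry, escaping the
 * value with print_escaped_ctf_string().
 */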
2633 static
2634 int print_metadata_escaped_field(struct lttng_session *session, const char *field,
2635 const char *field_value)
2636 {
2637 int ret;
2638
2639 ret = lttng_metadata_printf(session, " %s = \"", field);
2640 if (ret)
2641 goto error;
2642
2643 ret = print_escaped_ctf_string(session, field_value);
2644 if (ret)
2645 goto error;
2646
2647 ret = lttng_metadata_printf(session, "\";\n");
2648
2649 error:
2650 return ret;
2651 }
2652
2653 /*
2654 * Output metadata into this session's metadata buffers.
2655 * Must be called with sessions_mutex held.
2656 */
2657 static
2658 int _lttng_session_metadata_statedump(struct lttng_session *session)
2659 {
2660 unsigned char *uuid_c = session->uuid.b;
2661 	char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
2662 struct lttng_channel *chan;
2663 struct lttng_event *event;
2664 int ret = 0;
2665
2666 if (!LTTNG_READ_ONCE(session->active))
2667 return 0;
2668
2669 lttng_metadata_begin(session);
2670
2671 if (session->metadata_dumped)
2672 goto skip_session;
2673
2674 snprintf(uuid_s, sizeof(uuid_s),
2675 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
2676 uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
2677 uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
2678 uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
2679 uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
2680
2681 ret = lttng_metadata_printf(session,
2682 "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
2683 "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
2684 "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
2685 "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
2686 "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
2687 "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
2688 "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
2689 "\n"
2690 "trace {\n"
2691 " major = %u;\n"
2692 " minor = %u;\n"
2693 " uuid = \"%s\";\n"
2694 " byte_order = %s;\n"
2695 " packet.header := struct {\n"
2696 " uint32_t magic;\n"
2697 " uint8_t uuid[16];\n"
2698 " uint32_t stream_id;\n"
2699 " uint64_t stream_instance_id;\n"
2700 " };\n"
2701 "};\n\n",
2702 lttng_alignof(uint8_t) * CHAR_BIT,
2703 lttng_alignof(uint16_t) * CHAR_BIT,
2704 lttng_alignof(uint32_t) * CHAR_BIT,
2705 lttng_alignof(uint64_t) * CHAR_BIT,
2706 sizeof(unsigned long) * CHAR_BIT,
2707 lttng_alignof(unsigned long) * CHAR_BIT,
2708 CTF_SPEC_MAJOR,
2709 CTF_SPEC_MINOR,
2710 uuid_s,
2711 #if __BYTE_ORDER == __BIG_ENDIAN
2712 "be"
2713 #else
2714 "le"
2715 #endif
2716 );
2717 if (ret)
2718 goto end;
2719
2720 ret = lttng_metadata_printf(session,
2721 "env {\n"
2722 " hostname = \"%s\";\n"
2723 " domain = \"kernel\";\n"
2724 " sysname = \"%s\";\n"
2725 " kernel_release = \"%s\";\n"
2726 " kernel_version = \"%s\";\n"
2727 " tracer_name = \"lttng-modules\";\n"
2728 " tracer_major = %d;\n"
2729 " tracer_minor = %d;\n"
2730 " tracer_patchlevel = %d;\n"
2731 " trace_buffering_scheme = \"global\";\n",
2732 current->nsproxy->uts_ns->name.nodename,
2733 utsname()->sysname,
2734 utsname()->release,
2735 utsname()->version,
2736 LTTNG_MODULES_MAJOR_VERSION,
2737 LTTNG_MODULES_MINOR_VERSION,
2738 LTTNG_MODULES_PATCHLEVEL_VERSION
2739 );
2740 if (ret)
2741 goto end;
2742
2743 ret = print_metadata_escaped_field(session, "trace_name", session->name);
2744 if (ret)
2745 goto end;
2746 ret = print_metadata_escaped_field(session, "trace_creation_datetime",
2747 session->creation_time);
2748 if (ret)
2749 goto end;
2750
2751 /* Close env */
2752 ret = lttng_metadata_printf(session, "};\n\n");
2753 if (ret)
2754 goto end;
2755
2756 ret = lttng_metadata_printf(session,
2757 "clock {\n"
2758 " name = \"%s\";\n",
2759 trace_clock_name()
2760 );
2761 if (ret)
2762 goto end;
2763
2764 if (!trace_clock_uuid(clock_uuid_s)) {
2765 ret = lttng_metadata_printf(session,
2766 " uuid = \"%s\";\n",
2767 clock_uuid_s
2768 );
2769 if (ret)
2770 goto end;
2771 }
2772
2773 ret = lttng_metadata_printf(session,
2774 " description = \"%s\";\n"
2775 " freq = %llu; /* Frequency, in Hz */\n"
2776 " /* clock value offset from Epoch is: offset * (1/freq) */\n"
2777 " offset = %lld;\n"
2778 "};\n\n",
2779 trace_clock_description(),
2780 (unsigned long long) trace_clock_freq(),
2781 (long long) measure_clock_offset()
2782 );
2783 if (ret)
2784 goto end;
2785
2786 ret = lttng_metadata_printf(session,
2787 "typealias integer {\n"
2788 " size = 27; align = 1; signed = false;\n"
2789 " map = clock.%s.value;\n"
2790 "} := uint27_clock_monotonic_t;\n"
2791 "\n"
2792 "typealias integer {\n"
2793 " size = 32; align = %u; signed = false;\n"
2794 " map = clock.%s.value;\n"
2795 "} := uint32_clock_monotonic_t;\n"
2796 "\n"
2797 "typealias integer {\n"
2798 " size = 64; align = %u; signed = false;\n"
2799 " map = clock.%s.value;\n"
2800 "} := uint64_clock_monotonic_t;\n\n",
2801 trace_clock_name(),
2802 lttng_alignof(uint32_t) * CHAR_BIT,
2803 trace_clock_name(),
2804 lttng_alignof(uint64_t) * CHAR_BIT,
2805 trace_clock_name()
2806 );
2807 if (ret)
2808 goto end;
2809
2810 ret = _lttng_stream_packet_context_declare(session);
2811 if (ret)
2812 goto end;
2813
2814 ret = _lttng_event_header_declare(session);
2815 if (ret)
2816 goto end;
2817
2818 skip_session:
2819 list_for_each_entry(chan, &session->chan, list) {
2820 ret = _lttng_channel_metadata_statedump(session, chan);
2821 if (ret)
2822 goto end;
2823 }
2824
2825 list_for_each_entry(event, &session->events, list) {
2826 ret = _lttng_event_metadata_statedump(session, event->chan, event);
2827 if (ret)
2828 goto end;
2829 }
2830 session->metadata_dumped = 1;
2831 end:
2832 lttng_metadata_end(session);
2833 return ret;
2834 }
2835
2836 /**
2837 * lttng_transport_register - LTT transport registration
2838 * @transport: transport structure
2839 *
2840 * Registers a transport which can be used as output to extract the data out of
2841 * LTTng. The module calling this registration function must ensure that no
2842 * trap-inducing code will be executed by the transport functions. E.g.
2843 * vmalloc_sync_mappings() must be called between a vmalloc and the moment the memory
2844  * is made visible to the transport function. This registration itself acts as a
2845  * vmalloc_sync_mappings(), so a module only needs to synchronize the TLBs on its
2846  * own if it allocates virtual memory after its registration.
2847 */
2848 void lttng_transport_register(struct lttng_transport *transport)
2849 {
2850 /*
2851 * Make sure no page fault can be triggered by the module about to be
2852 * registered. We deal with this here so we don't have to call
2853 * vmalloc_sync_mappings() in each module's init.
2854 */
2855 wrapper_vmalloc_sync_mappings();
2856
2857 mutex_lock(&sessions_mutex);
2858 list_add_tail(&transport->node, &lttng_transport_list);
2859 mutex_unlock(&sessions_mutex);
2860 }
2861 EXPORT_SYMBOL_GPL(lttng_transport_register);
2862
2863 /**
2864 * lttng_transport_unregister - LTT transport unregistration
2865 * @transport: transport structure
2866 */
2867 void lttng_transport_unregister(struct lttng_transport *transport)
2868 {
2869 mutex_lock(&sessions_mutex);
2870 list_del(&transport->node);
2871 mutex_unlock(&sessions_mutex);
2872 }
2873 EXPORT_SYMBOL_GPL(lttng_transport_unregister);
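
/*
 * Illustrative sketch (not part of this file): a hypothetical ring buffer
 * client module would typically declare a struct lttng_transport with its
 * name, owner and channel operations, register it from its module init,
 * and unregister it on exit. The names below ("relay-example", the example
 * init/exit functions) are placeholders, not existing symbols:
 *
 *	static struct lttng_transport lttng_example_transport = {
 *		.name = "relay-example",
 *		.owner = THIS_MODULE,
 *		.ops = {
 *			// channel operations provided by the client
 *		},
 *	};
 *
 *	static int __init lttng_example_client_init(void)
 *	{
 *		lttng_transport_register(&lttng_example_transport);
 *		return 0;
 *	}
 *	module_init(lttng_example_client_init);
 *
 *	static void __exit lttng_example_client_exit(void)
 *	{
 *		lttng_transport_unregister(&lttng_example_transport);
 *	}
 *	module_exit(lttng_example_client_exit);
 */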
2874
2875 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
2876
2877 enum cpuhp_state lttng_hp_prepare;
2878 enum cpuhp_state lttng_hp_online;
2879
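/*
 * CPU hotplug prepare-stage callback: only the ring buffer backend has
 * work to do before a CPU is brought up; the other components return
 * success without action.
 */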
2880 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
2881 {
2882 struct lttng_cpuhp_node *lttng_node;
2883
2884 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
2885 switch (lttng_node->component) {
2886 case LTTNG_RING_BUFFER_FRONTEND:
2887 return 0;
2888 case LTTNG_RING_BUFFER_BACKEND:
2889 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
2890 case LTTNG_RING_BUFFER_ITER:
2891 return 0;
2892 case LTTNG_CONTEXT_PERF_COUNTERS:
2893 return 0;
2894 default:
2895 return -EINVAL;
2896 }
2897 }
2898
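/*
 * CPU hotplug teardown callback for the prepare state: dispatch to the
 * ring buffer frontend and to the perf counter context once a CPU has
 * gone down.
 */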
2899 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
2900 {
2901 struct lttng_cpuhp_node *lttng_node;
2902
2903 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
2904 switch (lttng_node->component) {
2905 case LTTNG_RING_BUFFER_FRONTEND:
2906 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
2907 case LTTNG_RING_BUFFER_BACKEND:
2908 return 0;
2909 case LTTNG_RING_BUFFER_ITER:
2910 return 0;
2911 case LTTNG_CONTEXT_PERF_COUNTERS:
2912 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
2913 default:
2914 return -EINVAL;
2915 }
2916 }
2917
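/*
 * CPU hotplug online callback: dispatch to the ring buffer frontend,
 * the ring buffer iterator and the perf counter context when a CPU
 * comes online.
 */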
2918 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
2919 {
2920 struct lttng_cpuhp_node *lttng_node;
2921
2922 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
2923 switch (lttng_node->component) {
2924 case LTTNG_RING_BUFFER_FRONTEND:
2925 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
2926 case LTTNG_RING_BUFFER_BACKEND:
2927 return 0;
2928 case LTTNG_RING_BUFFER_ITER:
2929 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
2930 case LTTNG_CONTEXT_PERF_COUNTERS:
2931 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
2932 default:
2933 return -EINVAL;
2934 }
2935 }
2936
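/*
 * CPU hotplug offline teardown callback: only the ring buffer frontend
 * needs to act when a CPU goes offline.
 */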
2937 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
2938 {
2939 struct lttng_cpuhp_node *lttng_node;
2940
2941 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
2942 switch (lttng_node->component) {
2943 case LTTNG_RING_BUFFER_FRONTEND:
2944 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
2945 case LTTNG_RING_BUFFER_BACKEND:
2946 return 0;
2947 case LTTNG_RING_BUFFER_ITER:
2948 return 0;
2949 case LTTNG_CONTEXT_PERF_COUNTERS:
2950 return 0;
2951 default:
2952 return -EINVAL;
2953 }
2954 }
2955
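/*
 * Register two dynamic CPU hotplug multi-instance states: a prepare
 * stage (with lttng_hotplug_dead as teardown) and an online stage (with
 * lttng_hotplug_offline as teardown). The returned state numbers are
 * shared with the ring buffer code through lttng_rb_set_hp_prepare()
 * and lttng_rb_set_hp_online().
 */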
2956 static int __init lttng_init_cpu_hotplug(void)
2957 {
2958 int ret;
2959
2960 ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
2961 lttng_hotplug_prepare,
2962 lttng_hotplug_dead);
2963 if (ret < 0) {
2964 return ret;
2965 }
2966 lttng_hp_prepare = ret;
2967 lttng_rb_set_hp_prepare(ret);
2968
2969 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
2970 lttng_hotplug_online,
2971 lttng_hotplug_offline);
2972 if (ret < 0) {
2973 cpuhp_remove_multi_state(lttng_hp_prepare);
2974 lttng_hp_prepare = 0;
2975 return ret;
2976 }
2977 lttng_hp_online = ret;
2978 lttng_rb_set_hp_online(ret);
2979
2980 return 0;
2981 }
2982
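/*
 * Remove the dynamic hotplug states registered by
 * lttng_init_cpu_hotplug() and clear the state numbers shared with the
 * ring buffer code.
 */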
2983 static void __exit lttng_exit_cpu_hotplug(void)
2984 {
2985 lttng_rb_set_hp_online(0);
2986 cpuhp_remove_multi_state(lttng_hp_online);
2987 lttng_rb_set_hp_prepare(0);
2988 cpuhp_remove_multi_state(lttng_hp_prepare);
2989 }
2990
2991 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
2992 static int lttng_init_cpu_hotplug(void)
2993 {
2994 return 0;
2995 }
2996 static void lttng_exit_cpu_hotplug(void)
2997 {
2998 }
2999 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
3000
3001
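/*
 * Module init: set up the wrappers, probes, contexts and tracepoint
 * infrastructure, create the event kmem cache, then register the LTTng
 * ABI, the logger and the CPU hotplug callbacks. On failure, previously
 * initialized subsystems are torn down in reverse order.
 */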
3002 static int __init lttng_events_init(void)
3003 {
3004 int ret;
3005
3006 ret = wrapper_lttng_fixup_sig(THIS_MODULE);
3007 if (ret)
3008 return ret;
3009 ret = wrapper_get_pfnblock_flags_mask_init();
3010 if (ret)
3011 return ret;
3012 ret = wrapper_get_pageblock_flags_mask_init();
3013 if (ret)
3014 return ret;
3015 ret = lttng_probes_init();
3016 if (ret)
3017 return ret;
3018 ret = lttng_context_init();
3019 if (ret)
3020 return ret;
3021 ret = lttng_tracepoint_init();
3022 if (ret)
3023 goto error_tp;
3024 event_cache = KMEM_CACHE(lttng_event, 0);
3025 if (!event_cache) {
3026 ret = -ENOMEM;
3027 goto error_kmem;
3028 }
3029 ret = lttng_abi_init();
3030 if (ret)
3031 goto error_abi;
3032 ret = lttng_logger_init();
3033 if (ret)
3034 goto error_logger;
3035 ret = lttng_init_cpu_hotplug();
3036 if (ret)
3037 goto error_hotplug;
3038 printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
3039 __stringify(LTTNG_MODULES_MAJOR_VERSION),
3040 __stringify(LTTNG_MODULES_MINOR_VERSION),
3041 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
3042 LTTNG_MODULES_EXTRAVERSION,
3043 LTTNG_VERSION_NAME,
3044 #ifdef LTTNG_EXTRA_VERSION_GIT
3045 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
3046 #else
3047 "",
3048 #endif
3049 #ifdef LTTNG_EXTRA_VERSION_NAME
3050 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
3051 #else
3052 "");
3053 #endif
3054 return 0;
3055
3056 error_hotplug:
3057 lttng_logger_exit();
3058 error_logger:
3059 lttng_abi_exit();
3060 error_abi:
3061 kmem_cache_destroy(event_cache);
3062 error_kmem:
3063 lttng_tracepoint_exit();
3064 error_tp:
3065 lttng_context_exit();
3066 printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
3067 __stringify(LTTNG_MODULES_MAJOR_VERSION),
3068 __stringify(LTTNG_MODULES_MINOR_VERSION),
3069 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
3070 LTTNG_MODULES_EXTRAVERSION,
3071 LTTNG_VERSION_NAME,
3072 #ifdef LTTNG_EXTRA_VERSION_GIT
3073 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
3074 #else
3075 "",
3076 #endif
3077 #ifdef LTTNG_EXTRA_VERSION_NAME
3078 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
3079 #else
3080 "");
3081 #endif
3082 return ret;
3083 }
3084
3085 module_init(lttng_events_init);
3086
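/*
 * Module exit: tear down in reverse order of initialization and destroy
 * any session that is still present.
 */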
3087 static void __exit lttng_events_exit(void)
3088 {
3089 struct lttng_session *session, *tmpsession;
3090
3091 lttng_exit_cpu_hotplug();
3092 lttng_logger_exit();
3093 lttng_abi_exit();
3094 list_for_each_entry_safe(session, tmpsession, &sessions, list)
3095 lttng_session_destroy(session);
3096 kmem_cache_destroy(event_cache);
3097 lttng_tracepoint_exit();
3098 lttng_context_exit();
3099 printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
3100 __stringify(LTTNG_MODULES_MAJOR_VERSION),
3101 __stringify(LTTNG_MODULES_MINOR_VERSION),
3102 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
3103 LTTNG_MODULES_EXTRAVERSION,
3104 LTTNG_VERSION_NAME,
3105 #ifdef LTTNG_EXTRA_VERSION_GIT
3106 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
3107 #else
3108 "",
3109 #endif
3110 #ifdef LTTNG_EXTRA_VERSION_NAME
3111 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
3112 #else
3113 "");
3114 #endif
3115 }
3116
3117 module_exit(lttng_events_exit);
3118
3119 #include "extra_version/patches.i"
3120 #ifdef LTTNG_EXTRA_VERSION_GIT
3121 MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
3122 #endif
3123 #ifdef LTTNG_EXTRA_VERSION_NAME
3124 MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
3125 #endif
3126 MODULE_LICENSE("GPL and additional rights");
3127 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
3128 MODULE_DESCRIPTION("LTTng tracer");
3129 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
3130 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
3131 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
3132 LTTNG_MODULES_EXTRAVERSION);