wrapper: remove poll wrapper
[lttng-modules.git] / lttng-events.c
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/jhash.h>
28 #include <linux/uaccess.h>
29 #include <linux/uuid.h>
30 #include <linux/dmi.h>
31 #include <linux/vmalloc.h>
32
33 #include <wrapper/random.h>
34 #include <wrapper/list.h>
35 #include <wrapper/types.h>
36 #include <lttng-kernel-version.h>
37 #include <lttng-events.h>
38 #include <lttng-tracer.h>
39 #include <lttng-abi-old.h>
40 #include <lttng-endian.h>
41 #include <lttng-string-utils.h>
42 #include <lttng-tracepoint.h>
43 #include <wrapper/ringbuffer/backend.h>
44 #include <wrapper/ringbuffer/frontend.h>
45
46 #define METADATA_CACHE_DEFAULT_SIZE 4096
47
48 static LIST_HEAD(sessions);
49 static LIST_HEAD(lttng_transport_list);
50 /*
51 * Protect the sessions and metadata caches.
52 */
53 static DEFINE_MUTEX(sessions_mutex);
54 static struct kmem_cache *event_cache;
55
56 static void lttng_session_lazy_sync_enablers(struct lttng_session *session);
57 static void lttng_session_sync_enablers(struct lttng_session *session);
58 static void lttng_enabler_destroy(struct lttng_enabler *enabler);
59
60 static void _lttng_event_destroy(struct lttng_event *event);
61 static void _lttng_channel_destroy(struct lttng_channel *chan);
62 static int _lttng_event_unregister(struct lttng_event *event);
63 static
64 int _lttng_event_metadata_statedump(struct lttng_session *session,
65 struct lttng_channel *chan,
66 struct lttng_event *event);
67 static
68 int _lttng_session_metadata_statedump(struct lttng_session *session);
69 static
70 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
71 static
72 int _lttng_type_statedump(struct lttng_session *session,
73 const struct lttng_type *type,
74 size_t nesting);
75 static
76 int _lttng_field_statedump(struct lttng_session *session,
77 const struct lttng_event_field *field,
78 size_t nesting);
79
/*
 * Wait for a full grace period of the RCU flavour used by tracing probes,
 * so that all in-flight probe callbacks are guaranteed to have completed
 * on return.
 *
 * Kernels >= 5.1 merged synchronize_sched() into synchronize_rcu(); older
 * kernels use synchronize_sched() because probes run with preemption
 * disabled.  On PREEMPT_RT(_FULL) kernels (config symbol name depends on
 * kernel version), an additional synchronize_rcu() is issued, since
 * preempt-disabled sections are preemptible there.
 */
void synchronize_trace(void)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
	synchronize_rcu();
#else
	synchronize_sched();
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
#ifdef CONFIG_PREEMPT_RT_FULL
	synchronize_rcu();
#endif
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
#ifdef CONFIG_PREEMPT_RT
	synchronize_rcu();
#endif
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
}
98
/* Acquire the global sessions mutex (protects sessions and metadata caches). */
void lttng_lock_sessions(void)
{
	mutex_lock(&sessions_mutex);
}
103
/* Release the global sessions mutex. */
void lttng_unlock_sessions(void)
{
	mutex_unlock(&sessions_mutex);
}
108
109 /*
110 * Called with sessions lock held.
111 */
112 int lttng_session_active(void)
113 {
114 struct lttng_session *iter;
115
116 list_for_each_entry(iter, &sessions, list) {
117 if (iter->active)
118 return 1;
119 }
120 return 0;
121 }
122
/*
 * Allocate and register a new tracing session together with its metadata
 * cache.  The session is added to the global "sessions" list under the
 * sessions_mutex and returned inactive.
 *
 * Returns the new session, or NULL on allocation failure.
 */
struct lttng_session *lttng_session_create(void)
{
	struct lttng_session *session;
	struct lttng_metadata_cache *metadata_cache;
	int i;

	mutex_lock(&sessions_mutex);
	session = kvzalloc_node(sizeof(struct lttng_session), GFP_KERNEL,
		       NUMA_NO_NODE);
	if (!session)
		goto err;
	INIT_LIST_HEAD(&session->chan);
	INIT_LIST_HEAD(&session->events);
	/* Trace UUID; also copied into the metadata cache below. */
	guid_gen(&session->uuid);

	metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
			GFP_KERNEL);
	if (!metadata_cache)
		goto err_free_session;
	metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
	if (!metadata_cache->data)
		goto err_free_cache;
	metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
	/* Cache is refcounted: shared with metadata streams until released. */
	kref_init(&metadata_cache->refcount);
	mutex_init(&metadata_cache->lock);
	session->metadata_cache = metadata_cache;
	INIT_LIST_HEAD(&metadata_cache->metadata_stream);
	memcpy(&metadata_cache->uuid, &session->uuid,
		sizeof(metadata_cache->uuid));
	INIT_LIST_HEAD(&session->enablers_head);
	/* Hash table for event lookup by name at creation time. */
	for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
		INIT_HLIST_HEAD(&session->events_ht.table[i]);
	list_add(&session->list, &sessions);
	/* Each ID tracker records its owning session and tracked ID type. */
	session->pid_tracker.session = session;
	session->pid_tracker.tracker_type = TRACKER_PID;
	session->vpid_tracker.session = session;
	session->vpid_tracker.tracker_type = TRACKER_VPID;
	session->uid_tracker.session = session;
	session->uid_tracker.tracker_type = TRACKER_UID;
	session->vuid_tracker.session = session;
	session->vuid_tracker.tracker_type = TRACKER_VUID;
	session->gid_tracker.session = session;
	session->gid_tracker.tracker_type = TRACKER_GID;
	session->vgid_tracker.session = session;
	session->vgid_tracker.tracker_type = TRACKER_VGID;
	mutex_unlock(&sessions_mutex);
	return session;

err_free_cache:
	kfree(metadata_cache);
err_free_session:
	kvfree(session);	/* matches kvzalloc_node() above */
err:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
179
/*
 * kref release callback for the metadata cache: frees the cache buffer
 * (vzalloc'd) and the cache descriptor itself.
 */
void metadata_cache_destroy(struct kref *kref)
{
	struct lttng_metadata_cache *cache =
		container_of(kref, struct lttng_metadata_cache, refcount);
	vfree(cache->data);
	kfree(cache);
}
187
/*
 * Tear down a session: mark it inactive, unregister all instrumentation,
 * wait for in-flight probes to drain, then free events, channels and
 * trackers, and drop the metadata cache reference.
 *
 * Ordering matters: unregistration must complete and a tracing grace
 * period must elapse (synchronize_trace()) before any event/channel
 * memory is freed.
 */
void lttng_session_destroy(struct lttng_session *session)
{
	struct lttng_channel *chan, *tmpchan;
	struct lttng_event *event, *tmpevent;
	struct lttng_metadata_stream *metadata_stream;
	struct lttng_enabler *enabler, *tmpenabler;
	int ret;

	mutex_lock(&sessions_mutex);
	WRITE_ONCE(session->active, 0);
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_unregister(chan);
		WARN_ON(ret);
	}
	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_unregister(event);
		WARN_ON(ret);
	}
	synchronize_trace(); /* Wait for in-flight events to complete */
	list_for_each_entry_safe(enabler, tmpenabler,
			&session->enablers_head, node)
		lttng_enabler_destroy(enabler);
	list_for_each_entry_safe(event, tmpevent, &session->events, list)
		_lttng_event_destroy(event);
	list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
		/* Metadata channels have their own release path. */
		BUG_ON(chan->channel_type == METADATA_CHANNEL);
		_lttng_channel_destroy(chan);
	}
	/* Wake up readers blocked on metadata streams so they can exit. */
	list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
		_lttng_metadata_channel_hangup(metadata_stream);
	lttng_id_tracker_destroy(&session->pid_tracker, false);
	lttng_id_tracker_destroy(&session->vpid_tracker, false);
	lttng_id_tracker_destroy(&session->uid_tracker, false);
	lttng_id_tracker_destroy(&session->vuid_tracker, false);
	lttng_id_tracker_destroy(&session->gid_tracker, false);
	lttng_id_tracker_destroy(&session->vgid_tracker, false);
	kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
	list_del(&session->list);
	mutex_unlock(&sessions_mutex);
	kvfree(session);
}
229
/*
 * Trigger a statedump for the given session, serialized against other
 * session operations by the sessions mutex.  Returns the statedump
 * start result.
 */
int lttng_session_statedump(struct lttng_session *session)
{
	int ret;

	mutex_lock(&sessions_mutex);
	ret = lttng_statedump_start(session);
	mutex_unlock(&sessions_mutex);
	return ret;
}
239
/*
 * Activate a session: sync enablers, pick per-channel event header
 * layout, clear quiescent stream state, mark the session active, then
 * dump metadata and kernel state.
 *
 * Returns 0 on success, -EBUSY if already active, or the statedump
 * error (in which case the session is deactivated again).
 */
int lttng_session_enable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (session->active) {
		ret = -EBUSY;
		goto end;
	}

	/* Set transient enabler state to "enabled" */
	session->tstate = 1;

	/* We need to sync enablers with session before activation. */
	lttng_session_sync_enablers(session);

	/*
	 * Snapshot the number of events per channel to know the type of header
	 * we need to use.
	 */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->header_type)
			continue; /* don't change it if session stop/restart */
		/* < 31 event IDs fit the compact header encoding. */
		if (chan->free_event_id < 31)
			chan->header_type = 1; /* compact */
		else
			chan->header_type = 2; /* large */
	}

	/* Clear each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_clear_quiescent_channel(chan->chan);
	}

	WRITE_ONCE(session->active, 1);
	WRITE_ONCE(session->been_active, 1);
	ret = _lttng_session_metadata_statedump(session);
	if (ret) {
		WRITE_ONCE(session->active, 0);
		goto end;
	}
	ret = lttng_statedump_start(session);
	if (ret)
		WRITE_ONCE(session->active, 0);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
290
/*
 * Deactivate a session: mark it inactive, sync enablers to the disabled
 * state, and set each non-metadata stream quiescent.
 *
 * Returns 0 on success, -EBUSY if the session is not active.
 */
int lttng_session_disable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}
	WRITE_ONCE(session->active, 0);

	/* Set transient enabler state to "disabled" */
	session->tstate = 0;
	lttng_session_sync_enablers(session);

	/* Set each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_set_quiescent_channel(chan->chan);
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
316
/*
 * Regenerate the session's metadata from scratch: reset the metadata
 * cache and all stream positions, clear the "dumped" flags on session,
 * channels and events, and re-run the metadata statedump.
 *
 * Returns 0 on success, -EBUSY if the session is not active, or the
 * statedump error.
 */
int lttng_session_metadata_regenerate(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;
	struct lttng_event *event;
	struct lttng_metadata_cache *cache = session->metadata_cache;
	struct lttng_metadata_stream *stream;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}

	mutex_lock(&cache->lock);
	memset(cache->data, 0, cache->cache_alloc);
	cache->metadata_written = 0;
	cache->version++;	/* signals readers that metadata changed */
	list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list) {
		stream->metadata_out = 0;
		stream->metadata_in = 0;
	}
	mutex_unlock(&cache->lock);

	/* Force a full re-dump of session, channel and event descriptions. */
	session->metadata_dumped = 0;
	list_for_each_entry(chan, &session->chan, list) {
		chan->metadata_dumped = 0;
	}

	list_for_each_entry(event, &session->events, list) {
		event->metadata_dumped = 0;
	}

	ret = _lttng_session_metadata_statedump(session);

end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
356
/*
 * Enable a data channel.  Metadata channels cannot be toggled (-EPERM);
 * enabling an already-enabled channel returns -EEXIST.
 */
int lttng_channel_enable(struct lttng_channel *channel)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (channel->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (channel->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set transient enabler state to "enabled" */
	channel->tstate = 1;
	lttng_session_sync_enablers(channel->session);
	/* Set atomically the state to "enabled" */
	WRITE_ONCE(channel->enabled, 1);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
379
/*
 * Disable a data channel.  Metadata channels cannot be toggled (-EPERM);
 * disabling an already-disabled channel returns -EEXIST.
 */
int lttng_channel_disable(struct lttng_channel *channel)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (channel->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (!channel->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set atomically the state to "disabled" */
	WRITE_ONCE(channel->enabled, 0);
	/* Set transient enabler state to "disabled" */
	channel->tstate = 0;
	lttng_session_sync_enablers(channel->session);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
402
/*
 * Enable an individual event.  Events in metadata channels cannot be
 * toggled (-EPERM); already-enabled events return -EEXIST.  Tracepoint
 * and syscall events return -EINVAL: their state is driven through
 * enablers, not toggled directly.
 */
int lttng_event_enable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_NOOP:
		WRITE_ONCE(event->enabled, 1);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		/* Kretprobes toggle both entry and return events together. */
		ret = lttng_kretprobes_event_enable_state(event, 1);
		break;
	case LTTNG_KERNEL_FUNCTION: /* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
438
/*
 * Disable an individual event.  Mirror image of lttng_event_enable():
 * -EPERM for metadata channels, -EEXIST if already disabled, -EINVAL
 * for tracepoint/syscall events (managed through enablers).
 */
int lttng_event_disable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (!event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_NOOP:
		WRITE_ONCE(event->enabled, 0);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		/* Kretprobes toggle both entry and return events together. */
		ret = lttng_kretprobes_event_enable_state(event, 0);
		break;
	case LTTNG_KERNEL_FUNCTION: /* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
474
475 static struct lttng_transport *lttng_transport_find(const char *name)
476 {
477 struct lttng_transport *transport;
478
479 list_for_each_entry(transport, &lttng_transport_list, node) {
480 if (!strcmp(transport->name, name))
481 return transport;
482 }
483 return NULL;
484 }
485
/*
 * Create a channel within a session, bound to the named transport.
 * Refuses to add a non-metadata channel to a session that has already
 * been active.  Takes a module reference on the transport for the
 * channel's lifetime.
 *
 * Returns the new channel, or NULL on any failure (unknown transport,
 * module refcount failure, allocation or ring-buffer creation error).
 */
struct lttng_channel *lttng_channel_create(struct lttng_session *session,
				       const char *transport_name,
				       void *buf_addr,
				       size_t subbuf_size, size_t num_subbuf,
				       unsigned int switch_timer_interval,
				       unsigned int read_timer_interval,
				       enum channel_type channel_type)
{
	struct lttng_channel *chan;
	struct lttng_transport *transport = NULL;

	mutex_lock(&sessions_mutex);
	if (session->been_active && channel_type != METADATA_CHANNEL)
		goto active;	/* Refuse to add channel to active session */
	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	/* Pin the transport module while the channel exists. */
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTT : Can't lock transport module.\n");
		goto notransport;
	}
	chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
	if (!chan)
		goto nomem;
	chan->session = session;
	chan->id = session->free_chan_id++;
	chan->ops = &transport->ops;
	/*
	 * Note: the channel creation op already writes into the packet
	 * headers. Therefore the "chan" information used as input
	 * should be already accessible.
	 */
	chan->chan = transport->ops.channel_create(transport_name,
			chan, buf_addr, subbuf_size, num_subbuf,
			switch_timer_interval, read_timer_interval);
	if (!chan->chan)
		goto create_error;
	chan->tstate = 1;
	chan->enabled = 1;
	chan->transport = transport;
	chan->channel_type = channel_type;
	list_add(&chan->list, &session->chan);
	mutex_unlock(&sessions_mutex);
	return chan;

create_error:
	kfree(chan);
nomem:
	if (transport)
		module_put(transport->owner);
notransport:
active:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
544
/*
 * Only used internally at session destruction for per-cpu channels, and
 * when metadata channel is released.
 * Needs to be called with sessions mutex held.
 *
 * Destroys the ring buffer, drops the transport module reference taken
 * at creation, unlinks the channel and frees it.
 */
static
void _lttng_channel_destroy(struct lttng_channel *chan)
{
	chan->ops->channel_destroy(chan->chan);
	module_put(chan->transport->owner);
	list_del(&chan->list);
	lttng_destroy_context(chan->ctx);
	kfree(chan);
}
559
/*
 * Public release path for metadata channels (per-cpu channels go through
 * session destruction instead).  Serializes the teardown with the
 * sessions mutex, which also protects the metadata cache.
 */
void lttng_metadata_channel_destroy(struct lttng_channel *chan)
{
	BUG_ON(chan->channel_type != METADATA_CHANNEL);

	/* Protect the metadata cache with the sessions_mutex. */
	mutex_lock(&sessions_mutex);
	_lttng_channel_destroy(chan);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
570
/*
 * Mark a metadata stream finalized and wake any reader blocked on it,
 * so userspace readers can observe end-of-stream and exit.
 */
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
{
	stream->finalized = 1;
	wake_up_interruptible(&stream->read_wait);
}
577
/*
 * Supports event creation while tracing session is active.
 * Needs to be called with sessions mutex held.
 *
 * Creates an event in @chan for the given instrumentation type @itype,
 * registering the underlying probe (kprobe/kretprobe/uprobe) when
 * applicable, emitting its metadata description, and linking it into the
 * session's event list and name hash table.
 *
 * Returns the new event, or ERR_PTR: -EMFILE when the channel ran out of
 * event IDs, -EEXIST for a duplicate name in the same channel, -ENOMEM,
 * -ENOENT/-EINVAL for probe lookup/registration failures.
 */
struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
				struct lttng_kernel_event *event_param,
				void *filter,
				const struct lttng_event_desc *event_desc,
				enum lttng_kernel_instrumentation itype)
{
	struct lttng_session *session = chan->session;
	struct lttng_event *event;
	const char *event_name;
	struct hlist_head *head;
	size_t name_len;
	uint32_t hash;
	int ret;

	/* free_event_id == -1U means the ID space is exhausted. */
	if (chan->free_event_id == -1U) {
		ret = -EMFILE;
		goto full;
	}

	/* Resolve the event name: from the descriptor for tracepoints,
	 * from the user-supplied parameters otherwise. */
	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		event_name = event_desc->name;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		event_name = event_param->name;
		break;
	case LTTNG_KERNEL_FUNCTION: /* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto type_error;
	}
	/* Reject duplicate (name, channel) pairs via the session hash table. */
	name_len = strlen(event_name);
	hash = jhash(event_name, name_len, 0);
	head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
	lttng_hlist_for_each_entry(event, head, hlist) {
		WARN_ON_ONCE(!event->desc);
		if (!strncmp(event->desc->name, event_name,
					LTTNG_KERNEL_SYM_NAME_LEN - 1)
				&& chan == event->chan) {
			ret = -EEXIST;
			goto exist;
		}
	}

	event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
	if (!event) {
		ret = -ENOMEM;
		goto cache_error;
	}
	event->chan = chan;
	event->filter = filter;
	event->id = chan->free_event_id++;
	event->instrumentation = itype;
	event->evtype = LTTNG_TYPE_EVENT;
	INIT_LIST_HEAD(&event->bytecode_runtime_head);
	INIT_LIST_HEAD(&event->enablers_ref_head);

	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Event will be enabled by enabler sync. */
		event->enabled = 0;
		event->registered = 0;
		event->desc = lttng_event_get(event_name);
		if (!event->desc) {
			ret = -ENOENT;
			goto register_error;
		}
		/* Populate lttng_event structure before event registration. */
		smp_wmb();
		break;
	case LTTNG_KERNEL_KPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();
		ret = lttng_kprobes_register(event_name,
				event_param->u.kprobe.symbol_name,
				event_param->u.kprobe.offset,
				event_param->u.kprobe.addr,
				event);
		if (ret) {
			ret = -EINVAL;
			goto register_error;
		}
		/* Pin the probe provider module while the event exists. */
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_KRETPROBE:
	{
		struct lttng_event *event_return;

		/* kretprobe defines 2 events */
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		event_return =
			kmem_cache_zalloc(event_cache, GFP_KERNEL);
		if (!event_return) {
			ret = -ENOMEM;
			goto register_error;
		}
		event_return->chan = chan;
		event_return->filter = filter;
		event_return->id = chan->free_event_id++;
		event_return->enabled = 0;
		event_return->registered = 1;
		event_return->instrumentation = itype;
		/*
		 * Populate lttng_event structure before kretprobe registration.
		 */
		smp_wmb();
		ret = lttng_kretprobes_register(event_name,
				event_param->u.kretprobe.symbol_name,
				event_param->u.kretprobe.offset,
				event_param->u.kretprobe.addr,
				event, event_return);
		if (ret) {
			kmem_cache_free(event_cache, event_return);
			ret = -EINVAL;
			goto register_error;
		}
		/* Take 2 refs on the module: one per event. */
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = _lttng_event_metadata_statedump(chan->session, chan,
						    event_return);
		WARN_ON_ONCE(ret > 0);
		if (ret) {
			/*
			 * NOTE(review): event_return is freed here without an
			 * explicit kretprobe unregistration visible in this
			 * path — confirm lttng_kretprobes teardown semantics.
			 */
			kmem_cache_free(event_cache, event_return);
			module_put(event->desc->owner);
			module_put(event->desc->owner);
			goto statedump_error;
		}
		list_add(&event_return->list, &chan->session->events);
		break;
	}
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 0;
		event->desc = event_desc;
		if (!event->desc) {
			ret = -EINVAL;
			goto register_error;
		}
		break;
	case LTTNG_KERNEL_UPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;

		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();

		ret = lttng_uprobes_register(event_param->name,
				event_param->u.uprobe.fd,
				event);
		if (ret)
			goto register_error;
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_FUNCTION: /* Fall-through */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto register_error;
	}
	/* Describe the new event in the trace metadata. */
	ret = _lttng_event_metadata_statedump(chan->session, chan, event);
	WARN_ON_ONCE(ret > 0);
	if (ret) {
		goto statedump_error;
	}
	hlist_add_head(&event->hlist, head);
	list_add(&event->list, &chan->session->events);
	return event;

statedump_error:
	/* If a statedump error occurs, events will not be readable. */
register_error:
	kmem_cache_free(event_cache, event);
cache_error:
exist:
type_error:
full:
	return ERR_PTR(ret);
}
796
/*
 * Public wrapper around _lttng_event_create(): takes the sessions mutex
 * for the duration of event creation.  Returns the new event or an
 * ERR_PTR (see _lttng_event_create()).
 */
struct lttng_event *lttng_event_create(struct lttng_channel *chan,
				struct lttng_kernel_event *event_param,
				void *filter,
				const struct lttng_event_desc *event_desc,
				enum lttng_kernel_instrumentation itype)
{
	struct lttng_event *event;

	mutex_lock(&sessions_mutex);
	event = _lttng_event_create(chan, event_param, filter, event_desc,
				itype);
	mutex_unlock(&sessions_mutex);
	return event;
}
811
/*
 * Register an event's probe with its instrumentation backend and mark it
 * registered on success.  No-op if already registered.  Comment in the
 * original says "only used for tracepoints for now"; syscall filter
 * enabling is also handled here.
 */
static
void register_event(struct lttng_event *event)
{
	const struct lttng_event_desc *desc;
	int ret = -EINVAL;

	if (event->registered)
		return;

	desc = event->desc;
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		ret = lttng_tracepoint_probe_register(desc->kname,
						    desc->probe_callback,
						    event);
		break;
	case LTTNG_KERNEL_SYSCALL:
		ret = lttng_syscall_filter_enable(event->chan,
			desc->name);
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_NOOP:
		/* These are registered at creation time; nothing to do. */
		ret = 0;
		break;
	case LTTNG_KERNEL_FUNCTION: /* Fall-through */
	default:
		WARN_ON_ONCE(1);
	}
	if (!ret)
		event->registered = 1;
}
846
/*
 * Only used internally at session destruction.
 *
 * Unregister an event's probe from its instrumentation backend and clear
 * its registered flag on success.  Returns 0 if already unregistered or
 * on success, a negative error otherwise.
 */
int _lttng_event_unregister(struct lttng_event *event)
{
	const struct lttng_event_desc *desc;
	int ret = -EINVAL;

	if (!event->registered)
		return 0;

	desc = event->desc;
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		ret = lttng_tracepoint_probe_unregister(event->desc->kname,
						  event->desc->probe_callback,
						  event);
		break;
	case LTTNG_KERNEL_KPROBE:
		lttng_kprobes_unregister(event);
		ret = 0;
		break;
	case LTTNG_KERNEL_KRETPROBE:
		lttng_kretprobes_unregister(event);
		ret = 0;
		break;
	case LTTNG_KERNEL_SYSCALL:
		ret = lttng_syscall_filter_disable(event->chan,
			desc->name);
		break;
	case LTTNG_KERNEL_NOOP:
		ret = 0;
		break;
	case LTTNG_KERNEL_UPROBE:
		lttng_uprobes_unregister(event);
		ret = 0;
		break;
	case LTTNG_KERNEL_FUNCTION: /* Fall-through */
	default:
		WARN_ON_ONCE(1);
	}
	if (!ret)
		event->registered = 0;
	return ret;
}
892
/*
 * Only used internally at session destruction.
 *
 * Release per-instrumentation resources (descriptor reference or probe
 * module reference plus private data), unlink the event and free it.
 * Must be called after the event has been unregistered and a tracing
 * grace period has elapsed.
 */
static
void _lttng_event_destroy(struct lttng_event *event)
{
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		lttng_event_put(event->desc);
		break;
	case LTTNG_KERNEL_KPROBE:
		module_put(event->desc->owner);
		lttng_kprobes_destroy_private(event);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		module_put(event->desc->owner);
		lttng_kretprobes_destroy_private(event);
		break;
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		/* No per-event backend resources to release. */
		break;
	case LTTNG_KERNEL_UPROBE:
		module_put(event->desc->owner);
		lttng_uprobes_destroy_private(event);
		break;
	case LTTNG_KERNEL_FUNCTION: /* Fall-through */
	default:
		WARN_ON_ONCE(1);
	}
	list_del(&event->list);
	lttng_destroy_context(event->ctx);
	kmem_cache_free(event_cache, event);
}
926
927 struct lttng_id_tracker *get_tracker(struct lttng_session *session,
928 enum tracker_type tracker_type)
929 {
930 switch (tracker_type) {
931 case TRACKER_PID:
932 return &session->pid_tracker;
933 case TRACKER_VPID:
934 return &session->vpid_tracker;
935 case TRACKER_UID:
936 return &session->uid_tracker;
937 case TRACKER_VUID:
938 return &session->vuid_tracker;
939 case TRACKER_GID:
940 return &session->gid_tracker;
941 case TRACKER_VGID:
942 return &session->vgid_tracker;
943 default:
944 WARN_ON_ONCE(1);
945 return NULL;
946 }
947 }
948
/*
 * Add @id to the given tracker's tracked set.  The special value -1
 * means "track everything": the tracker is destroyed, which disables
 * filtering entirely.  Returns -EINVAL for unknown tracker types or
 * ids below -1.
 */
int lttng_session_track_id(struct lttng_session *session,
		enum tracker_type tracker_type, int id)
{
	struct lttng_id_tracker *tracker;
	int ret;

	tracker = get_tracker(session, tracker_type);
	if (!tracker)
		return -EINVAL;
	if (id < -1)
		return -EINVAL;
	mutex_lock(&sessions_mutex);
	if (id == -1) {
		/* track all ids: destroy tracker. */
		lttng_id_tracker_destroy(tracker, true);
		ret = 0;
	} else {
		ret = lttng_id_tracker_add(tracker, id);
	}
	mutex_unlock(&sessions_mutex);
	return ret;
}
971
/*
 * Remove @id from the given tracker's tracked set.  The special value
 * -1 means "untrack everything": the tracker is replaced by an empty
 * set, so nothing matches.  Returns -EINVAL for unknown tracker types
 * or ids below -1.
 */
int lttng_session_untrack_id(struct lttng_session *session,
		enum tracker_type tracker_type, int id)
{
	struct lttng_id_tracker *tracker;
	int ret;

	tracker = get_tracker(session, tracker_type);
	if (!tracker)
		return -EINVAL;
	if (id < -1)
		return -EINVAL;
	mutex_lock(&sessions_mutex);
	if (id == -1) {
		/* untrack all ids: replace by empty tracker. */
		ret = lttng_id_tracker_empty_set(tracker);
	} else {
		ret = lttng_id_tracker_del(tracker, id);
	}
	mutex_unlock(&sessions_mutex);
	return ret;
}
993
/*
 * seq_file start callback for the tracker ID listing.  Takes the
 * sessions mutex (released in id_list_stop()) and walks the tracker's
 * hash table to the *pos'th entry.  A NULL tracker (tracking disabled,
 * i.e. "track all") is represented by a single NULL element.
 */
static
void *id_list_start(struct seq_file *m, loff_t *pos)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	mutex_lock(&sessions_mutex);
	if (id_tracker_p) {
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *pos)
					return e;
			}
		}
	} else {
		/* ID tracker disabled. */
		if (iter >= *pos && iter == 0) {
			return id_tracker_p;	/* empty tracker */
		}
		iter++;
	}
	/* End of list */
	return NULL;
}
1022
/* Called with sessions_mutex held. */
/*
 * seq_file next callback: advances *ppos and re-walks the hash table to
 * the new position (linear re-scan each step).  Returns the next entry,
 * or NULL at end of list.  The disabled-tracker pseudo-entry yields
 * exactly one element, so it always terminates here.
 */
static
void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	(*ppos)++;
	if (id_tracker_p) {
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *ppos)
					return e;
			}
		}
	} else {
		/* ID tracker disabled. */
		if (iter >= *ppos && iter == 0)
			return p;	/* empty tracker */
		iter++;
	}

	/* End of list */
	return NULL;
}
1052
/* seq_file stop callback: releases the mutex taken in id_list_start(). */
static
void id_list_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&sessions_mutex);
}
1058
/*
 * seq_file show callback: print one tracked ID in trace-session syntax.
 * A disabled tracker (p equal to the NULL tracker pointer) is rendered
 * as id -1, meaning "all IDs tracked".
 */
static
int id_list_show(struct seq_file *m, void *p)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	int id;

	if (p == id_tracker_p) {
		/* Tracker disabled. */
		id = -1;
	} else {
		const struct lttng_id_hash_node *e = p;

		id = lttng_id_tracker_get_node_id(e);
	}
	switch (id_tracker->tracker_type) {
	case TRACKER_PID:
		seq_printf(m,	"process { pid = %d; };\n", id);
		break;
	case TRACKER_VPID:
		seq_printf(m,	"process { vpid = %d; };\n", id);
		break;
	case TRACKER_UID:
		seq_printf(m,	"user { uid = %d; };\n", id);
		break;
	case TRACKER_VUID:
		seq_printf(m,	"user { vuid = %d; };\n", id);
		break;
	case TRACKER_GID:
		seq_printf(m,	"group { gid = %d; };\n", id);
		break;
	case TRACKER_VGID:
		seq_printf(m,	"group { vgid = %d; };\n", id);
		break;
	default:
		seq_printf(m,	"UNKNOWN { field = %d };\n", id);
	}
	return 0;
}
1098
/* seq_file operations for iterating a tracker's ID list. */
static
const struct seq_operations lttng_tracker_ids_list_seq_ops = {
	.start = id_list_start,
	.next = id_list_next,
	.stop = id_list_stop,
	.show = id_list_show,
};
1106
/* Open callback: attach the seq_file iterator to the file. */
static
int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &lttng_tracker_ids_list_seq_ops);
}
1112
/*
 * Release callback: tear down the seq_file and drop the session file
 * reference taken in lttng_session_list_tracker_ids().
 */
static
int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct lttng_id_tracker *id_tracker = m->private;
	int ret;

	WARN_ON_ONCE(!id_tracker);
	ret = seq_release(inode, file);
	if (!ret)
		fput(id_tracker->session->file);
	return ret;
}
1126
/* File operations for the anonymous tracker-ID listing file descriptor. */
const struct file_operations lttng_tracker_ids_list_fops = {
	.owner = THIS_MODULE,
	.open = lttng_tracker_ids_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = lttng_tracker_ids_list_release,
};
1134
/*
 * Create an anonymous-inode file listing the IDs tracked by the given
 * tracker type, and install it in the caller's fd table.  Pins the
 * session file (reference dropped in the release callback) so the
 * session outlives the listing fd.
 *
 * Returns the new fd on success, or a negative errno; each failure
 * label unwinds exactly what was acquired before it.
 */
int lttng_session_list_tracker_ids(struct lttng_session *session,
		enum tracker_type tracker_type)
{
	struct file *tracker_ids_list_file;
	struct seq_file *m;
	int file_fd, ret;

	file_fd = lttng_get_unused_fd();
	if (file_fd < 0) {
		ret = file_fd;
		goto fd_error;
	}

	tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
					  &lttng_tracker_ids_list_fops,
					  NULL, O_RDWR);
	if (IS_ERR(tracker_ids_list_file)) {
		ret = PTR_ERR(tracker_ids_list_file);
		goto file_error;
	}
	/* Pin the session file; guard against refcount overflow. */
	if (!atomic_long_add_unless(&session->file->f_count, 1, LONG_MAX)) {
		ret = -EOVERFLOW;
		goto refcount_error;
	}
	ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
	if (ret < 0)
		goto open_error;
	m = tracker_ids_list_file->private_data;

	m->private = get_tracker(session, tracker_type);
	BUG_ON(!m->private);
	fd_install(file_fd, tracker_ids_list_file);

	return file_fd;

open_error:
	atomic_long_dec(&session->file->f_count);
refcount_error:
	fput(tracker_ids_list_file);
file_error:
	put_unused_fd(file_fd);
fd_error:
	return ret;
}
1179
1180 /*
1181 * Enabler management.
1182 */
1183 static
1184 int lttng_match_enabler_star_glob(const char *desc_name,
1185 const char *pattern)
1186 {
1187 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1188 desc_name, LTTNG_SIZE_MAX))
1189 return 0;
1190 return 1;
1191 }
1192
static
int lttng_match_enabler_name(const char *desc_name,
		const char *name)
{
	/* Exact string comparison: 1 on match, 0 otherwise. */
	return strcmp(desc_name, name) == 0;
}
1201
1202 static
1203 int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
1204 struct lttng_enabler *enabler)
1205 {
1206 const char *desc_name, *enabler_name;
1207
1208 enabler_name = enabler->event_param.name;
1209 switch (enabler->event_param.instrumentation) {
1210 case LTTNG_KERNEL_TRACEPOINT:
1211 desc_name = desc->name;
1212 break;
1213 case LTTNG_KERNEL_SYSCALL:
1214 desc_name = desc->name;
1215 if (!strncmp(desc_name, "compat_", strlen("compat_")))
1216 desc_name += strlen("compat_");
1217 if (!strncmp(desc_name, "syscall_exit_",
1218 strlen("syscall_exit_"))) {
1219 desc_name += strlen("syscall_exit_");
1220 } else if (!strncmp(desc_name, "syscall_entry_",
1221 strlen("syscall_entry_"))) {
1222 desc_name += strlen("syscall_entry_");
1223 } else {
1224 WARN_ON_ONCE(1);
1225 return -EINVAL;
1226 }
1227 break;
1228 default:
1229 WARN_ON_ONCE(1);
1230 return -EINVAL;
1231 }
1232 switch (enabler->type) {
1233 case LTTNG_ENABLER_STAR_GLOB:
1234 return lttng_match_enabler_star_glob(desc_name, enabler_name);
1235 case LTTNG_ENABLER_NAME:
1236 return lttng_match_enabler_name(desc_name, enabler_name);
1237 default:
1238 return -EINVAL;
1239 }
1240 }
1241
1242 static
1243 int lttng_event_match_enabler(struct lttng_event *event,
1244 struct lttng_enabler *enabler)
1245 {
1246 if (enabler->event_param.instrumentation != event->instrumentation)
1247 return 0;
1248 if (lttng_desc_match_enabler(event->desc, enabler)
1249 && event->chan == enabler->chan)
1250 return 1;
1251 else
1252 return 0;
1253 }
1254
1255 static
1256 struct lttng_enabler_ref *lttng_event_enabler_ref(struct lttng_event *event,
1257 struct lttng_enabler *enabler)
1258 {
1259 struct lttng_enabler_ref *enabler_ref;
1260
1261 list_for_each_entry(enabler_ref,
1262 &event->enablers_ref_head, node) {
1263 if (enabler_ref->ref == enabler)
1264 return enabler_ref;
1265 }
1266 return NULL;
1267 }
1268
/*
 * For each registered tracepoint probe event that matches the enabler,
 * create the corresponding lttng_event in the enabler's channel if it
 * does not already exist. Should be called with sessions mutex held.
 */
static
void lttng_create_tracepoint_if_missing(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_probe_desc *probe_desc;
	const struct lttng_event_desc *desc;
	int i;
	struct list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	/*
	 * For each probe event, if we find that a probe event matches
	 * our enabler, create an associated lttng_event if not
	 * already present.
	 */
	list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			int found = 0;
			struct hlist_head *head;
			const char *event_name;
			size_t name_len;
			uint32_t hash;
			struct lttng_event *event;

			desc = probe_desc->event_desc[i];
			if (!lttng_desc_match_enabler(desc, enabler))
				continue;
			event_name = desc->name;
			name_len = strlen(event_name);

			/*
			 * Check if already created: look up the session's
			 * event hash table bucket for this name.
			 */
			hash = jhash(event_name, name_len, 0);
			head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
			lttng_hlist_for_each_entry(event, head, hlist) {
				/* Same descriptor AND same channel means a duplicate. */
				if (event->desc == desc
						&& event->chan == enabler->chan)
					found = 1;
			}
			if (found)
				continue;

			/*
			 * We need to create an event for this
			 * event probe.
			 */
			event = _lttng_event_create(enabler->chan,
					NULL, NULL, desc,
					LTTNG_KERNEL_TRACEPOINT);
			if (!event) {
				/* Creation failure is logged but non-fatal. */
				printk(KERN_INFO "Unable to create event %s\n",
					probe_desc->event_desc[i]->name);
			}
		}
	}
}
1326
1327 static
1328 void lttng_create_syscall_if_missing(struct lttng_enabler *enabler)
1329 {
1330 int ret;
1331
1332 ret = lttng_syscalls_register(enabler->chan, NULL);
1333 WARN_ON_ONCE(ret);
1334 }
1335
1336 /*
1337 * Create struct lttng_event if it is missing and present in the list of
1338 * tracepoint probes.
1339 * Should be called with sessions mutex held.
1340 */
static
void lttng_create_event_if_missing(struct lttng_enabler *enabler)
{
	/* Dispatch on the enabler's instrumentation type. */
	switch (enabler->event_param.instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		lttng_create_tracepoint_if_missing(enabler);
		break;
	case LTTNG_KERNEL_SYSCALL:
		lttng_create_syscall_if_missing(enabler);
		break;
	default:
		/* Other instrumentation types are not lazily created. */
		WARN_ON_ONCE(1);
		break;
	}
}
1356
1357 /*
1358 * Create events associated with an enabler (if not already present),
1359 * and add backward reference from the event to the enabler.
1360 * Should be called with sessions mutex held.
1361 */
static
int lttng_enabler_ref_events(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_event *event;

	/* First ensure that probe events are created for this enabler. */
	lttng_create_event_if_missing(enabler);

	/* For each event matching enabler in session event list. */
	list_for_each_entry(event, &session->events, list) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_match_enabler(event, enabler))
			continue;
		enabler_ref = lttng_event_enabler_ref(event, enabler);
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event to enabler.
			 */
			enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
			if (!enabler_ref)
				/* Abort the sync; refs added so far are kept. */
				return -ENOMEM;
			enabler_ref->ref = enabler;
			list_add(&enabler_ref->node,
				&event->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_event_link_bytecode(event, enabler);

		/* TODO: merge event context. */
	}
	return 0;
}
1400
1401 /*
1402 * Called at module load: connect the probe on all enablers matching
1403 * this event.
1404 * Called with sessions lock held.
1405 */
1406 int lttng_fix_pending_events(void)
1407 {
1408 struct lttng_session *session;
1409
1410 list_for_each_entry(session, &sessions, list)
1411 lttng_session_lazy_sync_enablers(session);
1412 return 0;
1413 }
1414
1415 struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
1416 struct lttng_kernel_event *event_param,
1417 struct lttng_channel *chan)
1418 {
1419 struct lttng_enabler *enabler;
1420
1421 enabler = kzalloc(sizeof(*enabler), GFP_KERNEL);
1422 if (!enabler)
1423 return NULL;
1424 enabler->type = type;
1425 INIT_LIST_HEAD(&enabler->filter_bytecode_head);
1426 memcpy(&enabler->event_param, event_param,
1427 sizeof(enabler->event_param));
1428 enabler->chan = chan;
1429 /* ctx left NULL */
1430 enabler->enabled = 0;
1431 enabler->evtype = LTTNG_TYPE_ENABLER;
1432 mutex_lock(&sessions_mutex);
1433 list_add(&enabler->node, &enabler->chan->session->enablers_head);
1434 lttng_session_lazy_sync_enablers(enabler->chan->session);
1435 mutex_unlock(&sessions_mutex);
1436 return enabler;
1437 }
1438
1439 int lttng_enabler_enable(struct lttng_enabler *enabler)
1440 {
1441 mutex_lock(&sessions_mutex);
1442 enabler->enabled = 1;
1443 lttng_session_lazy_sync_enablers(enabler->chan->session);
1444 mutex_unlock(&sessions_mutex);
1445 return 0;
1446 }
1447
1448 int lttng_enabler_disable(struct lttng_enabler *enabler)
1449 {
1450 mutex_lock(&sessions_mutex);
1451 enabler->enabled = 0;
1452 lttng_session_lazy_sync_enablers(enabler->chan->session);
1453 mutex_unlock(&sessions_mutex);
1454 return 0;
1455 }
1456
1457 int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
1458 struct lttng_kernel_filter_bytecode __user *bytecode)
1459 {
1460 struct lttng_filter_bytecode_node *bytecode_node;
1461 uint32_t bytecode_len;
1462 int ret;
1463
1464 ret = get_user(bytecode_len, &bytecode->len);
1465 if (ret)
1466 return ret;
1467 bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
1468 GFP_KERNEL);
1469 if (!bytecode_node)
1470 return -ENOMEM;
1471 ret = copy_from_user(&bytecode_node->bc, bytecode,
1472 sizeof(*bytecode) + bytecode_len);
1473 if (ret)
1474 goto error_free;
1475 bytecode_node->enabler = enabler;
1476 /* Enforce length based on allocated size */
1477 bytecode_node->bc.len = bytecode_len;
1478 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
1479 lttng_session_lazy_sync_enablers(enabler->chan->session);
1480 return 0;
1481
1482 error_free:
1483 kfree(bytecode_node);
1484 return ret;
1485 }
1486
1487 int lttng_event_add_callsite(struct lttng_event *event,
1488 struct lttng_kernel_event_callsite __user *callsite)
1489 {
1490
1491 switch (event->instrumentation) {
1492 case LTTNG_KERNEL_UPROBE:
1493 return lttng_uprobes_add_callsite(event, callsite);
1494 default:
1495 return -EINVAL;
1496 }
1497 }
1498
int lttng_enabler_attach_context(struct lttng_enabler *enabler,
		struct lttng_kernel_context *context_param)
{
	/* Per-enabler context attachment is not implemented. */
	return -ENOSYS;
}
1504
1505 static
1506 void lttng_enabler_destroy(struct lttng_enabler *enabler)
1507 {
1508 struct lttng_filter_bytecode_node *filter_node, *tmp_filter_node;
1509
1510 /* Destroy filter bytecode */
1511 list_for_each_entry_safe(filter_node, tmp_filter_node,
1512 &enabler->filter_bytecode_head, node) {
1513 kfree(filter_node);
1514 }
1515
1516 /* Destroy contexts */
1517 lttng_destroy_context(enabler->ctx);
1518
1519 list_del(&enabler->node);
1520 kfree(enabler);
1521 }
1522
1523 /*
1524 * lttng_session_sync_enablers should be called just before starting a
1525 * session.
1526 * Should be called with sessions mutex held.
1527 */
static
void lttng_session_sync_enablers(struct lttng_session *session)
{
	struct lttng_enabler *enabler;
	struct lttng_event *event;

	/* Make sure every enabler has created and referenced its events. */
	list_for_each_entry(enabler, &session->enablers_head, node)
		lttng_enabler_ref_events(enabler);
	/*
	 * For each event, if at least one of its enablers is enabled,
	 * and its channel and session transient states are enabled, we
	 * enable the event, else we disable it.
	 */
	list_for_each_entry(event, &session->events, list) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		switch (event->instrumentation) {
		case LTTNG_KERNEL_TRACEPOINT:
		case LTTNG_KERNEL_SYSCALL:
			/* Enable events */
			list_for_each_entry(enabler_ref,
					&event->enablers_ref_head, node) {
				if (enabler_ref->ref->enabled) {
					enabled = 1;
					break;
				}
			}
			break;
		default:
			/* Not handled with lazy sync. */
			continue;
		}
		/*
		 * Enabled state is based on union of enablers, with
		 * intersection of session and channel transient enable
		 * states.
		 */
		enabled = enabled && session->tstate && event->chan->tstate;

		WRITE_ONCE(event->enabled, enabled);
		/*
		 * Sync tracepoint registration with event enabled
		 * state.
		 */
		if (enabled) {
			register_event(event);
		} else {
			_lttng_event_unregister(event);
		}

		/* Check if has enablers without bytecode enabled */
		list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters */
		list_for_each_entry(runtime,
				&event->bytecode_runtime_head, node)
			lttng_filter_sync_state(runtime);
	}
}
1598
1599 /*
1600 * Apply enablers to session events, adding events to session if need
1601 * be. It is required after each modification applied to an active
1602 * session, and right before session "start".
1603 * "lazy" sync means we only sync if required.
1604 * Should be called with sessions mutex held.
1605 */
1606 static
1607 void lttng_session_lazy_sync_enablers(struct lttng_session *session)
1608 {
1609 /* We can skip if session is not active */
1610 if (!session->active)
1611 return;
1612 lttng_session_sync_enablers(session);
1613 }
1614
1615 /*
1616 * Serialize at most one packet worth of metadata into a metadata
1617 * channel.
1618 * We grab the metadata cache mutex to get exclusive access to our metadata
1619 * buffer and to the metadata cache. Exclusive access to the metadata buffer
1620 * allows us to do racy operations such as looking for remaining space left in
1621 * packet and write, since mutual exclusion protects us from concurrent writes.
1622 * Mutual exclusion on the metadata cache allow us to read the cache content
1623 * without racing against reallocation of the cache by updates.
1624 * Returns the number of bytes written in the channel, 0 if no data
1625 * was written and a negative value on error.
1626 */
int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
		struct channel *chan)
{
	struct lib_ring_buffer_ctx ctx;
	int ret = 0;
	size_t len, reserve_len;

	/*
	 * Ensure we support multiple get_next / put sequences followed by
	 * put_next. The metadata cache lock protects reading the metadata
	 * cache. It can indeed be read concurrently by "get_next_subbuf" and
	 * "flush" operations on the buffer invoked by different processes.
	 * Moreover, since the metadata cache memory can be reallocated, we
	 * need to have exclusive access against updates even though we only
	 * read it.
	 */
	mutex_lock(&stream->metadata_cache->lock);
	WARN_ON(stream->metadata_in < stream->metadata_out);
	/* A previously output packet is still being consumed: emit nothing. */
	if (stream->metadata_in != stream->metadata_out)
		goto end;

	/* Metadata regenerated, change the version. */
	if (stream->metadata_cache->version != stream->version)
		stream->version = stream->metadata_cache->version;

	/* Bytes cached but not yet pushed into this stream. */
	len = stream->metadata_cache->metadata_written -
		stream->metadata_in;
	if (!len)
		goto end;
	/* Clamp to the space left in the current packet. */
	reserve_len = min_t(size_t,
			stream->transport->ops.packet_avail_size(chan),
			len);
	lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
			sizeof(char), -1);
	/*
	 * If reservation failed, return an error to the caller.
	 */
	ret = stream->transport->ops.event_reserve(&ctx, 0);
	if (ret != 0) {
		printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
		goto end;
	}
	stream->transport->ops.event_write(&ctx,
			stream->metadata_cache->data + stream->metadata_in,
			reserve_len);
	stream->transport->ops.event_commit(&ctx);
	stream->metadata_in += reserve_len;
	/* Report the number of bytes written to the channel. */
	ret = reserve_len;

end:
	mutex_unlock(&stream->metadata_cache->lock);
	return ret;
}
1680
1681 /*
1682 * Write the metadata to the metadata cache.
1683 * Must be called with sessions_mutex held.
1684 * The metadata cache lock protects us from concurrent read access from
1685 * thread outputting metadata content to ring buffer.
1686 */
int lttng_metadata_printf(struct lttng_session *session,
		const char *fmt, ...)
{
	char *str;
	size_t len;
	va_list ap;
	struct lttng_metadata_stream *stream;

	WARN_ON_ONCE(!READ_ONCE(session->active));

	/* Format into a kernel-allocated string first. */
	va_start(ap, fmt);
	str = kvasprintf(GFP_KERNEL, fmt, ap);
	va_end(ap);
	if (!str)
		return -ENOMEM;

	len = strlen(str);
	mutex_lock(&session->metadata_cache->lock);
	if (session->metadata_cache->metadata_written + len >
			session->metadata_cache->cache_alloc) {
		char *tmp_cache_realloc;
		unsigned int tmp_cache_alloc_size;

		/* Grow the cache: at least double it, or enough to fit. */
		tmp_cache_alloc_size = max_t(unsigned int,
				session->metadata_cache->cache_alloc + len,
				session->metadata_cache->cache_alloc << 1);
		tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
		if (!tmp_cache_realloc)
			goto err;
		/* Copy over and release the old backing store, if any. */
		if (session->metadata_cache->data) {
			memcpy(tmp_cache_realloc,
				session->metadata_cache->data,
				session->metadata_cache->cache_alloc);
			vfree(session->metadata_cache->data);
		}

		session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
		session->metadata_cache->data = tmp_cache_realloc;
	}
	memcpy(session->metadata_cache->data +
			session->metadata_cache->metadata_written,
			str, len);
	session->metadata_cache->metadata_written += len;
	mutex_unlock(&session->metadata_cache->lock);
	kfree(str);

	/* Wake readers blocked on the metadata streams. */
	list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
		wake_up_interruptible(&stream->read_wait);

	return 0;

err:
	mutex_unlock(&session->metadata_cache->lock);
	kfree(str);
	return -ENOMEM;
}
1743
1744 static
1745 int print_tabs(struct lttng_session *session, size_t nesting)
1746 {
1747 size_t i;
1748
1749 for (i = 0; i < nesting; i++) {
1750 int ret;
1751
1752 ret = lttng_metadata_printf(session, " ");
1753 if (ret) {
1754 return ret;
1755 }
1756 }
1757 return 0;
1758 }
1759
/* Emit the '_'-prefixed CTF field name terminating a type declaration. */
static
int lttng_field_name_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting)
{
	return lttng_metadata_printf(session, " _%s;\n", field->name);
}
1767
/*
 * Dump the CTF "integer" type declaration.
 * Must be called with sessions_mutex held.
 */
static
int _lttng_integer_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting)
{
	int ret;

	WARN_ON_ONCE(type->atype != atype_integer);
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	ret = lttng_metadata_printf(session,
		"integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
		type->u.integer.size,
		type->u.integer.alignment,
		type->u.integer.signedness,
		(type->u.integer.encoding == lttng_encode_none)
			? "none"
			: (type->u.integer.encoding == lttng_encode_UTF8)
				? "UTF8"
				: "ASCII",
		type->u.integer.base,
#if __BYTE_ORDER == __BIG_ENDIAN
		/* Reversed byte order on a big-endian host means LE data. */
		type->u.integer.reverse_byte_order ? " byte_order = le;" : ""
#else
		/* Reversed byte order on a little-endian host means BE data. */
		type->u.integer.reverse_byte_order ? " byte_order = be;" : ""
#endif
	);
	return ret;
}
1798
1799 /*
1800 * Must be called with sessions_mutex held.
1801 */
static
int _lttng_struct_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting)
{
	int ret;
	uint32_t i, nr_fields;
	unsigned int alignment;

	WARN_ON_ONCE(type->atype != atype_struct_nestable);

	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	ret = lttng_metadata_printf(session,
		"struct {\n");
	if (ret)
		return ret;
	/* Dump each member one nesting level deeper. */
	nr_fields = type->u.struct_nestable.nr_fields;
	for (i = 0; i < nr_fields; i++) {
		const struct lttng_event_field *iter_field;

		iter_field = &type->u.struct_nestable.fields[i];
		ret = _lttng_field_statedump(session, iter_field, nesting + 1);
		if (ret)
			return ret;
	}
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	/* Only emit an align() qualifier for a nonzero alignment. */
	alignment = type->u.struct_nestable.alignment;
	if (alignment) {
		ret = lttng_metadata_printf(session,
			"} align(%u)",
			alignment);
	} else {
		ret = lttng_metadata_printf(session,
			"}");
	}
	return ret;
}
1843
1844 /*
1845 * Must be called with sessions_mutex held.
1846 */
1847 static
1848 int _lttng_struct_field_statedump(struct lttng_session *session,
1849 const struct lttng_event_field *field,
1850 size_t nesting)
1851 {
1852 int ret;
1853
1854 ret = _lttng_struct_type_statedump(session,
1855 &field->type, nesting);
1856 if (ret)
1857 return ret;
1858 return lttng_field_name_statedump(session, field, nesting);
1859 }
1860
1861 /*
1862 * Must be called with sessions_mutex held.
1863 */
static
int _lttng_variant_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting)
{
	int ret;
	uint32_t i, nr_choices;

	WARN_ON_ONCE(type->atype != atype_variant_nestable);
	/*
	 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
	 */
	if (type->u.variant_nestable.alignment != 0)
		return -EINVAL;
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	/* The variant selector is the '_'-prefixed tag field. */
	ret = lttng_metadata_printf(session,
		"variant <_%s> {\n",
		type->u.variant_nestable.tag_name);
	if (ret)
		return ret;
	/* Dump each choice one nesting level deeper. */
	nr_choices = type->u.variant_nestable.nr_choices;
	for (i = 0; i < nr_choices; i++) {
		const struct lttng_event_field *iter_field;

		iter_field = &type->u.variant_nestable.choices[i];
		ret = _lttng_field_statedump(session, iter_field, nesting + 1);
		if (ret)
			return ret;
	}
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	ret = lttng_metadata_printf(session,
		"}");
	return ret;
}
1902
1903 /*
1904 * Must be called with sessions_mutex held.
1905 */
1906 static
1907 int _lttng_variant_field_statedump(struct lttng_session *session,
1908 const struct lttng_event_field *field,
1909 size_t nesting)
1910 {
1911 int ret;
1912
1913 ret = _lttng_variant_type_statedump(session,
1914 &field->type, nesting);
1915 if (ret)
1916 return ret;
1917 return lttng_field_name_statedump(session, field, nesting);
1918 }
1919
1920 /*
1921 * Must be called with sessions_mutex held.
1922 */
static
int _lttng_array_field_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting)
{
	int ret;
	const struct lttng_type *elem_type;

	WARN_ON_ONCE(field->type.atype != atype_array_nestable);

	/* Nonzero alignment is expressed with a zero-size padding struct. */
	if (field->type.u.array_nestable.alignment) {
		ret = print_tabs(session, nesting);
		if (ret)
			return ret;
		ret = lttng_metadata_printf(session,
		"struct { } align(%u) _%s_padding;\n",
				field->type.u.array_nestable.alignment * CHAR_BIT,
				field->name);
		if (ret)
			return ret;
	}
	/*
	 * Nested compound types: Only array of structures and variants are
	 * currently supported.
	 */
	elem_type = field->type.u.array_nestable.elem_type;
	switch (elem_type->atype) {
	case atype_integer:
	case atype_struct_nestable:
	case atype_variant_nestable:
		ret = _lttng_type_statedump(session, elem_type, nesting);
		if (ret)
			return ret;
		break;

	default:
		return -EINVAL;
	}
	/* Field name with the fixed array length. */
	ret = lttng_metadata_printf(session,
		" _%s[%u];\n",
		field->name,
		field->type.u.array_nestable.length);
	return ret;
}
1967
1968 /*
1969 * Must be called with sessions_mutex held.
1970 */
static
int _lttng_sequence_field_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting)
{
	int ret;
	const char *length_name;
	const struct lttng_type *elem_type;

	WARN_ON_ONCE(field->type.atype != atype_sequence_nestable);

	/* NOTE(review): length_name is set but the printf below re-reads the
	 * field directly; the local appears unused. */
	length_name = field->type.u.sequence_nestable.length_name;

	/* Nonzero alignment is expressed with a zero-size padding struct. */
	if (field->type.u.sequence_nestable.alignment) {
		ret = print_tabs(session, nesting);
		if (ret)
			return ret;
		ret = lttng_metadata_printf(session,
		"struct { } align(%u) _%s_padding;\n",
				field->type.u.sequence_nestable.alignment * CHAR_BIT,
				field->name);
		if (ret)
			return ret;
	}

	/*
	 * Nested compound types: Only array of structures and variants are
	 * currently supported.
	 */
	elem_type = field->type.u.sequence_nestable.elem_type;
	switch (elem_type->atype) {
	case atype_integer:
	case atype_struct_nestable:
	case atype_variant_nestable:
		ret = _lttng_type_statedump(session, elem_type, nesting);
		if (ret)
			return ret;
		break;

	default:
		return -EINVAL;
	}
	/* Field name with its '_'-prefixed length-field reference. */
	ret = lttng_metadata_printf(session,
		" _%s[ _%s ];\n",
		field->name,
		field->type.u.sequence_nestable.length_name);
	return ret;
}
2019
2020 /*
2021 * Must be called with sessions_mutex held.
2022 */
static
int _lttng_enum_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting)
{
	const struct lttng_enum_desc *enum_desc;
	const struct lttng_type *container_type;
	int ret;
	unsigned int i, nr_entries;

	/* CTF enums must be backed by an integer container type. */
	container_type = type->u.enum_nestable.container_type;
	if (container_type->atype != atype_integer) {
		ret = -EINVAL;
		goto end;
	}
	enum_desc = type->u.enum_nestable.desc;
	nr_entries = enum_desc->nr_entries;

	ret = print_tabs(session, nesting);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, "enum : ");
	if (ret)
		goto end;
	ret = _lttng_integer_type_statedump(session, container_type, 0);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, " {\n");
	if (ret)
		goto end;
	/* Dump all entries */
	for (i = 0; i < nr_entries; i++) {
		const struct lttng_enum_entry *entry = &enum_desc->entries[i];
		int j, len;

		ret = print_tabs(session, nesting + 1);
		if (ret)
			goto end;
		ret = lttng_metadata_printf(session,
				"\"");
		if (ret)
			goto end;
		len = strlen(entry->string);
		/* Escape the character '"' */
		for (j = 0; j < len; j++) {
			char c = entry->string[j];

			switch (c) {
			case '"':
				ret = lttng_metadata_printf(session,
						"\\\"");
				break;
			case '\\':
				ret = lttng_metadata_printf(session,
						"\\\\");
				break;
			default:
				ret = lttng_metadata_printf(session,
						"%c", c);
				break;
			}
			if (ret)
				goto end;
		}
		ret = lttng_metadata_printf(session, "\"");
		if (ret)
			goto end;

		/* Auto-valued entries emit no explicit mapping. */
		if (entry->options.is_auto) {
			ret = lttng_metadata_printf(session, ",\n");
			if (ret)
				goto end;
		} else {
			ret = lttng_metadata_printf(session,
					" = ");
			if (ret)
				goto end;
			/* Print start value with the matching signedness. */
			if (entry->start.signedness)
				ret = lttng_metadata_printf(session,
					"%lld", (long long) entry->start.value);
			else
				ret = lttng_metadata_printf(session,
					"%llu", entry->start.value);
			if (ret)
				goto end;
			/* Single value vs. a "start ... end" range. */
			if (entry->start.signedness == entry->end.signedness &&
					entry->start.value
						== entry->end.value) {
				ret = lttng_metadata_printf(session,
					",\n");
			} else {
				if (entry->end.signedness) {
					ret = lttng_metadata_printf(session,
						" ... %lld,\n",
						(long long) entry->end.value);
				} else {
					ret = lttng_metadata_printf(session,
						" ... %llu,\n",
						entry->end.value);
				}
			}
			if (ret)
				goto end;
		}
	}
	ret = print_tabs(session, nesting);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, "}");
end:
	return ret;
}
2135
2136 /*
2137 * Must be called with sessions_mutex held.
2138 */
2139 static
2140 int _lttng_enum_field_statedump(struct lttng_session *session,
2141 const struct lttng_event_field *field,
2142 size_t nesting)
2143 {
2144 int ret;
2145
2146 ret = _lttng_enum_type_statedump(session, &field->type, nesting);
2147 if (ret)
2148 return ret;
2149 return lttng_field_name_statedump(session, field, nesting);
2150 }
2151
2152 static
2153 int _lttng_integer_field_statedump(struct lttng_session *session,
2154 const struct lttng_event_field *field,
2155 size_t nesting)
2156 {
2157 int ret;
2158
2159 ret = _lttng_integer_type_statedump(session, &field->type, nesting);
2160 if (ret)
2161 return ret;
2162 return lttng_field_name_statedump(session, field, nesting);
2163 }
2164
2165 static
2166 int _lttng_string_type_statedump(struct lttng_session *session,
2167 const struct lttng_type *type,
2168 size_t nesting)
2169 {
2170 int ret;
2171
2172 WARN_ON_ONCE(type->atype != atype_string);
2173 /* Default encoding is UTF8 */
2174 ret = print_tabs(session, nesting);
2175 if (ret)
2176 return ret;
2177 ret = lttng_metadata_printf(session,
2178 "string%s",
2179 type->u.string.encoding == lttng_encode_ASCII ?
2180 " { encoding = ASCII; }" : "");
2181 return ret;
2182 }
2183
2184 static
2185 int _lttng_string_field_statedump(struct lttng_session *session,
2186 const struct lttng_event_field *field,
2187 size_t nesting)
2188 {
2189 int ret;
2190
2191 WARN_ON_ONCE(field->type.atype != atype_string);
2192 ret = _lttng_string_type_statedump(session, &field->type, nesting);
2193 if (ret)
2194 return ret;
2195 return lttng_field_name_statedump(session, field, nesting);
2196 }
2197
2198 /*
2199 * Must be called with sessions_mutex held.
2200 */
2201 static
2202 int _lttng_type_statedump(struct lttng_session *session,
2203 const struct lttng_type *type,
2204 size_t nesting)
2205 {
2206 int ret = 0;
2207
2208 switch (type->atype) {
2209 case atype_integer:
2210 ret = _lttng_integer_type_statedump(session, type, nesting);
2211 break;
2212 case atype_enum_nestable:
2213 ret = _lttng_enum_type_statedump(session, type, nesting);
2214 break;
2215 case atype_string:
2216 ret = _lttng_string_type_statedump(session, type, nesting);
2217 break;
2218 case atype_struct_nestable:
2219 ret = _lttng_struct_type_statedump(session, type, nesting);
2220 break;
2221 case atype_variant_nestable:
2222 ret = _lttng_variant_type_statedump(session, type, nesting);
2223 break;
2224
2225 /* Nested arrays and sequences are not supported yet. */
2226 case atype_array_nestable:
2227 case atype_sequence_nestable:
2228 default:
2229 WARN_ON_ONCE(1);
2230 return -EINVAL;
2231 }
2232 return ret;
2233 }
2234
2235 /*
2236 * Must be called with sessions_mutex held.
2237 */
2238 static
2239 int _lttng_field_statedump(struct lttng_session *session,
2240 const struct lttng_event_field *field,
2241 size_t nesting)
2242 {
2243 int ret = 0;
2244
2245 switch (field->type.atype) {
2246 case atype_integer:
2247 ret = _lttng_integer_field_statedump(session, field, nesting);
2248 break;
2249 case atype_enum_nestable:
2250 ret = _lttng_enum_field_statedump(session, field, nesting);
2251 break;
2252 case atype_string:
2253 ret = _lttng_string_field_statedump(session, field, nesting);
2254 break;
2255 case atype_struct_nestable:
2256 ret = _lttng_struct_field_statedump(session, field, nesting);
2257 break;
2258 case atype_array_nestable:
2259 ret = _lttng_array_field_statedump(session, field, nesting);
2260 break;
2261 case atype_sequence_nestable:
2262 ret = _lttng_sequence_field_statedump(session, field, nesting);
2263 break;
2264 case atype_variant_nestable:
2265 ret = _lttng_variant_field_statedump(session, field, nesting);
2266 break;
2267
2268 default:
2269 WARN_ON_ONCE(1);
2270 return -EINVAL;
2271 }
2272 return ret;
2273 }
2274
2275 static
2276 int _lttng_context_metadata_statedump(struct lttng_session *session,
2277 struct lttng_ctx *ctx)
2278 {
2279 int ret = 0;
2280 int i;
2281
2282 if (!ctx)
2283 return 0;
2284 for (i = 0; i < ctx->nr_fields; i++) {
2285 const struct lttng_ctx_field *field = &ctx->fields[i];
2286
2287 ret = _lttng_field_statedump(session, &field->event_field, 2);
2288 if (ret)
2289 return ret;
2290 }
2291 return ret;
2292 }
2293
2294 static
2295 int _lttng_fields_metadata_statedump(struct lttng_session *session,
2296 struct lttng_event *event)
2297 {
2298 const struct lttng_event_desc *desc = event->desc;
2299 int ret = 0;
2300 int i;
2301
2302 for (i = 0; i < desc->nr_fields; i++) {
2303 const struct lttng_event_field *field = &desc->fields[i];
2304
2305 ret = _lttng_field_statedump(session, field, 2);
2306 if (ret)
2307 return ret;
2308 }
2309 return ret;
2310 }
2311
/*
 * Output one event's metadata declaration (name, numeric id, owning
 * stream id, optional context fields and payload fields) into the
 * session's metadata stream.
 *
 * Must be called with sessions_mutex held.
 */
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
				  struct lttng_channel *chan,
				  struct lttng_event *event)
{
	int ret = 0;

	/* Nothing to do if already dumped or session is inactive. */
	if (event->metadata_dumped || !READ_ONCE(session->active))
		return 0;
	/* Events of the metadata channel itself are not described. */
	if (chan->channel_type == METADATA_CHANNEL)
		return 0;

	ret = lttng_metadata_printf(session,
		"event {\n"
		"	name = \"%s\";\n"
		"	id = %u;\n"
		"	stream_id = %u;\n",
		event->desc->name,
		event->id,
		event->chan->id);
	if (ret)
		goto end;

	/* Optional per-event context fields, wrapped in a struct. */
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			"	context := struct {\n");
		if (ret)
			goto end;
	}
	ret = _lttng_context_metadata_statedump(session, event->ctx);
	if (ret)
		goto end;
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			"	};\n");
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		"	fields := struct {\n"
		);
	if (ret)
		goto end;

	ret = _lttng_fields_metadata_statedump(session, event);
	if (ret)
		goto end;

	/*
	 * LTTng space reservation can only reserve multiples of the
	 * byte size.
	 */
	ret = lttng_metadata_printf(session,
		"	};\n"
		"};\n\n");
	if (ret)
		goto end;

	/* Mark dumped only after the whole declaration was emitted. */
	event->metadata_dumped = 1;
end:
	return ret;

}
2379
2380 /*
2381 * Must be called with sessions_mutex held.
2382 */
2383 static
2384 int _lttng_channel_metadata_statedump(struct lttng_session *session,
2385 struct lttng_channel *chan)
2386 {
2387 int ret = 0;
2388
2389 if (chan->metadata_dumped || !READ_ONCE(session->active))
2390 return 0;
2391
2392 if (chan->channel_type == METADATA_CHANNEL)
2393 return 0;
2394
2395 WARN_ON_ONCE(!chan->header_type);
2396 ret = lttng_metadata_printf(session,
2397 "stream {\n"
2398 " id = %u;\n"
2399 " event.header := %s;\n"
2400 " packet.context := struct packet_context;\n",
2401 chan->id,
2402 chan->header_type == 1 ? "struct event_header_compact" :
2403 "struct event_header_large");
2404 if (ret)
2405 goto end;
2406
2407 if (chan->ctx) {
2408 ret = lttng_metadata_printf(session,
2409 " event.context := struct {\n");
2410 if (ret)
2411 goto end;
2412 }
2413 ret = _lttng_context_metadata_statedump(session, chan->ctx);
2414 if (ret)
2415 goto end;
2416 if (chan->ctx) {
2417 ret = lttng_metadata_printf(session,
2418 " };\n");
2419 if (ret)
2420 goto end;
2421 }
2422
2423 ret = lttng_metadata_printf(session,
2424 "};\n\n");
2425
2426 chan->metadata_dumped = 1;
2427 end:
2428 return ret;
2429 }
2430
/*
 * Declare the "struct packet_context" type used by every stream's
 * packet header: begin/end timestamps, content/packet sizes (in bits,
 * per CTF), packet sequence number, discarded event count and cpu id.
 *
 * Must be called with sessions_mutex held.
 */
static
int _lttng_stream_packet_context_declare(struct lttng_session *session)
{
	return lttng_metadata_printf(session,
		"struct packet_context {\n"
		"	uint64_clock_monotonic_t timestamp_begin;\n"
		"	uint64_clock_monotonic_t timestamp_end;\n"
		"	uint64_t content_size;\n"
		"	uint64_t packet_size;\n"
		"	uint64_t packet_seq_num;\n"
		"	unsigned long events_discarded;\n"
		"	uint32_t cpu_id;\n"
		"};\n\n"
		);
}
2449
/*
 * Declare the two event header variants. Small ids carry a compact
 * truncated timestamp; the reserved id selects the extended form with
 * a full 32-bit id and 64-bit timestamp.
 *
 * Compact header:
 * id: range: 0 - 30.
 * id 31 is reserved to indicate an extended header.
 *
 * Large header:
 * id: range: 0 - 65534.
 * id 65535 is reserved to indicate an extended header.
 *
 * Must be called with sessions_mutex held.
 */
static
int _lttng_event_header_declare(struct lttng_session *session)
{
	return lttng_metadata_printf(session,
	"struct event_header_compact {\n"
	"	enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
	"	variant <id> {\n"
	"		struct {\n"
	"			uint27_clock_monotonic_t timestamp;\n"
	"		} compact;\n"
	"		struct {\n"
	"			uint32_t id;\n"
	"			uint64_clock_monotonic_t timestamp;\n"
	"		} extended;\n"
	"	} v;\n"
	"} align(%u);\n"
	"\n"
	"struct event_header_large {\n"
	"	enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
	"	variant <id> {\n"
	"		struct {\n"
	"			uint32_clock_monotonic_t timestamp;\n"
	"		} compact;\n"
	"		struct {\n"
	"			uint32_t id;\n"
	"			uint64_clock_monotonic_t timestamp;\n"
	"		} extended;\n"
	"	} v;\n"
	"} align(%u);\n\n",
	lttng_alignof(uint32_t) * CHAR_BIT,
	lttng_alignof(uint16_t) * CHAR_BIT
	);
}
2494
2495 /*
2496 * Approximation of NTP time of day to clock monotonic correlation,
2497 * taken at start of trace.
2498 * Yes, this is only an approximation. Yes, we can (and will) do better
2499 * in future versions.
2500 * This function may return a negative offset. It may happen if the
2501 * system sets the REALTIME clock to 0 after boot.
2502 *
2503 * Use 64bit timespec on kernels that have it, this makes 32bit arch
2504 * y2038 compliant.
2505 */
2506 static
2507 int64_t measure_clock_offset(void)
2508 {
2509 uint64_t monotonic_avg, monotonic[2], realtime;
2510 uint64_t tcf = trace_clock_freq();
2511 int64_t offset;
2512 unsigned long flags;
2513 struct timespec64 rts = { 0, 0 };
2514
2515 /* Disable interrupts to increase correlation precision. */
2516 local_irq_save(flags);
2517 monotonic[0] = trace_clock_read64();
2518 ktime_get_real_ts64(&rts);
2519 monotonic[1] = trace_clock_read64();
2520 local_irq_restore(flags);
2521
2522 monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
2523 realtime = (uint64_t) rts.tv_sec * tcf;
2524 if (tcf == NSEC_PER_SEC) {
2525 realtime += rts.tv_nsec;
2526 } else {
2527 uint64_t n = rts.tv_nsec * tcf;
2528
2529 do_div(n, NSEC_PER_SEC);
2530 realtime += n;
2531 }
2532 offset = (int64_t) realtime - monotonic_avg;
2533 return offset;
2534 }
2535
/*
 * Print a string into the metadata, escaping it so it remains a valid
 * CTF string literal: newlines become "\n", and backslash/double-quote
 * are prefixed with a backslash.
 */
static
int print_escaped_ctf_string(struct lttng_session *session, const char *string)
{
	const char *p;
	int ret = 0;

	for (p = string; *p != '\0'; p++) {
		char c = *p;

		if (c == '\n') {
			ret = lttng_metadata_printf(session, "%s", "\\n");
		} else {
			if (c == '\\' || c == '"') {
				/* Emit the escape prefix... */
				ret = lttng_metadata_printf(session, "%c", '\\');
				if (ret)
					goto error;
			}
			/* ...then the character itself. */
			ret = lttng_metadata_printf(session, "%c", c);
		}
		if (ret)
			goto error;
	}
error:
	return ret;
}
2570
/*
 * Emit one quoted env-style metadata field:
 *   <tab>field = "escaped-value";
 * The value goes through CTF string escaping.
 */
static
int print_metadata_escaped_field(struct lttng_session *session, const char *field,
		const char *field_value)
{
	int ret;

	ret = lttng_metadata_printf(session, "	%s = \"", field);
	if (!ret)
		ret = print_escaped_ctf_string(session, field_value);
	if (!ret)
		ret = lttng_metadata_printf(session, "\";\n");
	return ret;
}
2590
/*
 * Output metadata into this session's metadata buffers: the trace,
 * env and clock sections (once per session), then the per-channel and
 * per-event declarations (incremental: each is dumped at most once).
 * Must be called with sessions_mutex held.
 */
static
int _lttng_session_metadata_statedump(struct lttng_session *session)
{
	unsigned char *uuid_c = session->uuid.b;
	/*
	 * NOTE(review): unsigned char buffers passed to snprintf/%s rely
	 * on implicit unsigned char * -> char * conversion; consider
	 * plain char to silence -Wpointer-sign. Confirm against
	 * trace_clock_uuid()'s expected argument type.
	 */
	unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
	const char *product_uuid;
	struct lttng_channel *chan;
	struct lttng_event *event;
	int ret = 0;

	if (!READ_ONCE(session->active))
		return 0;
	/* Session header already emitted: only new channels/events remain. */
	if (session->metadata_dumped)
		goto skip_session;

	/* Format the 16-byte session UUID as the canonical 36-char string. */
	snprintf(uuid_s, sizeof(uuid_s),
		"%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
		uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
		uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
		uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
		uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);

	/* Base integer typealiases and the CTF trace declaration. */
	ret = lttng_metadata_printf(session,
		"typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
		"typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
		"typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
		"typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
		"typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
		"typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
		"typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
		"\n"
		"trace {\n"
		"	major = %u;\n"
		"	minor = %u;\n"
		"	uuid = \"%s\";\n"
		"	byte_order = %s;\n"
		"	packet.header := struct {\n"
		"		uint32_t magic;\n"
		"		uint8_t  uuid[16];\n"
		"		uint32_t stream_id;\n"
		"		uint64_t stream_instance_id;\n"
		"	};\n"
		"};\n\n",
		lttng_alignof(uint8_t) * CHAR_BIT,
		lttng_alignof(uint16_t) * CHAR_BIT,
		lttng_alignof(uint32_t) * CHAR_BIT,
		lttng_alignof(uint64_t) * CHAR_BIT,
		sizeof(unsigned long) * CHAR_BIT,
		lttng_alignof(unsigned long) * CHAR_BIT,
		CTF_SPEC_MAJOR,
		CTF_SPEC_MINOR,
		uuid_s,
#if __BYTE_ORDER == __BIG_ENDIAN
		"be"
#else
		"le"
#endif
		);
	if (ret)
		goto end;

	/* Open the env section with tracer/system identification. */
	ret = lttng_metadata_printf(session,
		"env {\n"
		"	hostname = \"%s\";\n"
		"	domain = \"kernel\";\n"
		"	sysname = \"%s\";\n"
		"	kernel_release = \"%s\";\n"
		"	kernel_version = \"%s\";\n"
		"	tracer_name = \"lttng-modules\";\n"
		"	tracer_major = %d;\n"
		"	tracer_minor = %d;\n"
		"	tracer_patchlevel = %d;\n"
		"	trace_buffering_scheme = \"global\";\n",
		current->nsproxy->uts_ns->name.nodename,
		utsname()->sysname,
		utsname()->release,
		utsname()->version,
		LTTNG_MODULES_MAJOR_VERSION,
		LTTNG_MODULES_MINOR_VERSION,
		LTTNG_MODULES_PATCHLEVEL_VERSION
		);
	if (ret)
		goto end;

	/* User-supplied strings go through CTF escaping. */
	ret = print_metadata_escaped_field(session, "trace_name", session->name);
	if (ret)
		goto end;
	ret = print_metadata_escaped_field(session, "trace_creation_datetime",
			session->creation_time);
	if (ret)
		goto end;

	/* Add the product UUID to the 'env' section */
	product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
	if (product_uuid) {
		ret = lttng_metadata_printf(session,
				"	product_uuid = \"%s\";\n",
				product_uuid
				);
		if (ret)
			goto end;
	}

	/* Close the 'env' section */
	ret = lttng_metadata_printf(session, "};\n\n");
	if (ret)
		goto end;

	/* Describe the trace clock: name, optional uuid, freq and offset. */
	ret = lttng_metadata_printf(session,
		"clock {\n"
		"	name = \"%s\";\n",
		trace_clock_name()
		);
	if (ret)
		goto end;

	if (!trace_clock_uuid(clock_uuid_s)) {
		ret = lttng_metadata_printf(session,
			"	uuid = \"%s\";\n",
			clock_uuid_s
			);
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		"	description = \"%s\";\n"
		"	freq = %llu; /* Frequency, in Hz */\n"
		"	/* clock value offset from Epoch is: offset * (1/freq) */\n"
		"	offset = %lld;\n"
		"};\n\n",
		trace_clock_description(),
		(unsigned long long) trace_clock_freq(),
		(long long) measure_clock_offset()
		);
	if (ret)
		goto end;

	/* Clock-mapped timestamp types used by the event headers. */
	ret = lttng_metadata_printf(session,
		"typealias integer {\n"
		"	size = 27; align = 1; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint27_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		"	size = 32; align = %u; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint32_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		"	size = 64; align = %u; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint64_clock_monotonic_t;\n\n",
		trace_clock_name(),
		lttng_alignof(uint32_t) * CHAR_BIT,
		trace_clock_name(),
		lttng_alignof(uint64_t) * CHAR_BIT,
		trace_clock_name()
		);
	if (ret)
		goto end;

	ret = _lttng_stream_packet_context_declare(session);
	if (ret)
		goto end;

	ret = _lttng_event_header_declare(session);
	if (ret)
		goto end;

skip_session:
	/* Incremental part: dump any channel/event not yet described. */
	list_for_each_entry(chan, &session->chan, list) {
		ret = _lttng_channel_metadata_statedump(session, chan);
		if (ret)
			goto end;
	}

	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_metadata_statedump(session, event->chan, event);
		if (ret)
			goto end;
	}
	session->metadata_dumped = 1;
end:
	return ret;
}
2781
/**
 * lttng_transport_register - LTT transport registration
 * @transport: transport structure
 *
 * Registers a transport which can be used as output to extract the data out of
 * LTTng. The transport list is protected by sessions_mutex.
 */
void lttng_transport_register(struct lttng_transport *transport)
{
	mutex_lock(&sessions_mutex);
	list_add_tail(&transport->node, &lttng_transport_list);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_register);
2796
/**
 * lttng_transport_unregister - LTT transport unregistration
 * @transport: transport structure
 *
 * Removes @transport from the global transport list, under sessions_mutex.
 */
void lttng_transport_unregister(struct lttng_transport *transport)
{
	mutex_lock(&sessions_mutex);
	list_del(&transport->node);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_unregister);
2808
2809 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
2810
/* Dynamically allocated cpuhp states, set by lttng_init_cpu_hotplug(). */
enum cpuhp_state lttng_hp_prepare;
enum cpuhp_state lttng_hp_online;
2813
2814 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
2815 {
2816 struct lttng_cpuhp_node *lttng_node;
2817
2818 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
2819 switch (lttng_node->component) {
2820 case LTTNG_RING_BUFFER_FRONTEND:
2821 return 0;
2822 case LTTNG_RING_BUFFER_BACKEND:
2823 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
2824 case LTTNG_RING_BUFFER_ITER:
2825 return 0;
2826 case LTTNG_CONTEXT_PERF_COUNTERS:
2827 return 0;
2828 default:
2829 return -EINVAL;
2830 }
2831 }
2832
2833 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
2834 {
2835 struct lttng_cpuhp_node *lttng_node;
2836
2837 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
2838 switch (lttng_node->component) {
2839 case LTTNG_RING_BUFFER_FRONTEND:
2840 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
2841 case LTTNG_RING_BUFFER_BACKEND:
2842 return 0;
2843 case LTTNG_RING_BUFFER_ITER:
2844 return 0;
2845 case LTTNG_CONTEXT_PERF_COUNTERS:
2846 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
2847 default:
2848 return -EINVAL;
2849 }
2850 }
2851
2852 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
2853 {
2854 struct lttng_cpuhp_node *lttng_node;
2855
2856 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
2857 switch (lttng_node->component) {
2858 case LTTNG_RING_BUFFER_FRONTEND:
2859 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
2860 case LTTNG_RING_BUFFER_BACKEND:
2861 return 0;
2862 case LTTNG_RING_BUFFER_ITER:
2863 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
2864 case LTTNG_CONTEXT_PERF_COUNTERS:
2865 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
2866 default:
2867 return -EINVAL;
2868 }
2869 }
2870
2871 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
2872 {
2873 struct lttng_cpuhp_node *lttng_node;
2874
2875 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
2876 switch (lttng_node->component) {
2877 case LTTNG_RING_BUFFER_FRONTEND:
2878 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
2879 case LTTNG_RING_BUFFER_BACKEND:
2880 return 0;
2881 case LTTNG_RING_BUFFER_ITER:
2882 return 0;
2883 case LTTNG_CONTEXT_PERF_COUNTERS:
2884 return 0;
2885 default:
2886 return -EINVAL;
2887 }
2888 }
2889
/*
 * Register the two dynamically-allocated cpuhp multi states (prepare
 * and online) and publish their ids to the ring buffer. On failure of
 * the second registration, the first one is rolled back.
 */
static int __init lttng_init_cpu_hotplug(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
			lttng_hotplug_prepare,
			lttng_hotplug_dead);
	if (ret < 0) {
		return ret;
	}
	/* On success, ret is the dynamically allocated state number. */
	lttng_hp_prepare = ret;
	lttng_rb_set_hp_prepare(ret);

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
			lttng_hotplug_online,
			lttng_hotplug_offline);
	if (ret < 0) {
		/* Roll back the prepare state registered above. */
		cpuhp_remove_multi_state(lttng_hp_prepare);
		lttng_hp_prepare = 0;
		return ret;
	}
	lttng_hp_online = ret;
	lttng_rb_set_hp_online(ret);

	return 0;
}
2916
/*
 * Unregister the cpuhp states in reverse registration order, clearing
 * the ids published to the ring buffer first.
 */
static void __exit lttng_exit_cpu_hotplug(void)
{
	lttng_rb_set_hp_online(0);
	cpuhp_remove_multi_state(lttng_hp_online);
	lttng_rb_set_hp_prepare(0);
	cpuhp_remove_multi_state(lttng_hp_prepare);
}
2924
2925 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
/* No-op stubs: the cpuhp multi-state API does not exist before 4.10. */
static int lttng_init_cpu_hotplug(void)
{
	return 0;
}
static void lttng_exit_cpu_hotplug(void)
{
}
2933 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
2934
2935
/*
 * Module entry point: initialize the page_alloc wrappers, probe and
 * context registries, tracepoint layer, event slab cache, ABI (ioctl
 * interface), logger and CPU hotplug states, rolling back in reverse
 * order on failure.
 */
static int __init lttng_events_init(void)
{
	int ret;

	/* Kallsyms-based wrappers; nothing to undo if later steps fail. */
	ret = wrapper_get_pfnblock_flags_mask_init();
	if (ret)
		return ret;
	ret = wrapper_get_pageblock_flags_mask_init();
	if (ret)
		return ret;
	ret = lttng_probes_init();
	if (ret)
		return ret;
	ret = lttng_context_init();
	if (ret)
		return ret;
	ret = lttng_tracepoint_init();
	if (ret)
		goto error_tp;
	/* Slab cache backing struct lttng_event allocations. */
	event_cache = KMEM_CACHE(lttng_event, 0);
	if (!event_cache) {
		ret = -ENOMEM;
		goto error_kmem;
	}
	ret = lttng_abi_init();
	if (ret)
		goto error_abi;
	ret = lttng_logger_init();
	if (ret)
		goto error_logger;
	ret = lttng_init_cpu_hotplug();
	if (ret)
		goto error_hotplug;
	printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return 0;

	/* Unwind in reverse initialization order. */
error_hotplug:
	lttng_logger_exit();
error_logger:
	lttng_abi_exit();
error_abi:
	kmem_cache_destroy(event_cache);
error_kmem:
	lttng_tracepoint_exit();
error_tp:
	lttng_context_exit();
	printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return ret;
}

module_init(lttng_events_init);
3017
/*
 * Module exit: tear down in reverse of lttng_events_init(), destroying
 * any session still alive before freeing the event cache.
 */
static void __exit lttng_events_exit(void)
{
	struct lttng_session *session, *tmpsession;

	lttng_exit_cpu_hotplug();
	lttng_logger_exit();
	lttng_abi_exit();
	/* Safe iteration: destroy removes the session from the list. */
	list_for_each_entry_safe(session, tmpsession, &sessions, list)
		lttng_session_destroy(session);
	kmem_cache_destroy(event_cache);
	lttng_tracepoint_exit();
	lttng_context_exit();
	printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
}

module_exit(lttng_events_exit);
3049
/* Distribution patch bookkeeping generated by the build system. */
#include "extra_version/patches.i"
#ifdef LTTNG_EXTRA_VERSION_GIT
MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
#endif
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
MODULE_DESCRIPTION("LTTng tracer");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);
This page took 0.134606 seconds and 4 git commands to generate.