1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 #include <linux/module.h>
11 #include <linux/mutex.h>
12 #include <linux/sched.h>
13 #include <linux/slab.h>
14 #include <linux/jiffies.h>
15 #include <linux/utsname.h>
16 #include <linux/err.h>
17 #include <linux/seq_file.h>
18 #include <linux/file.h>
19 #include <linux/anon_inodes.h>
20 #include <linux/jhash.h>
21 #include <linux/uaccess.h>
22 #include <linux/uuid.h>
23 #include <linux/dmi.h>
24 #include <linux/vmalloc.h>
25 #include <linux/limits.h>
26
27 #include <wrapper/random.h>
28 #include <lttng-kernel-version.h>
29 #include <lttng-events.h>
30 #include <lttng-tracer.h>
31 #include <lttng-abi-old.h>
32 #include <lttng-endian.h>
33 #include <lttng-string-utils.h>
34 #include <lttng-tracepoint.h>
35 #include <wrapper/ringbuffer/backend.h>
36 #include <wrapper/ringbuffer/frontend.h>
37
38 #define METADATA_CACHE_DEFAULT_SIZE 4096
39
40 static LIST_HEAD(sessions);
41 static LIST_HEAD(lttng_transport_list);
42 /*
43 * Protect the sessions and metadata caches.
44 */
45 static DEFINE_MUTEX(sessions_mutex);
46 static struct kmem_cache *event_cache;
47
48 static void lttng_session_lazy_sync_enablers(struct lttng_session *session);
49 static void lttng_session_sync_enablers(struct lttng_session *session);
50 static void lttng_enabler_destroy(struct lttng_enabler *enabler);
51
52 static void _lttng_event_destroy(struct lttng_event *event);
53 static void _lttng_channel_destroy(struct lttng_channel *chan);
54 static int _lttng_event_unregister(struct lttng_event *event);
55 static
56 int _lttng_event_metadata_statedump(struct lttng_session *session,
57 struct lttng_channel *chan,
58 struct lttng_event *event);
59 static
60 int _lttng_session_metadata_statedump(struct lttng_session *session);
61 static
62 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
63 static
64 int _lttng_type_statedump(struct lttng_session *session,
65 const struct lttng_type *type,
66 size_t nesting);
67 static
68 int _lttng_field_statedump(struct lttng_session *session,
69 const struct lttng_event_field *field,
70 size_t nesting);
71
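/*
 * Wait for a tracing grace period so that in-flight probe callbacks have
 * completed before callers tear events down. Kernels >= 5.1 folded
 * synchronize_sched() into synchronize_rcu(); PREEMPT_RT configurations
 * additionally issue synchronize_rcu().
 */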
72 void synchronize_trace(void)
73 {
74 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
75 synchronize_rcu();
76 #else
77 synchronize_sched();
78 #endif
79
80 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
81 #ifdef CONFIG_PREEMPT_RT_FULL
82 synchronize_rcu();
83 #endif
84 #else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
85 #ifdef CONFIG_PREEMPT_RT
86 synchronize_rcu();
87 #endif
88 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
89 }
90
91 void lttng_lock_sessions(void)
92 {
93 mutex_lock(&sessions_mutex);
94 }
95
96 void lttng_unlock_sessions(void)
97 {
98 mutex_unlock(&sessions_mutex);
99 }
100
101 /*
102 * Called with sessions lock held.
103 */
104 int lttng_session_active(void)
105 {
106 struct lttng_session *iter;
107
108 list_for_each_entry(iter, &sessions, list) {
109 if (iter->active)
110 return 1;
111 }
112 return 0;
113 }
114
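/*
 * Allocate a new tracing session and its backing metadata cache
 * (METADATA_CACHE_DEFAULT_SIZE bytes initially, grown on demand by
 * lttng_metadata_printf()), initialize the per-session event hash table
 * and ID trackers, and link the session into the global "sessions" list
 * under sessions_mutex. Returns NULL on allocation failure.
 */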
115 struct lttng_session *lttng_session_create(void)
116 {
117 struct lttng_session *session;
118 struct lttng_metadata_cache *metadata_cache;
119 int i;
120
121 mutex_lock(&sessions_mutex);
122 session = kvzalloc_node(sizeof(struct lttng_session), GFP_KERNEL,
123 NUMA_NO_NODE);
124 if (!session)
125 goto err;
126 INIT_LIST_HEAD(&session->chan);
127 INIT_LIST_HEAD(&session->events);
128 guid_gen(&session->uuid);
129
130 metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
131 GFP_KERNEL);
132 if (!metadata_cache)
133 goto err_free_session;
134 metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
135 if (!metadata_cache->data)
136 goto err_free_cache;
137 metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
138 kref_init(&metadata_cache->refcount);
139 mutex_init(&metadata_cache->lock);
140 session->metadata_cache = metadata_cache;
141 INIT_LIST_HEAD(&metadata_cache->metadata_stream);
142 memcpy(&metadata_cache->uuid, &session->uuid,
143 sizeof(metadata_cache->uuid));
144 INIT_LIST_HEAD(&session->enablers_head);
145 for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
146 INIT_HLIST_HEAD(&session->events_ht.table[i]);
147 list_add(&session->list, &sessions);
148 session->pid_tracker.session = session;
149 session->pid_tracker.tracker_type = TRACKER_PID;
150 session->vpid_tracker.session = session;
151 session->vpid_tracker.tracker_type = TRACKER_VPID;
152 session->uid_tracker.session = session;
153 session->uid_tracker.tracker_type = TRACKER_UID;
154 session->vuid_tracker.session = session;
155 session->vuid_tracker.tracker_type = TRACKER_VUID;
156 session->gid_tracker.session = session;
157 session->gid_tracker.tracker_type = TRACKER_GID;
158 session->vgid_tracker.session = session;
159 session->vgid_tracker.tracker_type = TRACKER_VGID;
160 mutex_unlock(&sessions_mutex);
161 return session;
162
163 err_free_cache:
164 kfree(metadata_cache);
165 err_free_session:
166 kvfree(session);
167 err:
168 mutex_unlock(&sessions_mutex);
169 return NULL;
170 }
171
172 void metadata_cache_destroy(struct kref *kref)
173 {
174 struct lttng_metadata_cache *cache =
175 container_of(kref, struct lttng_metadata_cache, refcount);
176 vfree(cache->data);
177 kfree(cache);
178 }
179
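/*
 * Tear down a session: mark it inactive, unregister syscall tracing and
 * all events, wait for in-flight probes with synchronize_trace(), then
 * free enablers, events, channels and ID trackers. The metadata cache
 * itself is only freed once its refcount drops to zero, since metadata
 * streams may still hold a reference.
 */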
180 void lttng_session_destroy(struct lttng_session *session)
181 {
182 struct lttng_channel *chan, *tmpchan;
183 struct lttng_event *event, *tmpevent;
184 struct lttng_metadata_stream *metadata_stream;
185 struct lttng_enabler *enabler, *tmpenabler;
186 int ret;
187
188 mutex_lock(&sessions_mutex);
189 WRITE_ONCE(session->active, 0);
190 list_for_each_entry(chan, &session->chan, list) {
191 ret = lttng_syscalls_unregister(chan);
192 WARN_ON(ret);
193 }
194 list_for_each_entry(event, &session->events, list) {
195 ret = _lttng_event_unregister(event);
196 WARN_ON(ret);
197 }
198 synchronize_trace(); /* Wait for in-flight events to complete */
199 list_for_each_entry_safe(enabler, tmpenabler,
200 &session->enablers_head, node)
201 lttng_enabler_destroy(enabler);
202 list_for_each_entry_safe(event, tmpevent, &session->events, list)
203 _lttng_event_destroy(event);
204 list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
205 BUG_ON(chan->channel_type == METADATA_CHANNEL);
206 _lttng_channel_destroy(chan);
207 }
208 list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
209 _lttng_metadata_channel_hangup(metadata_stream);
210 lttng_id_tracker_destroy(&session->pid_tracker, false);
211 lttng_id_tracker_destroy(&session->vpid_tracker, false);
212 lttng_id_tracker_destroy(&session->uid_tracker, false);
213 lttng_id_tracker_destroy(&session->vuid_tracker, false);
214 lttng_id_tracker_destroy(&session->gid_tracker, false);
215 lttng_id_tracker_destroy(&session->vgid_tracker, false);
216 kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
217 list_del(&session->list);
218 mutex_unlock(&sessions_mutex);
219 kvfree(session);
220 }
221
222 int lttng_session_statedump(struct lttng_session *session)
223 {
224 int ret;
225
226 mutex_lock(&sessions_mutex);
227 ret = lttng_statedump_start(session);
228 mutex_unlock(&sessions_mutex);
229 return ret;
230 }
231
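/*
 * Activate a session: sync enablers, then pick the event header layout
 * per channel (compact headers when fewer than 31 event IDs are in use,
 * large headers otherwise), clear the quiescent state of data channels,
 * and emit the metadata and kernel statedumps. Fails with -EBUSY if the
 * session is already active.
 */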
232 int lttng_session_enable(struct lttng_session *session)
233 {
234 int ret = 0;
235 struct lttng_channel *chan;
236
237 mutex_lock(&sessions_mutex);
238 if (session->active) {
239 ret = -EBUSY;
240 goto end;
241 }
242
243 /* Set transient enabler state to "enabled" */
244 session->tstate = 1;
245
246 /* We need to sync enablers with session before activation. */
247 lttng_session_sync_enablers(session);
248
249 /*
250 * Snapshot the number of events per channel to know the type of header
251 * we need to use.
252 */
253 list_for_each_entry(chan, &session->chan, list) {
254 if (chan->header_type)
255 continue; /* don't change it if session stop/restart */
256 if (chan->free_event_id < 31)
257 chan->header_type = 1; /* compact */
258 else
259 chan->header_type = 2; /* large */
260 }
261
262 /* Clear each stream's quiescent state. */
263 list_for_each_entry(chan, &session->chan, list) {
264 if (chan->channel_type != METADATA_CHANNEL)
265 lib_ring_buffer_clear_quiescent_channel(chan->chan);
266 }
267
268 WRITE_ONCE(session->active, 1);
269 WRITE_ONCE(session->been_active, 1);
270 ret = _lttng_session_metadata_statedump(session);
271 if (ret) {
272 WRITE_ONCE(session->active, 0);
273 goto end;
274 }
275 ret = lttng_statedump_start(session);
276 if (ret)
277 WRITE_ONCE(session->active, 0);
278 end:
279 mutex_unlock(&sessions_mutex);
280 return ret;
281 }
282
283 int lttng_session_disable(struct lttng_session *session)
284 {
285 int ret = 0;
286 struct lttng_channel *chan;
287
288 mutex_lock(&sessions_mutex);
289 if (!session->active) {
290 ret = -EBUSY;
291 goto end;
292 }
293 WRITE_ONCE(session->active, 0);
294
295 /* Set transient enabler state to "disabled" */
296 session->tstate = 0;
297 lttng_session_sync_enablers(session);
298
299 /* Set each stream's quiescent state. */
300 list_for_each_entry(chan, &session->chan, list) {
301 if (chan->channel_type != METADATA_CHANNEL)
302 lib_ring_buffer_set_quiescent_channel(chan->chan);
303 }
304 end:
305 mutex_unlock(&sessions_mutex);
306 return ret;
307 }
308
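/*
 * Regenerate the metadata of an active session: wipe the metadata cache,
 * bump its version so consumers notice the change, reset the per-stream
 * read positions and the "metadata dumped" flags, then re-emit the full
 * metadata statedump.
 */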
309 int lttng_session_metadata_regenerate(struct lttng_session *session)
310 {
311 int ret = 0;
312 struct lttng_channel *chan;
313 struct lttng_event *event;
314 struct lttng_metadata_cache *cache = session->metadata_cache;
315 struct lttng_metadata_stream *stream;
316
317 mutex_lock(&sessions_mutex);
318 if (!session->active) {
319 ret = -EBUSY;
320 goto end;
321 }
322
323 mutex_lock(&cache->lock);
324 memset(cache->data, 0, cache->cache_alloc);
325 cache->metadata_written = 0;
326 cache->version++;
327 list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list) {
328 stream->metadata_out = 0;
329 stream->metadata_in = 0;
330 }
331 mutex_unlock(&cache->lock);
332
333 session->metadata_dumped = 0;
334 list_for_each_entry(chan, &session->chan, list) {
335 chan->metadata_dumped = 0;
336 }
337
338 list_for_each_entry(event, &session->events, list) {
339 event->metadata_dumped = 0;
340 }
341
342 ret = _lttng_session_metadata_statedump(session);
343
344 end:
345 mutex_unlock(&sessions_mutex);
346 return ret;
347 }
348
349 int lttng_channel_enable(struct lttng_channel *channel)
350 {
351 int ret = 0;
352
353 mutex_lock(&sessions_mutex);
354 if (channel->channel_type == METADATA_CHANNEL) {
355 ret = -EPERM;
356 goto end;
357 }
358 if (channel->enabled) {
359 ret = -EEXIST;
360 goto end;
361 }
362 /* Set transient enabler state to "enabled" */
363 channel->tstate = 1;
364 lttng_session_sync_enablers(channel->session);
365 /* Set atomically the state to "enabled" */
366 WRITE_ONCE(channel->enabled, 1);
367 end:
368 mutex_unlock(&sessions_mutex);
369 return ret;
370 }
371
372 int lttng_channel_disable(struct lttng_channel *channel)
373 {
374 int ret = 0;
375
376 mutex_lock(&sessions_mutex);
377 if (channel->channel_type == METADATA_CHANNEL) {
378 ret = -EPERM;
379 goto end;
380 }
381 if (!channel->enabled) {
382 ret = -EEXIST;
383 goto end;
384 }
385 /* Set atomically the state to "disabled" */
386 WRITE_ONCE(channel->enabled, 0);
387 /* Set transient enabler state to "disabled" */
388 channel->tstate = 0;
389 lttng_session_sync_enablers(channel->session);
390 end:
391 mutex_unlock(&sessions_mutex);
392 return ret;
393 }
394
395 int lttng_event_enable(struct lttng_event *event)
396 {
397 int ret = 0;
398
399 mutex_lock(&sessions_mutex);
400 if (event->chan->channel_type == METADATA_CHANNEL) {
401 ret = -EPERM;
402 goto end;
403 }
404 if (event->enabled) {
405 ret = -EEXIST;
406 goto end;
407 }
408 switch (event->instrumentation) {
409 case LTTNG_KERNEL_TRACEPOINT:
410 case LTTNG_KERNEL_SYSCALL:
411 ret = -EINVAL;
412 break;
413 case LTTNG_KERNEL_KPROBE:
414 case LTTNG_KERNEL_UPROBE:
415 case LTTNG_KERNEL_NOOP:
416 WRITE_ONCE(event->enabled, 1);
417 break;
418 case LTTNG_KERNEL_KRETPROBE:
419 ret = lttng_kretprobes_event_enable_state(event, 1);
420 break;
421 case LTTNG_KERNEL_FUNCTION: /* Fall-through. */
422 default:
423 WARN_ON_ONCE(1);
424 ret = -EINVAL;
425 }
426 end:
427 mutex_unlock(&sessions_mutex);
428 return ret;
429 }
430
431 int lttng_event_disable(struct lttng_event *event)
432 {
433 int ret = 0;
434
435 mutex_lock(&sessions_mutex);
436 if (event->chan->channel_type == METADATA_CHANNEL) {
437 ret = -EPERM;
438 goto end;
439 }
440 if (!event->enabled) {
441 ret = -EEXIST;
442 goto end;
443 }
444 switch (event->instrumentation) {
445 case LTTNG_KERNEL_TRACEPOINT:
446 case LTTNG_KERNEL_SYSCALL:
447 ret = -EINVAL;
448 break;
449 case LTTNG_KERNEL_KPROBE:
450 case LTTNG_KERNEL_UPROBE:
451 case LTTNG_KERNEL_NOOP:
452 WRITE_ONCE(event->enabled, 0);
453 break;
454 case LTTNG_KERNEL_KRETPROBE:
455 ret = lttng_kretprobes_event_enable_state(event, 0);
456 break;
457 case LTTNG_KERNEL_FUNCTION: /* Fall-through. */
458 default:
459 WARN_ON_ONCE(1);
460 ret = -EINVAL;
461 }
462 end:
463 mutex_unlock(&sessions_mutex);
464 return ret;
465 }
466
467 static struct lttng_transport *lttng_transport_find(const char *name)
468 {
469 struct lttng_transport *transport;
470
471 list_for_each_entry(transport, &lttng_transport_list, node) {
472 if (!strcmp(transport->name, name))
473 return transport;
474 }
475 return NULL;
476 }
477
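/*
 * Create a channel backed by the named transport (e.g. a ring-buffer
 * client). The transport module is pinned with try_module_get() for the
 * lifetime of the channel. Adding a non-metadata channel to a session
 * that has already been active is refused.
 */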
478 struct lttng_channel *lttng_channel_create(struct lttng_session *session,
479 const char *transport_name,
480 void *buf_addr,
481 size_t subbuf_size, size_t num_subbuf,
482 unsigned int switch_timer_interval,
483 unsigned int read_timer_interval,
484 enum channel_type channel_type)
485 {
486 struct lttng_channel *chan;
487 struct lttng_transport *transport = NULL;
488
489 mutex_lock(&sessions_mutex);
490 if (session->been_active && channel_type != METADATA_CHANNEL)
491 goto active; /* Refuse to add channel to active session */
492 transport = lttng_transport_find(transport_name);
493 if (!transport) {
494 printk(KERN_WARNING "LTTng transport %s not found\n",
495 transport_name);
496 goto notransport;
497 }
498 if (!try_module_get(transport->owner)) {
499 printk(KERN_WARNING "LTT : Can't lock transport module.\n");
500 goto notransport;
501 }
502 chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
503 if (!chan)
504 goto nomem;
505 chan->session = session;
506 chan->id = session->free_chan_id++;
507 chan->ops = &transport->ops;
508 /*
509 * Note: the channel creation op already writes into the packet
510 * headers. Therefore the "chan" information used as input
511 * should be already accessible.
512 */
513 chan->chan = transport->ops.channel_create(transport_name,
514 chan, buf_addr, subbuf_size, num_subbuf,
515 switch_timer_interval, read_timer_interval);
516 if (!chan->chan)
517 goto create_error;
518 chan->tstate = 1;
519 chan->enabled = 1;
520 chan->transport = transport;
521 chan->channel_type = channel_type;
522 list_add(&chan->list, &session->chan);
523 mutex_unlock(&sessions_mutex);
524 return chan;
525
526 create_error:
527 kfree(chan);
528 nomem:
529 if (transport)
530 module_put(transport->owner);
531 notransport:
532 active:
533 mutex_unlock(&sessions_mutex);
534 return NULL;
535 }
536
537 /*
538 * Only used internally at session destruction for per-cpu channels, and
539 * when metadata channel is released.
540 * Needs to be called with sessions mutex held.
541 */
542 static
543 void _lttng_channel_destroy(struct lttng_channel *chan)
544 {
545 chan->ops->channel_destroy(chan->chan);
546 module_put(chan->transport->owner);
547 list_del(&chan->list);
548 lttng_destroy_context(chan->ctx);
549 kfree(chan);
550 }
551
552 void lttng_metadata_channel_destroy(struct lttng_channel *chan)
553 {
554 BUG_ON(chan->channel_type != METADATA_CHANNEL);
555
556 /* Protect the metadata cache with the sessions_mutex. */
557 mutex_lock(&sessions_mutex);
558 _lttng_channel_destroy(chan);
559 mutex_unlock(&sessions_mutex);
560 }
561 EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
562
563 static
564 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
565 {
566 stream->finalized = 1;
567 wake_up_interruptible(&stream->read_wait);
568 }
569
570 /*
571 * Supports event creation while tracing session is active.
572 * Needs to be called with sessions mutex held.
573 */
574 struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
575 struct lttng_kernel_event *event_param,
576 void *filter,
577 const struct lttng_event_desc *event_desc,
578 enum lttng_kernel_instrumentation itype)
579 {
580 struct lttng_session *session = chan->session;
581 struct lttng_event *event;
582 const char *event_name;
583 struct hlist_head *head;
584 size_t name_len;
585 uint32_t hash;
586 int ret;
587
588 if (chan->free_event_id == -1U) {
589 ret = -EMFILE;
590 goto full;
591 }
592
593 switch (itype) {
594 case LTTNG_KERNEL_TRACEPOINT:
595 event_name = event_desc->name;
596 break;
597 case LTTNG_KERNEL_KPROBE:
598 case LTTNG_KERNEL_UPROBE:
599 case LTTNG_KERNEL_KRETPROBE:
600 case LTTNG_KERNEL_NOOP:
601 case LTTNG_KERNEL_SYSCALL:
602 event_name = event_param->name;
603 break;
604 case LTTNG_KERNEL_FUNCTION: /* Fall-through. */
605 default:
606 WARN_ON_ONCE(1);
607 ret = -EINVAL;
608 goto type_error;
609 }
610 name_len = strlen(event_name);
611 hash = jhash(event_name, name_len, 0);
612 head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
613 hlist_for_each_entry(event, head, hlist) {
614 WARN_ON_ONCE(!event->desc);
615 if (!strncmp(event->desc->name, event_name,
616 LTTNG_KERNEL_SYM_NAME_LEN - 1)
617 && chan == event->chan) {
618 ret = -EEXIST;
619 goto exist;
620 }
621 }
622
623 event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
624 if (!event) {
625 ret = -ENOMEM;
626 goto cache_error;
627 }
628 event->chan = chan;
629 event->filter = filter;
630 event->id = chan->free_event_id++;
631 event->instrumentation = itype;
632 event->evtype = LTTNG_TYPE_EVENT;
633 INIT_LIST_HEAD(&event->bytecode_runtime_head);
634 INIT_LIST_HEAD(&event->enablers_ref_head);
635
636 switch (itype) {
637 case LTTNG_KERNEL_TRACEPOINT:
638 /* Event will be enabled by enabler sync. */
639 event->enabled = 0;
640 event->registered = 0;
641 event->desc = lttng_event_get(event_name);
642 if (!event->desc) {
643 ret = -ENOENT;
644 goto register_error;
645 }
646 /* Populate lttng_event structure before event registration. */
647 smp_wmb();
648 break;
649 case LTTNG_KERNEL_KPROBE:
650 /*
651 * Needs to be explicitly enabled after creation, since
652 * we may want to apply filters.
653 */
654 event->enabled = 0;
655 event->registered = 1;
656 /*
657 * Populate lttng_event structure before event
658 * registration.
659 */
660 smp_wmb();
661 ret = lttng_kprobes_register(event_name,
662 event_param->u.kprobe.symbol_name,
663 event_param->u.kprobe.offset,
664 event_param->u.kprobe.addr,
665 event);
666 if (ret) {
667 ret = -EINVAL;
668 goto register_error;
669 }
670 ret = try_module_get(event->desc->owner);
671 WARN_ON_ONCE(!ret);
672 break;
673 case LTTNG_KERNEL_KRETPROBE:
674 {
675 struct lttng_event *event_return;
676
677 /* kretprobe defines 2 events */
678 /*
679 * Needs to be explicitly enabled after creation, since
680 * we may want to apply filters.
681 */
682 event->enabled = 0;
683 event->registered = 1;
684 event_return =
685 kmem_cache_zalloc(event_cache, GFP_KERNEL);
686 if (!event_return) {
687 ret = -ENOMEM;
688 goto register_error;
689 }
690 event_return->chan = chan;
691 event_return->filter = filter;
692 event_return->id = chan->free_event_id++;
693 event_return->enabled = 0;
694 event_return->registered = 1;
695 event_return->instrumentation = itype;
696 /*
697 * Populate lttng_event structure before kretprobe registration.
698 */
699 smp_wmb();
700 ret = lttng_kretprobes_register(event_name,
701 event_param->u.kretprobe.symbol_name,
702 event_param->u.kretprobe.offset,
703 event_param->u.kretprobe.addr,
704 event, event_return);
705 if (ret) {
706 kmem_cache_free(event_cache, event_return);
707 ret = -EINVAL;
708 goto register_error;
709 }
710 /* Take 2 refs on the module: one per event. */
711 ret = try_module_get(event->desc->owner);
712 WARN_ON_ONCE(!ret);
713 ret = try_module_get(event->desc->owner);
714 WARN_ON_ONCE(!ret);
715 ret = _lttng_event_metadata_statedump(chan->session, chan,
716 event_return);
717 WARN_ON_ONCE(ret > 0);
718 if (ret) {
719 kmem_cache_free(event_cache, event_return);
720 module_put(event->desc->owner);
721 module_put(event->desc->owner);
722 goto statedump_error;
723 }
724 list_add(&event_return->list, &chan->session->events);
725 break;
726 }
727 case LTTNG_KERNEL_NOOP:
728 case LTTNG_KERNEL_SYSCALL:
729 /*
730 * Needs to be explicitly enabled after creation, since
731 * we may want to apply filters.
732 */
733 event->enabled = 0;
734 event->registered = 0;
735 event->desc = event_desc;
736 if (!event->desc) {
737 ret = -EINVAL;
738 goto register_error;
739 }
740 break;
741 case LTTNG_KERNEL_UPROBE:
742 /*
743 * Needs to be explicitly enabled after creation, since
744 * we may want to apply filters.
745 */
746 event->enabled = 0;
747 event->registered = 1;
748
749 /*
750 * Populate lttng_event structure before event
751 * registration.
752 */
753 smp_wmb();
754
755 ret = lttng_uprobes_register(event_param->name,
756 event_param->u.uprobe.fd,
757 event);
758 if (ret)
759 goto register_error;
760 ret = try_module_get(event->desc->owner);
761 WARN_ON_ONCE(!ret);
762 break;
763 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
764 default:
765 WARN_ON_ONCE(1);
766 ret = -EINVAL;
767 goto register_error;
768 }
769 ret = _lttng_event_metadata_statedump(chan->session, chan, event);
770 WARN_ON_ONCE(ret > 0);
771 if (ret) {
772 goto statedump_error;
773 }
774 hlist_add_head(&event->hlist, head);
775 list_add(&event->list, &chan->session->events);
776 return event;
777
778 statedump_error:
779 /* If a statedump error occurs, events will not be readable. */
780 register_error:
781 kmem_cache_free(event_cache, event);
782 cache_error:
783 exist:
784 type_error:
785 full:
786 return ERR_PTR(ret);
787 }
788
789 struct lttng_event *lttng_event_create(struct lttng_channel *chan,
790 struct lttng_kernel_event *event_param,
791 void *filter,
792 const struct lttng_event_desc *event_desc,
793 enum lttng_kernel_instrumentation itype)
794 {
795 struct lttng_event *event;
796
797 mutex_lock(&sessions_mutex);
798 event = _lttng_event_create(chan, event_param, filter, event_desc,
799 itype);
800 mutex_unlock(&sessions_mutex);
801 return event;
802 }
803
804 /* Only used for tracepoints for now. */
805 static
806 void register_event(struct lttng_event *event)
807 {
808 const struct lttng_event_desc *desc;
809 int ret = -EINVAL;
810
811 if (event->registered)
812 return;
813
814 desc = event->desc;
815 switch (event->instrumentation) {
816 case LTTNG_KERNEL_TRACEPOINT:
817 ret = lttng_tracepoint_probe_register(desc->kname,
818 desc->probe_callback,
819 event);
820 break;
821 case LTTNG_KERNEL_SYSCALL:
822 ret = lttng_syscall_filter_enable(event->chan,
823 desc->name);
824 break;
825 case LTTNG_KERNEL_KPROBE:
826 case LTTNG_KERNEL_UPROBE:
827 case LTTNG_KERNEL_KRETPROBE:
828 case LTTNG_KERNEL_NOOP:
829 ret = 0;
830 break;
831 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
832 default:
833 WARN_ON_ONCE(1);
834 }
835 if (!ret)
836 event->registered = 1;
837 }
838
839 /*
840 * Only used internally at session destruction.
841 */
842 int _lttng_event_unregister(struct lttng_event *event)
843 {
844 const struct lttng_event_desc *desc;
845 int ret = -EINVAL;
846
847 if (!event->registered)
848 return 0;
849
850 desc = event->desc;
851 switch (event->instrumentation) {
852 case LTTNG_KERNEL_TRACEPOINT:
853 ret = lttng_tracepoint_probe_unregister(event->desc->kname,
854 event->desc->probe_callback,
855 event);
856 break;
857 case LTTNG_KERNEL_KPROBE:
858 lttng_kprobes_unregister(event);
859 ret = 0;
860 break;
861 case LTTNG_KERNEL_KRETPROBE:
862 lttng_kretprobes_unregister(event);
863 ret = 0;
864 break;
865 case LTTNG_KERNEL_SYSCALL:
866 ret = lttng_syscall_filter_disable(event->chan,
867 desc->name);
868 break;
869 case LTTNG_KERNEL_NOOP:
870 ret = 0;
871 break;
872 case LTTNG_KERNEL_UPROBE:
873 lttng_uprobes_unregister(event);
874 ret = 0;
875 break;
876 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
877 default:
878 WARN_ON_ONCE(1);
879 }
880 if (!ret)
881 event->registered = 0;
882 return ret;
883 }
884
885 /*
886 * Only used internally at session destruction.
887 */
888 static
889 void _lttng_event_destroy(struct lttng_event *event)
890 {
891 switch (event->instrumentation) {
892 case LTTNG_KERNEL_TRACEPOINT:
893 lttng_event_put(event->desc);
894 break;
895 case LTTNG_KERNEL_KPROBE:
896 module_put(event->desc->owner);
897 lttng_kprobes_destroy_private(event);
898 break;
899 case LTTNG_KERNEL_KRETPROBE:
900 module_put(event->desc->owner);
901 lttng_kretprobes_destroy_private(event);
902 break;
903 case LTTNG_KERNEL_NOOP:
904 case LTTNG_KERNEL_SYSCALL:
905 break;
906 case LTTNG_KERNEL_UPROBE:
907 module_put(event->desc->owner);
908 lttng_uprobes_destroy_private(event);
909 break;
910 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
911 default:
912 WARN_ON_ONCE(1);
913 }
914 list_del(&event->list);
915 lttng_destroy_context(event->ctx);
916 kmem_cache_free(event_cache, event);
917 }
918
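/* Map a tracker type to the corresponding per-session ID tracker. */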
919 struct lttng_id_tracker *get_tracker(struct lttng_session *session,
920 enum tracker_type tracker_type)
921 {
922 switch (tracker_type) {
923 case TRACKER_PID:
924 return &session->pid_tracker;
925 case TRACKER_VPID:
926 return &session->vpid_tracker;
927 case TRACKER_UID:
928 return &session->uid_tracker;
929 case TRACKER_VUID:
930 return &session->vuid_tracker;
931 case TRACKER_GID:
932 return &session->gid_tracker;
933 case TRACKER_VGID:
934 return &session->vgid_tracker;
935 default:
936 WARN_ON_ONCE(1);
937 return NULL;
938 }
939 }
940
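/*
 * Add an ID to a session tracker. Passing id == -1 means "track all
 * IDs": the tracker is destroyed so no filtering is applied. The
 * companion lttng_session_untrack_id() treats id == -1 as "untrack
 * everything" by installing an empty tracker.
 */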
941 int lttng_session_track_id(struct lttng_session *session,
942 enum tracker_type tracker_type, int id)
943 {
944 struct lttng_id_tracker *tracker;
945 int ret;
946
947 tracker = get_tracker(session, tracker_type);
948 if (!tracker)
949 return -EINVAL;
950 if (id < -1)
951 return -EINVAL;
952 mutex_lock(&sessions_mutex);
953 if (id == -1) {
954 /* track all ids: destroy tracker. */
955 lttng_id_tracker_destroy(tracker, true);
956 ret = 0;
957 } else {
958 ret = lttng_id_tracker_add(tracker, id);
959 }
960 mutex_unlock(&sessions_mutex);
961 return ret;
962 }
963
964 int lttng_session_untrack_id(struct lttng_session *session,
965 enum tracker_type tracker_type, int id)
966 {
967 struct lttng_id_tracker *tracker;
968 int ret;
969
970 tracker = get_tracker(session, tracker_type);
971 if (!tracker)
972 return -EINVAL;
973 if (id < -1)
974 return -EINVAL;
975 mutex_lock(&sessions_mutex);
976 if (id == -1) {
977 /* untrack all ids: replace by empty tracker. */
978 ret = lttng_id_tracker_empty_set(tracker);
979 } else {
980 ret = lttng_id_tracker_del(tracker, id);
981 }
982 mutex_unlock(&sessions_mutex);
983 return ret;
984 }
985
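/*
 * seq_file iterator over a tracker's ID hash table. id_list_start()
 * takes sessions_mutex, which id_list_stop() releases. A disabled
 * tracker (tracking all IDs) is represented by the tracker pointer
 * itself and rendered as id = -1 by id_list_show().
 */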
986 static
987 void *id_list_start(struct seq_file *m, loff_t *pos)
988 {
989 struct lttng_id_tracker *id_tracker = m->private;
990 struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
991 struct lttng_id_hash_node *e;
992 int iter = 0, i;
993
994 mutex_lock(&sessions_mutex);
995 if (id_tracker_p) {
996 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
997 struct hlist_head *head = &id_tracker_p->id_hash[i];
998
999 hlist_for_each_entry(e, head, hlist) {
1000 if (iter++ >= *pos)
1001 return e;
1002 }
1003 }
1004 } else {
1005 /* ID tracker disabled. */
1006 if (iter >= *pos && iter == 0) {
1007 return id_tracker_p; /* empty tracker */
1008 }
1009 iter++;
1010 }
1011 /* End of list */
1012 return NULL;
1013 }
1014
1015 /* Called with sessions_mutex held. */
1016 static
1017 void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
1018 {
1019 struct lttng_id_tracker *id_tracker = m->private;
1020 struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
1021 struct lttng_id_hash_node *e;
1022 int iter = 0, i;
1023
1024 (*ppos)++;
1025 if (id_tracker_p) {
1026 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1027 struct hlist_head *head = &id_tracker_p->id_hash[i];
1028
1029 hlist_for_each_entry(e, head, hlist) {
1030 if (iter++ >= *ppos)
1031 return e;
1032 }
1033 }
1034 } else {
1035 /* ID tracker disabled. */
1036 if (iter >= *ppos && iter == 0)
1037 return p; /* empty tracker */
1038 iter++;
1039 }
1040
1041 /* End of list */
1042 return NULL;
1043 }
1044
1045 static
1046 void id_list_stop(struct seq_file *m, void *p)
1047 {
1048 mutex_unlock(&sessions_mutex);
1049 }
1050
1051 static
1052 int id_list_show(struct seq_file *m, void *p)
1053 {
1054 struct lttng_id_tracker *id_tracker = m->private;
1055 struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
1056 int id;
1057
1058 if (p == id_tracker_p) {
1059 /* Tracker disabled. */
1060 id = -1;
1061 } else {
1062 const struct lttng_id_hash_node *e = p;
1063
1064 id = lttng_id_tracker_get_node_id(e);
1065 }
1066 switch (id_tracker->tracker_type) {
1067 case TRACKER_PID:
1068 seq_printf(m, "process { pid = %d; };\n", id);
1069 break;
1070 case TRACKER_VPID:
1071 seq_printf(m, "process { vpid = %d; };\n", id);
1072 break;
1073 case TRACKER_UID:
1074 seq_printf(m, "user { uid = %d; };\n", id);
1075 break;
1076 case TRACKER_VUID:
1077 seq_printf(m, "user { vuid = %d; };\n", id);
1078 break;
1079 case TRACKER_GID:
1080 seq_printf(m, "group { gid = %d; };\n", id);
1081 break;
1082 case TRACKER_VGID:
1083 seq_printf(m, "group { vgid = %d; };\n", id);
1084 break;
1085 default:
1086 seq_printf(m, "UNKNOWN { field = %d };\n", id);
1087 }
1088 return 0;
1089 }
1090
1091 static
1092 const struct seq_operations lttng_tracker_ids_list_seq_ops = {
1093 .start = id_list_start,
1094 .next = id_list_next,
1095 .stop = id_list_stop,
1096 .show = id_list_show,
1097 };
1098
1099 static
1100 int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
1101 {
1102 return seq_open(file, &lttng_tracker_ids_list_seq_ops);
1103 }
1104
1105 static
1106 int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
1107 {
1108 struct seq_file *m = file->private_data;
1109 struct lttng_id_tracker *id_tracker = m->private;
1110 int ret;
1111
1112 WARN_ON_ONCE(!id_tracker);
1113 ret = seq_release(inode, file);
1114 if (!ret)
1115 fput(id_tracker->session->file);
1116 return ret;
1117 }
1118
1119 const struct file_operations lttng_tracker_ids_list_fops = {
1120 .owner = THIS_MODULE,
1121 .open = lttng_tracker_ids_list_open,
1122 .read = seq_read,
1123 .llseek = seq_lseek,
1124 .release = lttng_tracker_ids_list_release,
1125 };
1126
1127 int lttng_session_list_tracker_ids(struct lttng_session *session,
1128 enum tracker_type tracker_type)
1129 {
1130 struct file *tracker_ids_list_file;
1131 struct seq_file *m;
1132 int file_fd, ret;
1133
1134 file_fd = get_unused_fd_flags(0);
1135 if (file_fd < 0) {
1136 ret = file_fd;
1137 goto fd_error;
1138 }
1139
1140 tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
1141 &lttng_tracker_ids_list_fops,
1142 NULL, O_RDWR);
1143 if (IS_ERR(tracker_ids_list_file)) {
1144 ret = PTR_ERR(tracker_ids_list_file);
1145 goto file_error;
1146 }
1147 if (!atomic_long_add_unless(&session->file->f_count, 1, LONG_MAX)) {
1148 ret = -EOVERFLOW;
1149 goto refcount_error;
1150 }
1151 ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
1152 if (ret < 0)
1153 goto open_error;
1154 m = tracker_ids_list_file->private_data;
1155
1156 m->private = get_tracker(session, tracker_type);
1157 BUG_ON(!m->private);
1158 fd_install(file_fd, tracker_ids_list_file);
1159
1160 return file_fd;
1161
1162 open_error:
1163 atomic_long_dec(&session->file->f_count);
1164 refcount_error:
1165 fput(tracker_ids_list_file);
1166 file_error:
1167 put_unused_fd(file_fd);
1168 fd_error:
1169 return ret;
1170 }
1171
1172 /*
1173 * Enabler management.
1174 */
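/*
 * An enabler records an event name or star-glob pattern requested by
 * user space. Enablers are matched lazily against the available probe
 * descriptors: matching events are created on demand and keep backward
 * references to the enablers that enabled them (see
 * lttng_enabler_ref_events() below).
 */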
1175 static
1176 int lttng_match_enabler_star_glob(const char *desc_name,
1177 const char *pattern)
1178 {
1179 if (!strutils_star_glob_match(pattern, SIZE_MAX,
1180 desc_name, SIZE_MAX))
1181 return 0;
1182 return 1;
1183 }
1184
1185 static
1186 int lttng_match_enabler_name(const char *desc_name,
1187 const char *name)
1188 {
1189 if (strcmp(desc_name, name))
1190 return 0;
1191 return 1;
1192 }
1193
1194 static
1195 int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
1196 struct lttng_enabler *enabler)
1197 {
1198 const char *desc_name, *enabler_name;
1199
1200 enabler_name = enabler->event_param.name;
1201 switch (enabler->event_param.instrumentation) {
1202 case LTTNG_KERNEL_TRACEPOINT:
1203 desc_name = desc->name;
1204 break;
1205 case LTTNG_KERNEL_SYSCALL:
1206 desc_name = desc->name;
1207 if (!strncmp(desc_name, "compat_", strlen("compat_")))
1208 desc_name += strlen("compat_");
1209 if (!strncmp(desc_name, "syscall_exit_",
1210 strlen("syscall_exit_"))) {
1211 desc_name += strlen("syscall_exit_");
1212 } else if (!strncmp(desc_name, "syscall_entry_",
1213 strlen("syscall_entry_"))) {
1214 desc_name += strlen("syscall_entry_");
1215 } else {
1216 WARN_ON_ONCE(1);
1217 return -EINVAL;
1218 }
1219 break;
1220 default:
1221 WARN_ON_ONCE(1);
1222 return -EINVAL;
1223 }
1224 switch (enabler->type) {
1225 case LTTNG_ENABLER_STAR_GLOB:
1226 return lttng_match_enabler_star_glob(desc_name, enabler_name);
1227 case LTTNG_ENABLER_NAME:
1228 return lttng_match_enabler_name(desc_name, enabler_name);
1229 default:
1230 return -EINVAL;
1231 }
1232 }
1233
1234 static
1235 int lttng_event_match_enabler(struct lttng_event *event,
1236 struct lttng_enabler *enabler)
1237 {
1238 if (enabler->event_param.instrumentation != event->instrumentation)
1239 return 0;
1240 if (lttng_desc_match_enabler(event->desc, enabler)
1241 && event->chan == enabler->chan)
1242 return 1;
1243 else
1244 return 0;
1245 }
1246
1247 static
1248 struct lttng_enabler_ref *lttng_event_enabler_ref(struct lttng_event *event,
1249 struct lttng_enabler *enabler)
1250 {
1251 struct lttng_enabler_ref *enabler_ref;
1252
1253 list_for_each_entry(enabler_ref,
1254 &event->enablers_ref_head, node) {
1255 if (enabler_ref->ref == enabler)
1256 return enabler_ref;
1257 }
1258 return NULL;
1259 }
1260
1261 static
1262 void lttng_create_tracepoint_if_missing(struct lttng_enabler *enabler)
1263 {
1264 struct lttng_session *session = enabler->chan->session;
1265 struct lttng_probe_desc *probe_desc;
1266 const struct lttng_event_desc *desc;
1267 int i;
1268 struct list_head *probe_list;
1269
1270 probe_list = lttng_get_probe_list_head();
1271 /*
1272 * For each probe event, if we find that a probe event matches
1273 * our enabler, create an associated lttng_event if not
1274 * already present.
1275 */
1276 list_for_each_entry(probe_desc, probe_list, head) {
1277 for (i = 0; i < probe_desc->nr_events; i++) {
1278 int found = 0;
1279 struct hlist_head *head;
1280 const char *event_name;
1281 size_t name_len;
1282 uint32_t hash;
1283 struct lttng_event *event;
1284
1285 desc = probe_desc->event_desc[i];
1286 if (!lttng_desc_match_enabler(desc, enabler))
1287 continue;
1288 event_name = desc->name;
1289 name_len = strlen(event_name);
1290
1291 /*
1292 * Check if already created.
1293 */
1294 hash = jhash(event_name, name_len, 0);
1295 head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
1296 hlist_for_each_entry(event, head, hlist) {
1297 if (event->desc == desc
1298 && event->chan == enabler->chan)
1299 found = 1;
1300 }
1301 if (found)
1302 continue;
1303
1304 /*
1305 * We need to create an event for this
1306 * event probe.
1307 */
1308 event = _lttng_event_create(enabler->chan,
1309 NULL, NULL, desc,
1310 LTTNG_KERNEL_TRACEPOINT);
1311 if (IS_ERR(event)) {
1312 printk(KERN_INFO "Unable to create event %s\n",
1313 probe_desc->event_desc[i]->name);
1314 }
1315 }
1316 }
1317 }
1318
1319 static
1320 void lttng_create_syscall_if_missing(struct lttng_enabler *enabler)
1321 {
1322 int ret;
1323
1324 ret = lttng_syscalls_register(enabler->chan, NULL);
1325 WARN_ON_ONCE(ret);
1326 }
1327
1328 /*
1329 * Create struct lttng_event if it is missing and present in the list of
1330 * tracepoint probes.
1331 * Should be called with sessions mutex held.
1332 */
1333 static
1334 void lttng_create_event_if_missing(struct lttng_enabler *enabler)
1335 {
1336 switch (enabler->event_param.instrumentation) {
1337 case LTTNG_KERNEL_TRACEPOINT:
1338 lttng_create_tracepoint_if_missing(enabler);
1339 break;
1340 case LTTNG_KERNEL_SYSCALL:
1341 lttng_create_syscall_if_missing(enabler);
1342 break;
1343 default:
1344 WARN_ON_ONCE(1);
1345 break;
1346 }
1347 }
1348
1349 /*
1350 * Create events associated with an enabler (if not already present),
1351 * and add backward reference from the event to the enabler.
1352 * Should be called with sessions mutex held.
1353 */
1354 static
1355 int lttng_enabler_ref_events(struct lttng_enabler *enabler)
1356 {
1357 struct lttng_session *session = enabler->chan->session;
1358 struct lttng_event *event;
1359
1360 /* First ensure that probe events are created for this enabler. */
1361 lttng_create_event_if_missing(enabler);
1362
1363 /* For each event matching enabler in session event list. */
1364 list_for_each_entry(event, &session->events, list) {
1365 struct lttng_enabler_ref *enabler_ref;
1366
1367 if (!lttng_event_match_enabler(event, enabler))
1368 continue;
1369 enabler_ref = lttng_event_enabler_ref(event, enabler);
1370 if (!enabler_ref) {
1371 /*
1372 * If no backward ref, create it.
1373 * Add backward ref from event to enabler.
1374 */
1375 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
1376 if (!enabler_ref)
1377 return -ENOMEM;
1378 enabler_ref->ref = enabler;
1379 list_add(&enabler_ref->node,
1380 &event->enablers_ref_head);
1381 }
1382
1383 /*
1384 * Link filter bytecodes if not linked yet.
1385 */
1386 lttng_enabler_event_link_bytecode(event, enabler);
1387
1388 /* TODO: merge event context. */
1389 }
1390 return 0;
1391 }
1392
1393 /*
1394 * Called at module load: connect the probe on all enablers matching
1395 * this event.
1396 * Called with sessions lock held.
1397 */
1398 int lttng_fix_pending_events(void)
1399 {
1400 struct lttng_session *session;
1401
1402 list_for_each_entry(session, &sessions, list)
1403 lttng_session_lazy_sync_enablers(session);
1404 return 0;
1405 }
1406
1407 struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
1408 struct lttng_kernel_event *event_param,
1409 struct lttng_channel *chan)
1410 {
1411 struct lttng_enabler *enabler;
1412
1413 enabler = kzalloc(sizeof(*enabler), GFP_KERNEL);
1414 if (!enabler)
1415 return NULL;
1416 enabler->type = type;
1417 INIT_LIST_HEAD(&enabler->filter_bytecode_head);
1418 memcpy(&enabler->event_param, event_param,
1419 sizeof(enabler->event_param));
1420 enabler->chan = chan;
1421 /* ctx left NULL */
1422 enabler->enabled = 0;
1423 enabler->evtype = LTTNG_TYPE_ENABLER;
1424 mutex_lock(&sessions_mutex);
1425 list_add(&enabler->node, &enabler->chan->session->enablers_head);
1426 lttng_session_lazy_sync_enablers(enabler->chan->session);
1427 mutex_unlock(&sessions_mutex);
1428 return enabler;
1429 }
1430
1431 int lttng_enabler_enable(struct lttng_enabler *enabler)
1432 {
1433 mutex_lock(&sessions_mutex);
1434 enabler->enabled = 1;
1435 lttng_session_lazy_sync_enablers(enabler->chan->session);
1436 mutex_unlock(&sessions_mutex);
1437 return 0;
1438 }
1439
1440 int lttng_enabler_disable(struct lttng_enabler *enabler)
1441 {
1442 mutex_lock(&sessions_mutex);
1443 enabler->enabled = 0;
1444 lttng_session_lazy_sync_enablers(enabler->chan->session);
1445 mutex_unlock(&sessions_mutex);
1446 return 0;
1447 }
1448
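/*
 * Copy a filter bytecode program from user space and append it to the
 * enabler's bytecode list; it is linked to matching events at the next
 * enabler sync. The length field is clamped to the size actually
 * allocated.
 */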
1449 int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
1450 struct lttng_kernel_filter_bytecode __user *bytecode)
1451 {
1452 struct lttng_filter_bytecode_node *bytecode_node;
1453 uint32_t bytecode_len;
1454 int ret;
1455
1456 ret = get_user(bytecode_len, &bytecode->len);
1457 if (ret)
1458 return ret;
1459 bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
1460 GFP_KERNEL);
1461 if (!bytecode_node)
1462 return -ENOMEM;
1463 ret = copy_from_user(&bytecode_node->bc, bytecode,
1464 sizeof(*bytecode) + bytecode_len);
1465 if (ret)
1466 goto error_free;
1467 bytecode_node->enabler = enabler;
1468 /* Enforce length based on allocated size */
1469 bytecode_node->bc.len = bytecode_len;
1470 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
1471 lttng_session_lazy_sync_enablers(enabler->chan->session);
1472 return 0;
1473
1474 error_free:
1475 kfree(bytecode_node);
1476 return ret;
1477 }
1478
1479 int lttng_event_add_callsite(struct lttng_event *event,
1480 struct lttng_kernel_event_callsite __user *callsite)
1481 {
1482
1483 switch (event->instrumentation) {
1484 case LTTNG_KERNEL_UPROBE:
1485 return lttng_uprobes_add_callsite(event, callsite);
1486 default:
1487 return -EINVAL;
1488 }
1489 }
1490
1491 int lttng_enabler_attach_context(struct lttng_enabler *enabler,
1492 struct lttng_kernel_context *context_param)
1493 {
1494 return -ENOSYS;
1495 }
1496
1497 static
1498 void lttng_enabler_destroy(struct lttng_enabler *enabler)
1499 {
1500 struct lttng_filter_bytecode_node *filter_node, *tmp_filter_node;
1501
1502 /* Destroy filter bytecode */
1503 list_for_each_entry_safe(filter_node, tmp_filter_node,
1504 &enabler->filter_bytecode_head, node) {
1505 kfree(filter_node);
1506 }
1507
1508 /* Destroy contexts */
1509 lttng_destroy_context(enabler->ctx);
1510
1511 list_del(&enabler->node);
1512 kfree(enabler);
1513 }
1514
1515 /*
1516 * lttng_session_sync_enablers should be called just before starting a
1517 * session.
1518 * Should be called with sessions mutex held.
1519 */
1520 static
1521 void lttng_session_sync_enablers(struct lttng_session *session)
1522 {
1523 struct lttng_enabler *enabler;
1524 struct lttng_event *event;
1525
1526 list_for_each_entry(enabler, &session->enablers_head, node)
1527 lttng_enabler_ref_events(enabler);
1528 /*
1529 * For each event, if at least one of its enablers is enabled,
1530 * and its channel and session transient states are enabled, we
1531 * enable the event, else we disable it.
1532 */
1533 list_for_each_entry(event, &session->events, list) {
1534 struct lttng_enabler_ref *enabler_ref;
1535 struct lttng_bytecode_runtime *runtime;
1536 int enabled = 0, has_enablers_without_bytecode = 0;
1537
1538 switch (event->instrumentation) {
1539 case LTTNG_KERNEL_TRACEPOINT:
1540 case LTTNG_KERNEL_SYSCALL:
1541 /* Enable events */
1542 list_for_each_entry(enabler_ref,
1543 &event->enablers_ref_head, node) {
1544 if (enabler_ref->ref->enabled) {
1545 enabled = 1;
1546 break;
1547 }
1548 }
1549 break;
1550 default:
1551 /* Not handled with lazy sync. */
1552 continue;
1553 }
1554 /*
1555 * Enabled state is based on union of enablers, with
1556 * intersection of session and channel transient enable
1557 * states.
1558 */
1559 enabled = enabled && session->tstate && event->chan->tstate;
1560
1561 WRITE_ONCE(event->enabled, enabled);
1562 /*
1563 * Sync tracepoint registration with event enabled
1564 * state.
1565 */
1566 if (enabled) {
1567 register_event(event);
1568 } else {
1569 _lttng_event_unregister(event);
1570 }
1571
1572 /* Check if the event has any enabled enablers without filter bytecode. */
1573 list_for_each_entry(enabler_ref,
1574 &event->enablers_ref_head, node) {
1575 if (enabler_ref->ref->enabled
1576 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
1577 has_enablers_without_bytecode = 1;
1578 break;
1579 }
1580 }
1581 event->has_enablers_without_bytecode =
1582 has_enablers_without_bytecode;
1583
1584 /* Enable filters */
1585 list_for_each_entry(runtime,
1586 &event->bytecode_runtime_head, node)
1587 lttng_filter_sync_state(runtime);
1588 }
1589 }
1590
1591 /*
1592 * Apply enablers to session events, adding events to session if need
1593 * be. It is required after each modification applied to an active
1594 * session, and right before session "start".
1595 * "lazy" sync means we only sync if required.
1596 * Should be called with sessions mutex held.
1597 */
1598 static
1599 void lttng_session_lazy_sync_enablers(struct lttng_session *session)
1600 {
1601 /* We can skip if session is not active */
1602 if (!session->active)
1603 return;
1604 lttng_session_sync_enablers(session);
1605 }
1606
1607 /*
1608 * Serialize at most one packet worth of metadata into a metadata
1609 * channel.
1610 * We grab the metadata cache mutex to get exclusive access to our metadata
1611 * buffer and to the metadata cache. Exclusive access to the metadata buffer
1612 * allows us to do otherwise racy operations, such as checking the remaining
1613 * space left in a packet before writing, since mutual exclusion protects us
1614 * from concurrent writes. Mutual exclusion on the metadata cache allows us
1615 * to read the cache content without racing against reallocation by updates.
1616 * Returns the number of bytes written in the channel, 0 if no data
1617 * was written and a negative value on error.
1618 */
1619 int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
1620 struct channel *chan)
1621 {
1622 struct lib_ring_buffer_ctx ctx;
1623 int ret = 0;
1624 size_t len, reserve_len;
1625
1626 /*
1627 * Ensure we support multiple get_next / put sequences followed by
1628 * put_next. The metadata cache lock protects reading the metadata
1629 * cache. It can indeed be read concurrently by "get_next_subbuf" and
1630 * "flush" operations on the buffer invoked by different processes.
1631 * Moreover, since the metadata cache memory can be reallocated, we
1632 * need to have exclusive access against updates even though we only
1633 * read it.
1634 */
1635 mutex_lock(&stream->metadata_cache->lock);
1636 WARN_ON(stream->metadata_in < stream->metadata_out);
1637 if (stream->metadata_in != stream->metadata_out)
1638 goto end;
1639
1640 /* Metadata regenerated, change the version. */
1641 if (stream->metadata_cache->version != stream->version)
1642 stream->version = stream->metadata_cache->version;
1643
1644 len = stream->metadata_cache->metadata_written -
1645 stream->metadata_in;
1646 if (!len)
1647 goto end;
1648 reserve_len = min_t(size_t,
1649 stream->transport->ops.packet_avail_size(chan),
1650 len);
1651 lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
1652 sizeof(char), -1);
1653 /*
1654 * If reservation failed, return an error to the caller.
1655 */
1656 ret = stream->transport->ops.event_reserve(&ctx, 0);
1657 if (ret != 0) {
1658 printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
1659 goto end;
1660 }
1661 stream->transport->ops.event_write(&ctx,
1662 stream->metadata_cache->data + stream->metadata_in,
1663 reserve_len);
1664 stream->transport->ops.event_commit(&ctx);
1665 stream->metadata_in += reserve_len;
1666 ret = reserve_len;
1667
1668 end:
1669 mutex_unlock(&stream->metadata_cache->lock);
1670 return ret;
1671 }
1672
1673 /*
1674 * Write the metadata to the metadata cache.
1675 * Must be called with sessions_mutex held.
1676 * The metadata cache lock protects us from concurrent read access from
1677 * the thread outputting metadata content to the ring buffer.
1678 */
1679 int lttng_metadata_printf(struct lttng_session *session,
1680 const char *fmt, ...)
1681 {
1682 char *str;
1683 size_t len;
1684 va_list ap;
1685 struct lttng_metadata_stream *stream;
1686
1687 WARN_ON_ONCE(!READ_ONCE(session->active));
1688
1689 va_start(ap, fmt);
1690 str = kvasprintf(GFP_KERNEL, fmt, ap);
1691 va_end(ap);
1692 if (!str)
1693 return -ENOMEM;
1694
1695 len = strlen(str);
1696 mutex_lock(&session->metadata_cache->lock);
1697 if (session->metadata_cache->metadata_written + len >
1698 session->metadata_cache->cache_alloc) {
1699 char *tmp_cache_realloc;
1700 unsigned int tmp_cache_alloc_size;
1701
1702 tmp_cache_alloc_size = max_t(unsigned int,
1703 session->metadata_cache->cache_alloc + len,
1704 session->metadata_cache->cache_alloc << 1);
1705 tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
1706 if (!tmp_cache_realloc)
1707 goto err;
1708 if (session->metadata_cache->data) {
1709 memcpy(tmp_cache_realloc,
1710 session->metadata_cache->data,
1711 session->metadata_cache->cache_alloc);
1712 vfree(session->metadata_cache->data);
1713 }
1714
1715 session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
1716 session->metadata_cache->data = tmp_cache_realloc;
1717 }
1718 memcpy(session->metadata_cache->data +
1719 session->metadata_cache->metadata_written,
1720 str, len);
1721 session->metadata_cache->metadata_written += len;
1722 mutex_unlock(&session->metadata_cache->lock);
1723 kfree(str);
1724
1725 list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
1726 wake_up_interruptible(&stream->read_wait);
1727
1728 return 0;
1729
1730 err:
1731 mutex_unlock(&session->metadata_cache->lock);
1732 kfree(str);
1733 return -ENOMEM;
1734 }
1735
1736 static
1737 int print_tabs(struct lttng_session *session, size_t nesting)
1738 {
1739 size_t i;
1740
1741 for (i = 0; i < nesting; i++) {
1742 int ret;
1743
1744 ret = lttng_metadata_printf(session, " ");
1745 if (ret) {
1746 return ret;
1747 }
1748 }
1749 return 0;
1750 }
1751
1752 static
1753 int lttng_field_name_statedump(struct lttng_session *session,
1754 const struct lttng_event_field *field,
1755 size_t nesting)
1756 {
1757 return lttng_metadata_printf(session, " _%s;\n", field->name);
1758 }
1759
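/*
 * Emit a CTF integer type declaration. As a rough example (assuming a
 * 32-bit signed host-endian field), the output looks like:
 *
 *   integer { size = 32; align = 32; signed = 1; encoding = none; base = 10; }
 *
 * A "byte_order" attribute is only added when the field's byte order
 * differs from the host's.
 */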
1760 static
1761 int _lttng_integer_type_statedump(struct lttng_session *session,
1762 const struct lttng_type *type,
1763 size_t nesting)
1764 {
1765 int ret;
1766
1767 WARN_ON_ONCE(type->atype != atype_integer);
1768 ret = print_tabs(session, nesting);
1769 if (ret)
1770 return ret;
1771 ret = lttng_metadata_printf(session,
1772 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
1773 type->u.integer.size,
1774 type->u.integer.alignment,
1775 type->u.integer.signedness,
1776 (type->u.integer.encoding == lttng_encode_none)
1777 ? "none"
1778 : (type->u.integer.encoding == lttng_encode_UTF8)
1779 ? "UTF8"
1780 : "ASCII",
1781 type->u.integer.base,
1782 #if __BYTE_ORDER == __BIG_ENDIAN
1783 type->u.integer.reverse_byte_order ? " byte_order = le;" : ""
1784 #else
1785 type->u.integer.reverse_byte_order ? " byte_order = be;" : ""
1786 #endif
1787 );
1788 return ret;
1789 }
1790
1791 /*
1792 * Must be called with sessions_mutex held.
1793 */
1794 static
1795 int _lttng_struct_type_statedump(struct lttng_session *session,
1796 const struct lttng_type *type,
1797 size_t nesting)
1798 {
1799 int ret;
1800 uint32_t i, nr_fields;
1801 unsigned int alignment;
1802
1803 WARN_ON_ONCE(type->atype != atype_struct_nestable);
1804
1805 ret = print_tabs(session, nesting);
1806 if (ret)
1807 return ret;
1808 ret = lttng_metadata_printf(session,
1809 "struct {\n");
1810 if (ret)
1811 return ret;
1812 nr_fields = type->u.struct_nestable.nr_fields;
1813 for (i = 0; i < nr_fields; i++) {
1814 const struct lttng_event_field *iter_field;
1815
1816 iter_field = &type->u.struct_nestable.fields[i];
1817 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
1818 if (ret)
1819 return ret;
1820 }
1821 ret = print_tabs(session, nesting);
1822 if (ret)
1823 return ret;
1824 alignment = type->u.struct_nestable.alignment;
1825 if (alignment) {
1826 ret = lttng_metadata_printf(session,
1827 "} align(%u)",
1828 alignment);
1829 } else {
1830 ret = lttng_metadata_printf(session,
1831 "}");
1832 }
1833 return ret;
1834 }
1835
1836 /*
1837 * Must be called with sessions_mutex held.
1838 */
1839 static
1840 int _lttng_struct_field_statedump(struct lttng_session *session,
1841 const struct lttng_event_field *field,
1842 size_t nesting)
1843 {
1844 int ret;
1845
1846 ret = _lttng_struct_type_statedump(session,
1847 &field->type, nesting);
1848 if (ret)
1849 return ret;
1850 return lttng_field_name_statedump(session, field, nesting);
1851 }
1852
1853 /*
1854 * Must be called with sessions_mutex held.
1855 */
1856 static
1857 int _lttng_variant_type_statedump(struct lttng_session *session,
1858 const struct lttng_type *type,
1859 size_t nesting)
1860 {
1861 int ret;
1862 uint32_t i, nr_choices;
1863
1864 WARN_ON_ONCE(type->atype != atype_variant_nestable);
1865 /*
1866 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
1867 */
1868 if (type->u.variant_nestable.alignment != 0)
1869 return -EINVAL;
1870 ret = print_tabs(session, nesting);
1871 if (ret)
1872 return ret;
1873 ret = lttng_metadata_printf(session,
1874 "variant <_%s> {\n",
1875 type->u.variant_nestable.tag_name);
1876 if (ret)
1877 return ret;
1878 nr_choices = type->u.variant_nestable.nr_choices;
1879 for (i = 0; i < nr_choices; i++) {
1880 const struct lttng_event_field *iter_field;
1881
1882 iter_field = &type->u.variant_nestable.choices[i];
1883 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
1884 if (ret)
1885 return ret;
1886 }
1887 ret = print_tabs(session, nesting);
1888 if (ret)
1889 return ret;
1890 ret = lttng_metadata_printf(session,
1891 "}");
1892 return ret;
1893 }
1894
1895 /*
1896 * Must be called with sessions_mutex held.
1897 */
1898 static
1899 int _lttng_variant_field_statedump(struct lttng_session *session,
1900 const struct lttng_event_field *field,
1901 size_t nesting)
1902 {
1903 int ret;
1904
1905 ret = _lttng_variant_type_statedump(session,
1906 &field->type, nesting);
1907 if (ret)
1908 return ret;
1909 return lttng_field_name_statedump(session, field, nesting);
1910 }
1911
1912 /*
1913 * Must be called with sessions_mutex held.
1914 */
1915 static
1916 int _lttng_array_field_statedump(struct lttng_session *session,
1917 const struct lttng_event_field *field,
1918 size_t nesting)
1919 {
1920 int ret;
1921 const struct lttng_type *elem_type;
1922
1923 WARN_ON_ONCE(field->type.atype != atype_array_nestable);
1924
1925 if (field->type.u.array_nestable.alignment) {
1926 ret = print_tabs(session, nesting);
1927 if (ret)
1928 return ret;
1929 ret = lttng_metadata_printf(session,
1930 "struct { } align(%u) _%s_padding;\n",
1931 field->type.u.array_nestable.alignment * CHAR_BIT,
1932 field->name);
1933 if (ret)
1934 return ret;
1935 }
1936 /*
1937 * Nested compound types: Only array of structures and variants are
1938 * currently supported.
1939 */
1940 elem_type = field->type.u.array_nestable.elem_type;
1941 switch (elem_type->atype) {
1942 case atype_integer:
1943 case atype_struct_nestable:
1944 case atype_variant_nestable:
1945 ret = _lttng_type_statedump(session, elem_type, nesting);
1946 if (ret)
1947 return ret;
1948 break;
1949
1950 default:
1951 return -EINVAL;
1952 }
1953 ret = lttng_metadata_printf(session,
1954 " _%s[%u];\n",
1955 field->name,
1956 field->type.u.array_nestable.length);
1957 return ret;
1958 }
1959
1960 /*
1961 * Must be called with sessions_mutex held.
1962 */
1963 static
1964 int _lttng_sequence_field_statedump(struct lttng_session *session,
1965 const struct lttng_event_field *field,
1966 size_t nesting)
1967 {
1968 int ret;
1969 const char *length_name;
1970 const struct lttng_type *elem_type;
1971
1972 WARN_ON_ONCE(field->type.atype != atype_sequence_nestable);
1973
1974 length_name = field->type.u.sequence_nestable.length_name;
1975
1976 if (field->type.u.sequence_nestable.alignment) {
1977 ret = print_tabs(session, nesting);
1978 if (ret)
1979 return ret;
1980 ret = lttng_metadata_printf(session,
1981 "struct { } align(%u) _%s_padding;\n",
1982 field->type.u.sequence_nestable.alignment * CHAR_BIT,
1983 field->name);
1984 if (ret)
1985 return ret;
1986 }
1987
1988 /*
1989 * Nested compound types: Only array of structures and variants are
1990 * currently supported.
1991 */
1992 elem_type = field->type.u.sequence_nestable.elem_type;
1993 switch (elem_type->atype) {
1994 case atype_integer:
1995 case atype_struct_nestable:
1996 case atype_variant_nestable:
1997 ret = _lttng_type_statedump(session, elem_type, nesting);
1998 if (ret)
1999 return ret;
2000 break;
2001
2002 default:
2003 return -EINVAL;
2004 }
2005 ret = lttng_metadata_printf(session,
2006 " _%s[ _%s ];\n",
2007 field->name,
2008 field->type.u.sequence_nestable.length_name);
2009 return ret;
2010 }
2011
2012 /*
2013 * Must be called with sessions_mutex held.
2014 */
2015 static
2016 int _lttng_enum_type_statedump(struct lttng_session *session,
2017 const struct lttng_type *type,
2018 size_t nesting)
2019 {
2020 const struct lttng_enum_desc *enum_desc;
2021 const struct lttng_type *container_type;
2022 int ret;
2023 unsigned int i, nr_entries;
2024
2025 container_type = type->u.enum_nestable.container_type;
2026 if (container_type->atype != atype_integer) {
2027 ret = -EINVAL;
2028 goto end;
2029 }
2030 enum_desc = type->u.enum_nestable.desc;
2031 nr_entries = enum_desc->nr_entries;
2032
2033 ret = print_tabs(session, nesting);
2034 if (ret)
2035 goto end;
2036 ret = lttng_metadata_printf(session, "enum : ");
2037 if (ret)
2038 goto end;
2039 ret = _lttng_integer_type_statedump(session, container_type, 0);
2040 if (ret)
2041 goto end;
2042 ret = lttng_metadata_printf(session, " {\n");
2043 if (ret)
2044 goto end;
2045 /* Dump all entries */
2046 for (i = 0; i < nr_entries; i++) {
2047 const struct lttng_enum_entry *entry = &enum_desc->entries[i];
2048 int j, len;
2049
2050 ret = print_tabs(session, nesting + 1);
2051 if (ret)
2052 goto end;
2053 ret = lttng_metadata_printf(session,
2054 "\"");
2055 if (ret)
2056 goto end;
2057 len = strlen(entry->string);
2058 /* Escape the characters '"' and '\' */
2059 for (j = 0; j < len; j++) {
2060 char c = entry->string[j];
2061
2062 switch (c) {
2063 case '"':
2064 ret = lttng_metadata_printf(session,
2065 "\\\"");
2066 break;
2067 case '\\':
2068 ret = lttng_metadata_printf(session,
2069 "\\\\");
2070 break;
2071 default:
2072 ret = lttng_metadata_printf(session,
2073 "%c", c);
2074 break;
2075 }
2076 if (ret)
2077 goto end;
2078 }
2079 ret = lttng_metadata_printf(session, "\"");
2080 if (ret)
2081 goto end;
2082
2083 if (entry->options.is_auto) {
2084 ret = lttng_metadata_printf(session, ",\n");
2085 if (ret)
2086 goto end;
2087 } else {
2088 ret = lttng_metadata_printf(session,
2089 " = ");
2090 if (ret)
2091 goto end;
2092 if (entry->start.signedness)
2093 ret = lttng_metadata_printf(session,
2094 "%lld", (long long) entry->start.value);
2095 else
2096 ret = lttng_metadata_printf(session,
2097 "%llu", entry->start.value);
2098 if (ret)
2099 goto end;
2100 if (entry->start.signedness == entry->end.signedness &&
2101 entry->start.value
2102 == entry->end.value) {
2103 ret = lttng_metadata_printf(session,
2104 ",\n");
2105 } else {
2106 if (entry->end.signedness) {
2107 ret = lttng_metadata_printf(session,
2108 " ... %lld,\n",
2109 (long long) entry->end.value);
2110 } else {
2111 ret = lttng_metadata_printf(session,
2112 " ... %llu,\n",
2113 entry->end.value);
2114 }
2115 }
2116 if (ret)
2117 goto end;
2118 }
2119 }
2120 ret = print_tabs(session, nesting);
2121 if (ret)
2122 goto end;
2123 ret = lttng_metadata_printf(session, "}");
2124 end:
2125 return ret;
2126 }
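/*
 * Illustrative example (hypothetical entries): an enumeration over a u8
 * container with entries "ZERO" (value 0), "LOW" (range 1 to 9) and an
 * auto-assigned "AUTO" is dumped roughly as:
 *
 *   enum : integer { size = 8; ... } {
 *           "ZERO" = 0,
 *           "LOW" = 1 ... 9,
 *           "AUTO",
 *   }
 *
 * Double quotes and backslashes in entry names are escaped by the loop
 * above; the field name and terminating ";" are appended by the caller.
 */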
2127
2128 /*
2129 * Must be called with sessions_mutex held.
2130 */
2131 static
2132 int _lttng_enum_field_statedump(struct lttng_session *session,
2133 const struct lttng_event_field *field,
2134 size_t nesting)
2135 {
2136 int ret;
2137
2138 ret = _lttng_enum_type_statedump(session, &field->type, nesting);
2139 if (ret)
2140 return ret;
2141 return lttng_field_name_statedump(session, field, nesting);
2142 }
2143
2144 static
2145 int _lttng_integer_field_statedump(struct lttng_session *session,
2146 const struct lttng_event_field *field,
2147 size_t nesting)
2148 {
2149 int ret;
2150
2151 ret = _lttng_integer_type_statedump(session, &field->type, nesting);
2152 if (ret)
2153 return ret;
2154 return lttng_field_name_statedump(session, field, nesting);
2155 }
2156
2157 static
2158 int _lttng_string_type_statedump(struct lttng_session *session,
2159 const struct lttng_type *type,
2160 size_t nesting)
2161 {
2162 int ret;
2163
2164 WARN_ON_ONCE(type->atype != atype_string);
2165 /* Default encoding is UTF8 */
2166 ret = print_tabs(session, nesting);
2167 if (ret)
2168 return ret;
2169 ret = lttng_metadata_printf(session,
2170 "string%s",
2171 type->u.string.encoding == lttng_encode_ASCII ?
2172 " { encoding = ASCII; }" : "");
2173 return ret;
2174 }
2175
2176 static
2177 int _lttng_string_field_statedump(struct lttng_session *session,
2178 const struct lttng_event_field *field,
2179 size_t nesting)
2180 {
2181 int ret;
2182
2183 WARN_ON_ONCE(field->type.atype != atype_string);
2184 ret = _lttng_string_type_statedump(session, &field->type, nesting);
2185 if (ret)
2186 return ret;
2187 return lttng_field_name_statedump(session, field, nesting);
2188 }
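/*
 * Illustrative example (hypothetical field name): a UTF8 string field "comm"
 * is dumped as "string _comm;", while an ASCII string field is dumped as
 * "string { encoding = ASCII; } _comm;". The trailing "_comm;" part is
 * appended by lttng_field_name_statedump().
 */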
2189
2190 /*
2191 * Must be called with sessions_mutex held.
2192 */
2193 static
2194 int _lttng_type_statedump(struct lttng_session *session,
2195 const struct lttng_type *type,
2196 size_t nesting)
2197 {
2198 int ret = 0;
2199
2200 switch (type->atype) {
2201 case atype_integer:
2202 ret = _lttng_integer_type_statedump(session, type, nesting);
2203 break;
2204 case atype_enum_nestable:
2205 ret = _lttng_enum_type_statedump(session, type, nesting);
2206 break;
2207 case atype_string:
2208 ret = _lttng_string_type_statedump(session, type, nesting);
2209 break;
2210 case atype_struct_nestable:
2211 ret = _lttng_struct_type_statedump(session, type, nesting);
2212 break;
2213 case atype_variant_nestable:
2214 ret = _lttng_variant_type_statedump(session, type, nesting);
2215 break;
2216
2217 /* Nested arrays and sequences are not supported yet. */
2218 case atype_array_nestable:
2219 case atype_sequence_nestable:
2220 default:
2221 WARN_ON_ONCE(1);
2222 return -EINVAL;
2223 }
2224 return ret;
2225 }
2226
2227 /*
2228 * Must be called with sessions_mutex held.
2229 */
2230 static
2231 int _lttng_field_statedump(struct lttng_session *session,
2232 const struct lttng_event_field *field,
2233 size_t nesting)
2234 {
2235 int ret = 0;
2236
2237 switch (field->type.atype) {
2238 case atype_integer:
2239 ret = _lttng_integer_field_statedump(session, field, nesting);
2240 break;
2241 case atype_enum_nestable:
2242 ret = _lttng_enum_field_statedump(session, field, nesting);
2243 break;
2244 case atype_string:
2245 ret = _lttng_string_field_statedump(session, field, nesting);
2246 break;
2247 case atype_struct_nestable:
2248 ret = _lttng_struct_field_statedump(session, field, nesting);
2249 break;
2250 case atype_array_nestable:
2251 ret = _lttng_array_field_statedump(session, field, nesting);
2252 break;
2253 case atype_sequence_nestable:
2254 ret = _lttng_sequence_field_statedump(session, field, nesting);
2255 break;
2256 case atype_variant_nestable:
2257 ret = _lttng_variant_field_statedump(session, field, nesting);
2258 break;
2259
2260 default:
2261 WARN_ON_ONCE(1);
2262 return -EINVAL;
2263 }
2264 return ret;
2265 }
2266
2267 static
2268 int _lttng_context_metadata_statedump(struct lttng_session *session,
2269 struct lttng_ctx *ctx)
2270 {
2271 int ret = 0;
2272 int i;
2273
2274 if (!ctx)
2275 return 0;
2276 for (i = 0; i < ctx->nr_fields; i++) {
2277 const struct lttng_ctx_field *field = &ctx->fields[i];
2278
2279 ret = _lttng_field_statedump(session, &field->event_field, 2);
2280 if (ret)
2281 return ret;
2282 }
2283 return ret;
2284 }
2285
2286 static
2287 int _lttng_fields_metadata_statedump(struct lttng_session *session,
2288 struct lttng_event *event)
2289 {
2290 const struct lttng_event_desc *desc = event->desc;
2291 int ret = 0;
2292 int i;
2293
2294 for (i = 0; i < desc->nr_fields; i++) {
2295 const struct lttng_event_field *field = &desc->fields[i];
2296
2297 ret = _lttng_field_statedump(session, field, 2);
2298 if (ret)
2299 return ret;
2300 }
2301 return ret;
2302 }
2303
2304 /*
2305 * Must be called with sessions_mutex held.
2306 */
2307 static
2308 int _lttng_event_metadata_statedump(struct lttng_session *session,
2309 struct lttng_channel *chan,
2310 struct lttng_event *event)
2311 {
2312 int ret = 0;
2313
2314 if (event->metadata_dumped || !READ_ONCE(session->active))
2315 return 0;
2316 if (chan->channel_type == METADATA_CHANNEL)
2317 return 0;
2318
2319 ret = lttng_metadata_printf(session,
2320 "event {\n"
2321 " name = \"%s\";\n"
2322 " id = %u;\n"
2323 " stream_id = %u;\n",
2324 event->desc->name,
2325 event->id,
2326 event->chan->id);
2327 if (ret)
2328 goto end;
2329
2330 if (event->ctx) {
2331 ret = lttng_metadata_printf(session,
2332 " context := struct {\n");
2333 if (ret)
2334 goto end;
2335 }
2336 ret = _lttng_context_metadata_statedump(session, event->ctx);
2337 if (ret)
2338 goto end;
2339 if (event->ctx) {
2340 ret = lttng_metadata_printf(session,
2341 " };\n");
2342 if (ret)
2343 goto end;
2344 }
2345
2346 ret = lttng_metadata_printf(session,
2347 " fields := struct {\n"
2348 );
2349 if (ret)
2350 goto end;
2351
2352 ret = _lttng_fields_metadata_statedump(session, event);
2353 if (ret)
2354 goto end;
2355
2356 /*
2357 * LTTng space reservation can only reserve multiples of the
2358 * byte size.
2359 */
2360 ret = lttng_metadata_printf(session,
2361 " };\n"
2362 "};\n\n");
2363 if (ret)
2364 goto end;
2365
2366 event->metadata_dumped = 1;
2367 end:
2368 return ret;
2369
2370 }
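/*
 * Illustrative example (hypothetical event): with no event context, the dump
 * above emits a CTF event declaration of the form:
 *
 *   event {
 *           name = "irq_handler_entry";
 *           id = 2;
 *           stream_id = 0;
 *           fields := struct {
 *                   ... per-field declarations at nesting level 2 ...
 *           };
 *   };
 */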
2371
2372 /*
2373 * Must be called with sessions_mutex held.
2374 */
2375 static
2376 int _lttng_channel_metadata_statedump(struct lttng_session *session,
2377 struct lttng_channel *chan)
2378 {
2379 int ret = 0;
2380
2381 if (chan->metadata_dumped || !READ_ONCE(session->active))
2382 return 0;
2383
2384 if (chan->channel_type == METADATA_CHANNEL)
2385 return 0;
2386
2387 WARN_ON_ONCE(!chan->header_type);
2388 ret = lttng_metadata_printf(session,
2389 "stream {\n"
2390 " id = %u;\n"
2391 " event.header := %s;\n"
2392 " packet.context := struct packet_context;\n",
2393 chan->id,
2394 chan->header_type == 1 ? "struct event_header_compact" :
2395 "struct event_header_large");
2396 if (ret)
2397 goto end;
2398
2399 if (chan->ctx) {
2400 ret = lttng_metadata_printf(session,
2401 " event.context := struct {\n");
2402 if (ret)
2403 goto end;
2404 }
2405 ret = _lttng_context_metadata_statedump(session, chan->ctx);
2406 if (ret)
2407 goto end;
2408 if (chan->ctx) {
2409 ret = lttng_metadata_printf(session,
2410 " };\n");
2411 if (ret)
2412 goto end;
2413 }
2414
2415 ret = lttng_metadata_printf(session,
2416 "};\n\n");
2417
2418 chan->metadata_dumped = 1;
2419 end:
2420 return ret;
2421 }
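/*
 * Illustrative example (hypothetical channel): a channel with id 0, the
 * compact event header and no channel context is dumped as:
 *
 *   stream {
 *           id = 0;
 *           event.header := struct event_header_compact;
 *           packet.context := struct packet_context;
 *   };
 */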
2422
2423 /*
2424 * Must be called with sessions_mutex held.
2425 */
2426 static
2427 int _lttng_stream_packet_context_declare(struct lttng_session *session)
2428 {
2429 return lttng_metadata_printf(session,
2430 "struct packet_context {\n"
2431 " uint64_clock_monotonic_t timestamp_begin;\n"
2432 " uint64_clock_monotonic_t timestamp_end;\n"
2433 " uint64_t content_size;\n"
2434 " uint64_t packet_size;\n"
2435 " uint64_t packet_seq_num;\n"
2436 " unsigned long events_discarded;\n"
2437 " uint32_t cpu_id;\n"
2438 "};\n\n"
2439 );
2440 }
2441
2442 /*
2443 * Compact header:
2444 * id: range: 0 - 30.
2445 * id 31 is reserved to indicate an extended header.
2446 *
2447 * Large header:
2448 * id: range: 0 - 65534.
2449 * id 65535 is reserved to indicate an extended header.
2450 *
2451 * Must be called with sessions_mutex held.
2452 */
2453 static
2454 int _lttng_event_header_declare(struct lttng_session *session)
2455 {
2456 return lttng_metadata_printf(session,
2457 "struct event_header_compact {\n"
2458 " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
2459 " variant <id> {\n"
2460 " struct {\n"
2461 " uint27_clock_monotonic_t timestamp;\n"
2462 " } compact;\n"
2463 " struct {\n"
2464 " uint32_t id;\n"
2465 " uint64_clock_monotonic_t timestamp;\n"
2466 " } extended;\n"
2467 " } v;\n"
2468 "} align(%u);\n"
2469 "\n"
2470 "struct event_header_large {\n"
2471 " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
2472 " variant <id> {\n"
2473 " struct {\n"
2474 " uint32_clock_monotonic_t timestamp;\n"
2475 " } compact;\n"
2476 " struct {\n"
2477 " uint32_t id;\n"
2478 " uint64_clock_monotonic_t timestamp;\n"
2479 " } extended;\n"
2480 " } v;\n"
2481 "} align(%u);\n\n",
2482 lttng_alignof(uint32_t) * CHAR_BIT,
2483 lttng_alignof(uint16_t) * CHAR_BIT
2484 );
2485 }
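/*
 * Reading the compact header: an event with id 7 fits in the 5-bit enum, so
 * it is written as id = 7 followed by the 27-bit "compact" timestamp. An
 * event with id 42 does not fit, so it is written as id = 31 ("extended")
 * followed by the full 32-bit id and a 64-bit timestamp. The large header
 * works the same way with a 16-bit id and the 65535 escape value.
 */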
2486
2487 /*
2488 * Approximation of NTP time of day to clock monotonic correlation,
2489 * taken at start of trace.
2490 * Yes, this is only an approximation. Yes, we can (and will) do better
2491 * in future versions.
2492 * This function may return a negative offset. It may happen if the
2493 * system sets the REALTIME clock to 0 after boot.
2494 *
2495 * Use a 64-bit timespec (ktime_get_real_ts64) so that 32-bit
2496 * architectures are y2038 compliant.
2497 */
2498 static
2499 int64_t measure_clock_offset(void)
2500 {
2501 uint64_t monotonic_avg, monotonic[2], realtime;
2502 uint64_t tcf = trace_clock_freq();
2503 int64_t offset;
2504 unsigned long flags;
2505 struct timespec64 rts = { 0, 0 };
2506
2507 /* Disable interrupts to increase correlation precision. */
2508 local_irq_save(flags);
2509 monotonic[0] = trace_clock_read64();
2510 ktime_get_real_ts64(&rts);
2511 monotonic[1] = trace_clock_read64();
2512 local_irq_restore(flags);
2513
2514 monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
2515 realtime = (uint64_t) rts.tv_sec * tcf;
2516 if (tcf == NSEC_PER_SEC) {
2517 realtime += rts.tv_nsec;
2518 } else {
2519 uint64_t n = rts.tv_nsec * tcf;
2520
2521 do_div(n, NSEC_PER_SEC);
2522 realtime += n;
2523 }
2524 offset = (int64_t) realtime - monotonic_avg;
2525 return offset;
2526 }
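/*
 * Worked example (hypothetical numbers): with trace_clock_freq() == 1 GHz,
 * the computation above reduces to:
 *
 *   monotonic_avg = (monotonic[0] + monotonic[1]) / 2
 *   realtime      = rts.tv_sec * 1000000000 + rts.tv_nsec
 *   offset        = realtime - monotonic_avg
 *
 * Trace viewers then reconstruct wall-clock time from a trace clock value v
 * as approximately (offset + v) / freq, matching the "offset * (1/freq)"
 * note emitted in the clock metadata below.
 */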
2527
2528 static
2529 int print_escaped_ctf_string(struct lttng_session *session, const char *string)
2530 {
2531 int ret = 0;
2532 size_t i;
2533 char cur;
2534
2535 i = 0;
2536 cur = string[i];
2537 while (cur != '\0') {
2538 switch (cur) {
2539 case '\n':
2540 ret = lttng_metadata_printf(session, "%s", "\\n");
2541 break;
2542 case '\\':
2543 case '"':
2544 ret = lttng_metadata_printf(session, "%c", '\\');
2545 if (ret)
2546 goto error;
2547 /* We still print the current char */
2548 /* Fallthrough */
2549 default:
2550 ret = lttng_metadata_printf(session, "%c", cur);
2551 break;
2552 }
2553
2554 if (ret)
2555 goto error;
2556
2557 cur = string[++i];
2558 }
2559 error:
2560 return ret;
2561 }
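/*
 * Illustrative example (hypothetical input): the string
 *   a "quoted" \ string
 * is emitted as
 *   a \"quoted\" \\ string
 * and embedded newlines are emitted as the two characters "\n".
 */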
2562
2563 static
2564 int print_metadata_escaped_field(struct lttng_session *session, const char *field,
2565 const char *field_value)
2566 {
2567 int ret;
2568
2569 ret = lttng_metadata_printf(session, " %s = \"", field);
2570 if (ret)
2571 goto error;
2572
2573 ret = print_escaped_ctf_string(session, field_value);
2574 if (ret)
2575 goto error;
2576
2577 ret = lttng_metadata_printf(session, "\";\n");
2578
2579 error:
2580 return ret;
2581 }
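/*
 * Usage sketch (hypothetical value): print_metadata_escaped_field(session,
 * "trace_name", "my \"demo\" trace") appends a line of the form
 *
 *   trace_name = "my \"demo\" trace";
 *
 * to the env block being generated below.
 */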
2582
2583 /*
2584 * Output metadata into this session's metadata buffers.
2585 * Must be called with sessions_mutex held.
2586 */
2587 static
2588 int _lttng_session_metadata_statedump(struct lttng_session *session)
2589 {
2590 unsigned char *uuid_c = session->uuid.b;
2591 char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
2592 const char *product_uuid;
2593 struct lttng_channel *chan;
2594 struct lttng_event *event;
2595 int ret = 0;
2596
2597 if (!READ_ONCE(session->active))
2598 return 0;
2599 if (session->metadata_dumped)
2600 goto skip_session;
2601
2602 snprintf(uuid_s, sizeof(uuid_s),
2603 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
2604 uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
2605 uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
2606 uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
2607 uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
2608
2609 ret = lttng_metadata_printf(session,
2610 "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
2611 "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
2612 "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
2613 "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
2614 "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
2615 "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
2616 "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
2617 "\n"
2618 "trace {\n"
2619 " major = %u;\n"
2620 " minor = %u;\n"
2621 " uuid = \"%s\";\n"
2622 " byte_order = %s;\n"
2623 " packet.header := struct {\n"
2624 " uint32_t magic;\n"
2625 " uint8_t uuid[16];\n"
2626 " uint32_t stream_id;\n"
2627 " uint64_t stream_instance_id;\n"
2628 " };\n"
2629 "};\n\n",
2630 lttng_alignof(uint8_t) * CHAR_BIT,
2631 lttng_alignof(uint16_t) * CHAR_BIT,
2632 lttng_alignof(uint32_t) * CHAR_BIT,
2633 lttng_alignof(uint64_t) * CHAR_BIT,
2634 sizeof(unsigned long) * CHAR_BIT,
2635 lttng_alignof(unsigned long) * CHAR_BIT,
2636 CTF_SPEC_MAJOR,
2637 CTF_SPEC_MINOR,
2638 uuid_s,
2639 #if __BYTE_ORDER == __BIG_ENDIAN
2640 "be"
2641 #else
2642 "le"
2643 #endif
2644 );
2645 if (ret)
2646 goto end;
2647
2648 ret = lttng_metadata_printf(session,
2649 "env {\n"
2650 " hostname = \"%s\";\n"
2651 " domain = \"kernel\";\n"
2652 " sysname = \"%s\";\n"
2653 " kernel_release = \"%s\";\n"
2654 " kernel_version = \"%s\";\n"
2655 " tracer_name = \"lttng-modules\";\n"
2656 " tracer_major = %d;\n"
2657 " tracer_minor = %d;\n"
2658 " tracer_patchlevel = %d;\n"
2659 " trace_buffering_scheme = \"global\";\n",
2660 current->nsproxy->uts_ns->name.nodename,
2661 utsname()->sysname,
2662 utsname()->release,
2663 utsname()->version,
2664 LTTNG_MODULES_MAJOR_VERSION,
2665 LTTNG_MODULES_MINOR_VERSION,
2666 LTTNG_MODULES_PATCHLEVEL_VERSION
2667 );
2668 if (ret)
2669 goto end;
2670
2671 ret = print_metadata_escaped_field(session, "trace_name", session->name);
2672 if (ret)
2673 goto end;
2674 ret = print_metadata_escaped_field(session, "trace_creation_datetime",
2675 session->creation_time);
2676 if (ret)
2677 goto end;
2678
2679 /* Add the product UUID to the 'env' section */
2680 product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
2681 if (product_uuid) {
2682 ret = lttng_metadata_printf(session,
2683 " product_uuid = \"%s\";\n",
2684 product_uuid
2685 );
2686 if (ret)
2687 goto end;
2688 }
2689
2690 /* Close the 'env' section */
2691 ret = lttng_metadata_printf(session, "};\n\n");
2692 if (ret)
2693 goto end;
2694
2695 ret = lttng_metadata_printf(session,
2696 "clock {\n"
2697 " name = \"%s\";\n",
2698 trace_clock_name()
2699 );
2700 if (ret)
2701 goto end;
2702
2703 if (!trace_clock_uuid(clock_uuid_s)) {
2704 ret = lttng_metadata_printf(session,
2705 " uuid = \"%s\";\n",
2706 clock_uuid_s
2707 );
2708 if (ret)
2709 goto end;
2710 }
2711
2712 ret = lttng_metadata_printf(session,
2713 " description = \"%s\";\n"
2714 " freq = %llu; /* Frequency, in Hz */\n"
2715 " /* clock value offset from Epoch is: offset * (1/freq) */\n"
2716 " offset = %lld;\n"
2717 "};\n\n",
2718 trace_clock_description(),
2719 (unsigned long long) trace_clock_freq(),
2720 (long long) measure_clock_offset()
2721 );
2722 if (ret)
2723 goto end;
2724
2725 ret = lttng_metadata_printf(session,
2726 "typealias integer {\n"
2727 " size = 27; align = 1; signed = false;\n"
2728 " map = clock.%s.value;\n"
2729 "} := uint27_clock_monotonic_t;\n"
2730 "\n"
2731 "typealias integer {\n"
2732 " size = 32; align = %u; signed = false;\n"
2733 " map = clock.%s.value;\n"
2734 "} := uint32_clock_monotonic_t;\n"
2735 "\n"
2736 "typealias integer {\n"
2737 " size = 64; align = %u; signed = false;\n"
2738 " map = clock.%s.value;\n"
2739 "} := uint64_clock_monotonic_t;\n\n",
2740 trace_clock_name(),
2741 lttng_alignof(uint32_t) * CHAR_BIT,
2742 trace_clock_name(),
2743 lttng_alignof(uint64_t) * CHAR_BIT,
2744 trace_clock_name()
2745 );
2746 if (ret)
2747 goto end;
2748
2749 ret = _lttng_stream_packet_context_declare(session);
2750 if (ret)
2751 goto end;
2752
2753 ret = _lttng_event_header_declare(session);
2754 if (ret)
2755 goto end;
2756
2757 skip_session:
2758 list_for_each_entry(chan, &session->chan, list) {
2759 ret = _lttng_channel_metadata_statedump(session, chan);
2760 if (ret)
2761 goto end;
2762 }
2763
2764 list_for_each_entry(event, &session->events, list) {
2765 ret = _lttng_event_metadata_statedump(session, event->chan, event);
2766 if (ret)
2767 goto end;
2768 }
2769 session->metadata_dumped = 1;
2770 end:
2771 return ret;
2772 }
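/*
 * Resulting metadata layout, in emission order: integer typealiases and the
 * trace block (uuid, byte order, packet header), the env block (hostname,
 * kernel and tracer versions, trace name and creation time, optional
 * product_uuid), the clock block and its clock-mapped typealiases, the
 * packet context and event header declarations, then one stream block per
 * channel and one event block per event. Already-dumped channels and events
 * are skipped on subsequent calls via the *_dumped flags.
 */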
2773
2774 /**
2775 * lttng_transport_register - LTTng transport registration
2776 * @transport: transport structure
2777 *
2778 * Registers a transport that can be used as an output to extract data out of
2779 * LTTng.
2780 */
2781 void lttng_transport_register(struct lttng_transport *transport)
2782 {
2783 mutex_lock(&sessions_mutex);
2784 list_add_tail(&transport->node, &lttng_transport_list);
2785 mutex_unlock(&sessions_mutex);
2786 }
2787 EXPORT_SYMBOL_GPL(lttng_transport_register);
2788
2789 /**
2790 * lttng_transport_unregister - LTTng transport unregistration
2791 * @transport: transport structure
2792 */
2793 void lttng_transport_unregister(struct lttng_transport *transport)
2794 {
2795 mutex_lock(&sessions_mutex);
2796 list_del(&transport->node);
2797 mutex_unlock(&sessions_mutex);
2798 }
2799 EXPORT_SYMBOL_GPL(lttng_transport_unregister);
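/*
 * Usage sketch (illustrative; field names assumed from the in-tree
 * ring-buffer clients): a transport is typically declared statically and
 * registered at module init:
 *
 *   static struct lttng_transport my_transport = {
 *           .name  = "relay-example",
 *           .owner = THIS_MODULE,
 *           .ops   = { ... channel/event output callbacks ... },
 *   };
 *
 *   lttng_transport_register(&my_transport);
 *   ...
 *   lttng_transport_unregister(&my_transport);
 */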
2800
2801 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
2802
2803 enum cpuhp_state lttng_hp_prepare;
2804 enum cpuhp_state lttng_hp_online;
2805
2806 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
2807 {
2808 struct lttng_cpuhp_node *lttng_node;
2809
2810 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
2811 switch (lttng_node->component) {
2812 case LTTNG_RING_BUFFER_FRONTEND:
2813 return 0;
2814 case LTTNG_RING_BUFFER_BACKEND:
2815 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
2816 case LTTNG_RING_BUFFER_ITER:
2817 return 0;
2818 case LTTNG_CONTEXT_PERF_COUNTERS:
2819 return 0;
2820 default:
2821 return -EINVAL;
2822 }
2823 }
2824
2825 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
2826 {
2827 struct lttng_cpuhp_node *lttng_node;
2828
2829 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
2830 switch (lttng_node->component) {
2831 case LTTNG_RING_BUFFER_FRONTEND:
2832 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
2833 case LTTNG_RING_BUFFER_BACKEND:
2834 return 0;
2835 case LTTNG_RING_BUFFER_ITER:
2836 return 0;
2837 case LTTNG_CONTEXT_PERF_COUNTERS:
2838 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
2839 default:
2840 return -EINVAL;
2841 }
2842 }
2843
2844 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
2845 {
2846 struct lttng_cpuhp_node *lttng_node;
2847
2848 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
2849 switch (lttng_node->component) {
2850 case LTTNG_RING_BUFFER_FRONTEND:
2851 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
2852 case LTTNG_RING_BUFFER_BACKEND:
2853 return 0;
2854 case LTTNG_RING_BUFFER_ITER:
2855 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
2856 case LTTNG_CONTEXT_PERF_COUNTERS:
2857 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
2858 default:
2859 return -EINVAL;
2860 }
2861 }
2862
2863 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
2864 {
2865 struct lttng_cpuhp_node *lttng_node;
2866
2867 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
2868 switch (lttng_node->component) {
2869 case LTTNG_RING_BUFFER_FRONTEND:
2870 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
2871 case LTTNG_RING_BUFFER_BACKEND:
2872 return 0;
2873 case LTTNG_RING_BUFFER_ITER:
2874 return 0;
2875 case LTTNG_CONTEXT_PERF_COUNTERS:
2876 return 0;
2877 default:
2878 return -EINVAL;
2879 }
2880 }
2881
2882 static int __init lttng_init_cpu_hotplug(void)
2883 {
2884 int ret;
2885
2886 ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
2887 lttng_hotplug_prepare,
2888 lttng_hotplug_dead);
2889 if (ret < 0) {
2890 return ret;
2891 }
2892 lttng_hp_prepare = ret;
2893 lttng_rb_set_hp_prepare(ret);
2894
2895 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
2896 lttng_hotplug_online,
2897 lttng_hotplug_offline);
2898 if (ret < 0) {
2899 cpuhp_remove_multi_state(lttng_hp_prepare);
2900 lttng_hp_prepare = 0;
2901 return ret;
2902 }
2903 lttng_hp_online = ret;
2904 lttng_rb_set_hp_online(ret);
2905
2906 return 0;
2907 }
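/*
 * Usage sketch (illustrative): ring-buffer and perf-context code attaches
 * per-object instances to these dynamic multi states, e.g.:
 *
 *   node->component = LTTNG_RING_BUFFER_BACKEND;
 *   ret = cpuhp_state_add_instance(lttng_hp_prepare, &node->node);
 *   ...
 *   cpuhp_state_remove_instance(lttng_hp_prepare, &node->node);
 *
 * where "node" is a struct lttng_cpuhp_node; the callbacks above dispatch
 * on node->component.
 */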
2908
2909 static void __exit lttng_exit_cpu_hotplug(void)
2910 {
2911 lttng_rb_set_hp_online(0);
2912 cpuhp_remove_multi_state(lttng_hp_online);
2913 lttng_rb_set_hp_prepare(0);
2914 cpuhp_remove_multi_state(lttng_hp_prepare);
2915 }
2916
2917 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
2918 static int lttng_init_cpu_hotplug(void)
2919 {
2920 return 0;
2921 }
2922 static void lttng_exit_cpu_hotplug(void)
2923 {
2924 }
2925 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
2926
2927
2928 static int __init lttng_events_init(void)
2929 {
2930 int ret;
2931
2932 ret = lttng_probes_init();
2933 if (ret)
2934 return ret;
2935 ret = lttng_context_init();
2936 if (ret)
2937 return ret;
2938 ret = lttng_tracepoint_init();
2939 if (ret)
2940 goto error_tp;
2941 event_cache = KMEM_CACHE(lttng_event, 0);
2942 if (!event_cache) {
2943 ret = -ENOMEM;
2944 goto error_kmem;
2945 }
2946 ret = lttng_abi_init();
2947 if (ret)
2948 goto error_abi;
2949 ret = lttng_logger_init();
2950 if (ret)
2951 goto error_logger;
2952 ret = lttng_init_cpu_hotplug();
2953 if (ret)
2954 goto error_hotplug;
2955 printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
2956 __stringify(LTTNG_MODULES_MAJOR_VERSION),
2957 __stringify(LTTNG_MODULES_MINOR_VERSION),
2958 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
2959 LTTNG_MODULES_EXTRAVERSION,
2960 LTTNG_VERSION_NAME,
2961 #ifdef LTTNG_EXTRA_VERSION_GIT
2962 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
2963 #else
2964 "",
2965 #endif
2966 #ifdef LTTNG_EXTRA_VERSION_NAME
2967 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
2968 #else
2969 "");
2970 #endif
2971 return 0;
2972
2973 error_hotplug:
2974 lttng_logger_exit();
2975 error_logger:
2976 lttng_abi_exit();
2977 error_abi:
2978 kmem_cache_destroy(event_cache);
2979 error_kmem:
2980 lttng_tracepoint_exit();
2981 error_tp:
2982 lttng_context_exit();
2983 printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
2984 __stringify(LTTNG_MODULES_MAJOR_VERSION),
2985 __stringify(LTTNG_MODULES_MINOR_VERSION),
2986 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
2987 LTTNG_MODULES_EXTRAVERSION,
2988 LTTNG_VERSION_NAME,
2989 #ifdef LTTNG_EXTRA_VERSION_GIT
2990 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
2991 #else
2992 "",
2993 #endif
2994 #ifdef LTTNG_EXTRA_VERSION_NAME
2995 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
2996 #else
2997 "");
2998 #endif
2999 return ret;
3000 }
3001
3002 module_init(lttng_events_init);
3003
3004 static void __exit lttng_events_exit(void)
3005 {
3006 struct lttng_session *session, *tmpsession;
3007
3008 lttng_exit_cpu_hotplug();
3009 lttng_logger_exit();
3010 lttng_abi_exit();
3011 list_for_each_entry_safe(session, tmpsession, &sessions, list)
3012 lttng_session_destroy(session);
3013 kmem_cache_destroy(event_cache);
3014 lttng_tracepoint_exit();
3015 lttng_context_exit();
3016 printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
3017 __stringify(LTTNG_MODULES_MAJOR_VERSION),
3018 __stringify(LTTNG_MODULES_MINOR_VERSION),
3019 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
3020 LTTNG_MODULES_EXTRAVERSION,
3021 LTTNG_VERSION_NAME,
3022 #ifdef LTTNG_EXTRA_VERSION_GIT
3023 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
3024 #else
3025 "",
3026 #endif
3027 #ifdef LTTNG_EXTRA_VERSION_NAME
3028 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
3029 #else
3030 "");
3031 #endif
3032 }
3033
3034 module_exit(lttng_events_exit);
3035
3036 #include "extra_version/patches.i"
3037 #ifdef LTTNG_EXTRA_VERSION_GIT
3038 MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
3039 #endif
3040 #ifdef LTTNG_EXTRA_VERSION_NAME
3041 MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
3042 #endif
3043 MODULE_LICENSE("GPL and additional rights");
3044 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
3045 MODULE_DESCRIPTION("LTTng tracer");
3046 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
3047 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
3048 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
3049 LTTNG_MODULES_EXTRAVERSION);