Namespace all logging statements
[lttng-modules.git] / src / lttng-events.c
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/jhash.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/dmi.h>
31
32 #include <wrapper/uuid.h>
33 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
34 #include <wrapper/random.h>
35 #include <wrapper/tracepoint.h>
36 #include <wrapper/list.h>
37 #include <wrapper/types.h>
38 #include <lttng/kernel-version.h>
39 #include <lttng/events.h>
40 #include <lttng/tracer.h>
41 #include <lttng/abi-old.h>
42 #include <lttng/endian.h>
43 #include <lttng/string-utils.h>
44 #include <ringbuffer/backend.h>
45 #include <ringbuffer/frontend.h>
46 #include <wrapper/time.h>
47
/* Initial allocation size (bytes) of a session's metadata cache buffer. */
#define METADATA_CACHE_DEFAULT_SIZE 4096

/* List of all sessions; walked and modified under sessions_mutex. */
static LIST_HEAD(sessions);
/* List of registered transports; walked under sessions_mutex here. */
static LIST_HEAD(lttng_transport_list);
/*
 * Protect the sessions and metadata caches.
 */
static DEFINE_MUTEX(sessions_mutex);
/* Slab cache for struct lttng_event allocations. */
static struct kmem_cache *event_cache;
57
/* Forward declarations for helpers defined later in this file. */
static void lttng_session_lazy_sync_enablers(struct lttng_session *session);
static void lttng_session_sync_enablers(struct lttng_session *session);
static void lttng_enabler_destroy(struct lttng_enabler *enabler);

static void _lttng_event_destroy(struct lttng_event *event);
static void _lttng_channel_destroy(struct lttng_channel *chan);
static int _lttng_event_unregister(struct lttng_event *event);
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
				  struct lttng_channel *chan,
				  struct lttng_event *event);
static
int _lttng_session_metadata_statedump(struct lttng_session *session);
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
static
int _lttng_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting);
static
int _lttng_field_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting);
81
/*
 * Wait for every currently-executing probe handler to finish, so that
 * events torn down afterwards can no longer be referenced by in-flight
 * tracing. The grace-period primitive depends on kernel version and
 * preemption model.
 */
void synchronize_trace(void)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
	/* Since 5.1, synchronize_rcu() also covers sched-RCU readers. */
	synchronize_rcu();
#else
	synchronize_sched();
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
#ifdef CONFIG_PREEMPT_RT_FULL
	/* On RT kernels, additionally wait for preemptible RCU readers. */
	synchronize_rcu();
#endif
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
#ifdef CONFIG_PREEMPT_RT
	/* Pre-3.4 RT kernels used the CONFIG_PREEMPT_RT config symbol. */
	synchronize_rcu();
#endif
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
}
100
/* Acquire the global sessions mutex (exported locking entry point). */
void lttng_lock_sessions(void)
{
	mutex_lock(&sessions_mutex);
}
105
/* Release the global sessions mutex taken by lttng_lock_sessions(). */
void lttng_unlock_sessions(void)
{
	mutex_unlock(&sessions_mutex);
}
110
111 /*
112 * Called with sessions lock held.
113 */
114 int lttng_session_active(void)
115 {
116 struct lttng_session *iter;
117
118 list_for_each_entry(iter, &sessions, list) {
119 if (iter->active)
120 return 1;
121 }
122 return 0;
123 }
124
/*
 * Create a new tracing session along with its metadata cache.
 * Returns the new session, or NULL on allocation failure.
 * Takes the sessions mutex to publish the session on the global list.
 */
struct lttng_session *lttng_session_create(void)
{
	struct lttng_session *session;
	struct lttng_metadata_cache *metadata_cache;
	int i;

	mutex_lock(&sessions_mutex);
	session = lttng_kvzalloc(sizeof(struct lttng_session), GFP_KERNEL);
	if (!session)
		goto err;
	INIT_LIST_HEAD(&session->chan);
	INIT_LIST_HEAD(&session->events);
	/* Each session gets a unique trace UUID. */
	lttng_guid_gen(&session->uuid);

	/* The metadata cache is refcounted: shared with metadata streams. */
	metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
			GFP_KERNEL);
	if (!metadata_cache)
		goto err_free_session;
	metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
	if (!metadata_cache->data)
		goto err_free_cache;
	metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
	kref_init(&metadata_cache->refcount);
	mutex_init(&metadata_cache->lock);
	session->metadata_cache = metadata_cache;
	INIT_LIST_HEAD(&metadata_cache->metadata_stream);
	/* The cache carries a copy of the session UUID for the metadata. */
	memcpy(&metadata_cache->uuid, &session->uuid,
		sizeof(metadata_cache->uuid));
	INIT_LIST_HEAD(&session->enablers_head);
	for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
		INIT_HLIST_HEAD(&session->events_ht.table[i]);
	list_add(&session->list, &sessions);
	/* Back-pointers and type tags for the six ID trackers. */
	session->pid_tracker.session = session;
	session->pid_tracker.tracker_type = TRACKER_PID;
	session->vpid_tracker.session = session;
	session->vpid_tracker.tracker_type = TRACKER_VPID;
	session->uid_tracker.session = session;
	session->uid_tracker.tracker_type = TRACKER_UID;
	session->vuid_tracker.session = session;
	session->vuid_tracker.tracker_type = TRACKER_VUID;
	session->gid_tracker.session = session;
	session->gid_tracker.tracker_type = TRACKER_GID;
	session->vgid_tracker.session = session;
	session->vgid_tracker.tracker_type = TRACKER_VGID;
	mutex_unlock(&sessions_mutex);
	return session;

err_free_cache:
	kfree(metadata_cache);
err_free_session:
	lttng_kvfree(session);
err:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
180
181 void metadata_cache_destroy(struct kref *kref)
182 {
183 struct lttng_metadata_cache *cache =
184 container_of(kref, struct lttng_metadata_cache, refcount);
185 vfree(cache->data);
186 kfree(cache);
187 }
188
/*
 * Tear down a session: deactivate it, unregister all instrumentation,
 * wait for in-flight probes, then free events, channels and trackers.
 * The teardown order matters: instrumentation must be unregistered and
 * quiesced (synchronize_trace) before the backing objects are freed.
 */
void lttng_session_destroy(struct lttng_session *session)
{
	struct lttng_channel *chan, *tmpchan;
	struct lttng_event *event, *tmpevent;
	struct lttng_metadata_stream *metadata_stream;
	struct lttng_enabler *enabler, *tmpenabler;
	int ret;

	mutex_lock(&sessions_mutex);
	WRITE_ONCE(session->active, 0);
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_unregister(chan);
		WARN_ON(ret);
	}
	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_unregister(event);
		WARN_ON(ret);
	}
	synchronize_trace(); /* Wait for in-flight events to complete */
	list_for_each_entry_safe(enabler, tmpenabler,
			&session->enablers_head, node)
		lttng_enabler_destroy(enabler);
	list_for_each_entry_safe(event, tmpevent, &session->events, list)
		_lttng_event_destroy(event);
	list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
		/* Metadata channels are destroyed via their own release path. */
		BUG_ON(chan->channel_type == METADATA_CHANNEL);
		_lttng_channel_destroy(chan);
	}
	/* Wake readers blocked on metadata streams so they can exit. */
	mutex_lock(&session->metadata_cache->lock);
	list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
		_lttng_metadata_channel_hangup(metadata_stream);
	mutex_unlock(&session->metadata_cache->lock);
	lttng_id_tracker_destroy(&session->pid_tracker, false);
	lttng_id_tracker_destroy(&session->vpid_tracker, false);
	lttng_id_tracker_destroy(&session->uid_tracker, false);
	lttng_id_tracker_destroy(&session->vuid_tracker, false);
	lttng_id_tracker_destroy(&session->gid_tracker, false);
	lttng_id_tracker_destroy(&session->vgid_tracker, false);
	/* Drop the session's reference; streams may still hold theirs. */
	kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
	list_del(&session->list);
	mutex_unlock(&sessions_mutex);
	lttng_kvfree(session);
}
232
233 int lttng_session_statedump(struct lttng_session *session)
234 {
235 int ret;
236
237 mutex_lock(&sessions_mutex);
238 ret = lttng_statedump_start(session);
239 mutex_unlock(&sessions_mutex);
240 return ret;
241 }
242
/*
 * Activate a session: sync enablers, choose per-channel event header
 * layout, clear stream quiescence, then dump metadata and kick a
 * statedump. Returns 0 on success, -EBUSY if already active, or the
 * statedump error (in which case the session is left inactive).
 */
int lttng_session_enable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (session->active) {
		ret = -EBUSY;
		goto end;
	}

	/* Set transient enabler state to "enabled" */
	session->tstate = 1;

	/* We need to sync enablers with session before activation. */
	lttng_session_sync_enablers(session);

	/*
	 * Snapshot the number of events per channel to know the type of header
	 * we need to use.
	 */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->header_type)
			continue;		/* don't change it if session stop/restart */
		/* Compact headers can only encode small event IDs. */
		if (chan->free_event_id < 31)
			chan->header_type = 1;	/* compact */
		else
			chan->header_type = 2;	/* large */
	}

	/* Clear each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_clear_quiescent_channel(chan->chan);
	}

	WRITE_ONCE(session->active, 1);
	WRITE_ONCE(session->been_active, 1);
	/* Metadata must be dumped while the session is marked active. */
	ret = _lttng_session_metadata_statedump(session);
	if (ret) {
		WRITE_ONCE(session->active, 0);
		goto end;
	}
	ret = lttng_statedump_start(session);
	if (ret)
		WRITE_ONCE(session->active, 0);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
293
/*
 * Deactivate a session: mark it inactive, sync enablers to the
 * disabled state, and mark each data stream quiescent.
 * Returns 0 on success, -EBUSY if the session was not active.
 */
int lttng_session_disable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}
	WRITE_ONCE(session->active, 0);

	/* Set transient enabler state to "disabled" */
	session->tstate = 0;
	lttng_session_sync_enablers(session);

	/* Set each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->channel_type != METADATA_CHANNEL)
			lib_ring_buffer_set_quiescent_channel(chan->chan);
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
319
/*
 * Regenerate the session's metadata from scratch: wipe the metadata
 * cache, reset per-stream read/write positions, clear all "already
 * dumped" flags, then redo a full metadata statedump.
 * Requires an active session; returns -EBUSY otherwise.
 */
int lttng_session_metadata_regenerate(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;
	struct lttng_event *event;
	struct lttng_metadata_cache *cache = session->metadata_cache;
	struct lttng_metadata_stream *stream;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}

	mutex_lock(&cache->lock);
	memset(cache->data, 0, cache->cache_alloc);
	cache->metadata_written = 0;
	/* Bump the version so consumers notice the regeneration. */
	cache->version++;
	list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list) {
		stream->metadata_out = 0;
		stream->metadata_in = 0;
	}
	mutex_unlock(&cache->lock);

	/* Force the statedump to re-emit session, channel and event info. */
	session->metadata_dumped = 0;
	list_for_each_entry(chan, &session->chan, list) {
		chan->metadata_dumped = 0;
	}

	list_for_each_entry(event, &session->events, list) {
		event->metadata_dumped = 0;
	}

	ret = _lttng_session_metadata_statedump(session);

end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
359
/*
 * Enable a data channel. Not permitted on metadata channels (-EPERM),
 * and returns -EEXIST if the channel is already enabled.
 * The enabler sync must run before the channel is marked enabled.
 */
int lttng_channel_enable(struct lttng_channel *channel)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (channel->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (channel->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set transient enabler state to "enabled" */
	channel->tstate = 1;
	lttng_session_sync_enablers(channel->session);
	/* Set atomically the state to "enabled" */
	WRITE_ONCE(channel->enabled, 1);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
382
/*
 * Disable a data channel. Not permitted on metadata channels (-EPERM),
 * and returns -EEXIST if the channel is already disabled.
 * The channel is marked disabled before the enabler sync runs.
 */
int lttng_channel_disable(struct lttng_channel *channel)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (channel->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (!channel->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set atomically the state to "disabled" */
	WRITE_ONCE(channel->enabled, 0);
	/* Set transient enabler state to "disabled" */
	channel->tstate = 0;
	lttng_session_sync_enablers(channel->session);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
405
/*
 * Enable a single event. Only valid for probe-style instrumentation
 * (kprobe/uprobe/noop/kretprobe); tracepoints and syscalls are driven
 * by enablers instead and return -EINVAL. Events in the metadata
 * channel cannot be toggled (-EPERM); already-enabled events -EEXIST.
 */
int lttng_event_enable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:	/* Fall-through: enabler-managed. */
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_NOOP:
		WRITE_ONCE(event->enabled, 1);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		/* kretprobes pair an entry and a return event: toggle both. */
		ret = lttng_kretprobes_event_enable_state(event, 1);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
441
/*
 * Disable a single event. Mirror image of lttng_event_enable():
 * only probe-style instrumentation can be toggled directly; metadata
 * channel events return -EPERM, already-disabled events -EEXIST.
 */
int lttng_event_disable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (!event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:	/* Fall-through: enabler-managed. */
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_NOOP:
		WRITE_ONCE(event->enabled, 0);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		/* kretprobes pair an entry and a return event: toggle both. */
		ret = lttng_kretprobes_event_enable_state(event, 0);
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through. */
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
477
478 static struct lttng_transport *lttng_transport_find(const char *name)
479 {
480 struct lttng_transport *transport;
481
482 list_for_each_entry(transport, &lttng_transport_list, node) {
483 if (!strcmp(transport->name, name))
484 return transport;
485 }
486 return NULL;
487 }
488
/*
 * Create a channel backed by the named ring-buffer transport.
 * Refuses to add a non-metadata channel to a session that has already
 * been active (channel layout is frozen at first activation).
 * Holds a module reference on the transport for the channel lifetime.
 * Returns the channel, or NULL on error.
 */
struct lttng_channel *lttng_channel_create(struct lttng_session *session,
				       const char *transport_name,
				       void *buf_addr,
				       size_t subbuf_size, size_t num_subbuf,
				       unsigned int switch_timer_interval,
				       unsigned int read_timer_interval,
				       enum channel_type channel_type)
{
	struct lttng_channel *chan;
	struct lttng_transport *transport = NULL;

	mutex_lock(&sessions_mutex);
	if (session->been_active && channel_type != METADATA_CHANNEL)
		goto active;	/* Refuse to add channel to active session */
	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng: transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	/* Pin the transport module so its ops stay valid. */
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
		goto notransport;
	}
	chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
	if (!chan)
		goto nomem;
	chan->session = session;
	chan->id = session->free_chan_id++;
	chan->ops = &transport->ops;
	/*
	 * Note: the channel creation op already writes into the packet
	 * headers. Therefore the "chan" information used as input
	 * should be already accessible.
	 */
	chan->chan = transport->ops.channel_create(transport_name,
			chan, buf_addr, subbuf_size, num_subbuf,
			switch_timer_interval, read_timer_interval);
	if (!chan->chan)
		goto create_error;
	chan->tstate = 1;
	chan->enabled = 1;
	chan->transport = transport;
	chan->channel_type = channel_type;
	list_add(&chan->list, &session->chan);
	mutex_unlock(&sessions_mutex);
	return chan;

create_error:
	kfree(chan);
nomem:
	/* Drop the module reference taken above, if any. */
	if (transport)
		module_put(transport->owner);
notransport:
active:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
547
548 /*
549 * Only used internally at session destruction for per-cpu channels, and
550 * when metadata channel is released.
551 * Needs to be called with sessions mutex held.
552 */
553 static
554 void _lttng_channel_destroy(struct lttng_channel *chan)
555 {
556 chan->ops->channel_destroy(chan->chan);
557 module_put(chan->transport->owner);
558 list_del(&chan->list);
559 lttng_destroy_context(chan->ctx);
560 kfree(chan);
561 }
562
/*
 * Destroy a metadata channel (release path of the metadata channel
 * file). Must only be called on METADATA_CHANNEL channels.
 */
void lttng_metadata_channel_destroy(struct lttng_channel *chan)
{
	BUG_ON(chan->channel_type != METADATA_CHANNEL);

	/* Protect the metadata cache with the sessions_mutex. */
	mutex_lock(&sessions_mutex);
	_lttng_channel_destroy(chan);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
573
/*
 * Hang up a metadata stream: mark it finalized and wake any blocked
 * reader so it can observe the end-of-stream condition.
 */
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
{
	/* Set finalized before waking readers so they see it on wakeup. */
	stream->finalized = 1;
	wake_up_interruptible(&stream->read_wait);
}
580
581 /*
582 * Supports event creation while tracing session is active.
583 * Needs to be called with sessions mutex held.
584 */
585 struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
586 struct lttng_kernel_event *event_param,
587 void *filter,
588 const struct lttng_event_desc *event_desc,
589 enum lttng_kernel_instrumentation itype)
590 {
591 struct lttng_session *session = chan->session;
592 struct lttng_event *event;
593 const char *event_name;
594 struct hlist_head *head;
595 size_t name_len;
596 uint32_t hash;
597 int ret;
598
599 if (chan->free_event_id == -1U) {
600 ret = -EMFILE;
601 goto full;
602 }
603
604 switch (itype) {
605 case LTTNG_KERNEL_TRACEPOINT:
606 event_name = event_desc->name;
607 break;
608 case LTTNG_KERNEL_KPROBE:
609 case LTTNG_KERNEL_UPROBE:
610 case LTTNG_KERNEL_KRETPROBE:
611 case LTTNG_KERNEL_NOOP:
612 case LTTNG_KERNEL_SYSCALL:
613 event_name = event_param->name;
614 break;
615 case LTTNG_KERNEL_FUNCTION: /* Fall-through. */
616 default:
617 WARN_ON_ONCE(1);
618 ret = -EINVAL;
619 goto type_error;
620 }
621 name_len = strlen(event_name);
622 hash = jhash(event_name, name_len, 0);
623 head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
624 lttng_hlist_for_each_entry(event, head, hlist) {
625 WARN_ON_ONCE(!event->desc);
626 if (!strncmp(event->desc->name, event_name,
627 LTTNG_KERNEL_SYM_NAME_LEN - 1)
628 && chan == event->chan) {
629 ret = -EEXIST;
630 goto exist;
631 }
632 }
633
634 event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
635 if (!event) {
636 ret = -ENOMEM;
637 goto cache_error;
638 }
639 event->chan = chan;
640 event->filter = filter;
641 event->id = chan->free_event_id++;
642 event->instrumentation = itype;
643 event->evtype = LTTNG_TYPE_EVENT;
644 INIT_LIST_HEAD(&event->bytecode_runtime_head);
645 INIT_LIST_HEAD(&event->enablers_ref_head);
646
647 switch (itype) {
648 case LTTNG_KERNEL_TRACEPOINT:
649 /* Event will be enabled by enabler sync. */
650 event->enabled = 0;
651 event->registered = 0;
652 event->desc = lttng_event_get(event_name);
653 if (!event->desc) {
654 ret = -ENOENT;
655 goto register_error;
656 }
657 /* Populate lttng_event structure before event registration. */
658 smp_wmb();
659 break;
660 case LTTNG_KERNEL_KPROBE:
661 /*
662 * Needs to be explicitly enabled after creation, since
663 * we may want to apply filters.
664 */
665 event->enabled = 0;
666 event->registered = 1;
667 /*
668 * Populate lttng_event structure before event
669 * registration.
670 */
671 smp_wmb();
672 ret = lttng_kprobes_register(event_name,
673 event_param->u.kprobe.symbol_name,
674 event_param->u.kprobe.offset,
675 event_param->u.kprobe.addr,
676 event);
677 if (ret) {
678 ret = -EINVAL;
679 goto register_error;
680 }
681 ret = try_module_get(event->desc->owner);
682 WARN_ON_ONCE(!ret);
683 break;
684 case LTTNG_KERNEL_KRETPROBE:
685 {
686 struct lttng_event *event_return;
687
688 /* kretprobe defines 2 events */
689 /*
690 * Needs to be explicitly enabled after creation, since
691 * we may want to apply filters.
692 */
693 event->enabled = 0;
694 event->registered = 1;
695 event_return =
696 kmem_cache_zalloc(event_cache, GFP_KERNEL);
697 if (!event_return) {
698 ret = -ENOMEM;
699 goto register_error;
700 }
701 event_return->chan = chan;
702 event_return->filter = filter;
703 event_return->id = chan->free_event_id++;
704 event_return->enabled = 0;
705 event_return->registered = 1;
706 event_return->instrumentation = itype;
707 /*
708 * Populate lttng_event structure before kretprobe registration.
709 */
710 smp_wmb();
711 ret = lttng_kretprobes_register(event_name,
712 event_param->u.kretprobe.symbol_name,
713 event_param->u.kretprobe.offset,
714 event_param->u.kretprobe.addr,
715 event, event_return);
716 if (ret) {
717 kmem_cache_free(event_cache, event_return);
718 ret = -EINVAL;
719 goto register_error;
720 }
721 /* Take 2 refs on the module: one per event. */
722 ret = try_module_get(event->desc->owner);
723 WARN_ON_ONCE(!ret);
724 ret = try_module_get(event->desc->owner);
725 WARN_ON_ONCE(!ret);
726 ret = _lttng_event_metadata_statedump(chan->session, chan,
727 event_return);
728 WARN_ON_ONCE(ret > 0);
729 if (ret) {
730 kmem_cache_free(event_cache, event_return);
731 module_put(event->desc->owner);
732 module_put(event->desc->owner);
733 goto statedump_error;
734 }
735 list_add(&event_return->list, &chan->session->events);
736 break;
737 }
738 case LTTNG_KERNEL_NOOP:
739 case LTTNG_KERNEL_SYSCALL:
740 /*
741 * Needs to be explicitly enabled after creation, since
742 * we may want to apply filters.
743 */
744 event->enabled = 0;
745 event->registered = 0;
746 event->desc = event_desc;
747 if (!event->desc) {
748 ret = -EINVAL;
749 goto register_error;
750 }
751 break;
752 case LTTNG_KERNEL_UPROBE:
753 /*
754 * Needs to be explicitly enabled after creation, since
755 * we may want to apply filters.
756 */
757 event->enabled = 0;
758 event->registered = 1;
759
760 /*
761 * Populate lttng_event structure before event
762 * registration.
763 */
764 smp_wmb();
765
766 ret = lttng_uprobes_register(event_param->name,
767 event_param->u.uprobe.fd,
768 event);
769 if (ret)
770 goto register_error;
771 ret = try_module_get(event->desc->owner);
772 WARN_ON_ONCE(!ret);
773 break;
774 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
775 default:
776 WARN_ON_ONCE(1);
777 ret = -EINVAL;
778 goto register_error;
779 }
780 ret = _lttng_event_metadata_statedump(chan->session, chan, event);
781 WARN_ON_ONCE(ret > 0);
782 if (ret) {
783 goto statedump_error;
784 }
785 hlist_add_head(&event->hlist, head);
786 list_add(&event->list, &chan->session->events);
787 return event;
788
789 statedump_error:
790 /* If a statedump error occurs, events will not be readable. */
791 register_error:
792 kmem_cache_free(event_cache, event);
793 cache_error:
794 exist:
795 type_error:
796 full:
797 return ERR_PTR(ret);
798 }
799
800 struct lttng_event *lttng_event_create(struct lttng_channel *chan,
801 struct lttng_kernel_event *event_param,
802 void *filter,
803 const struct lttng_event_desc *event_desc,
804 enum lttng_kernel_instrumentation itype)
805 {
806 struct lttng_event *event;
807
808 mutex_lock(&sessions_mutex);
809 event = _lttng_event_create(chan, event_param, filter, event_desc,
810 itype);
811 mutex_unlock(&sessions_mutex);
812 return event;
813 }
814
/*
 * Attach the kernel-side instrumentation for an event that is not yet
 * registered. Tracepoints get a probe registered; syscalls get their
 * filter bit enabled; probe-style events were registered at creation
 * and are a no-op here. Only marks the event registered on success.
 * (Only used for tracepoints for now.)
 */
static
void register_event(struct lttng_event *event)
{
	const struct lttng_event_desc *desc;
	int ret = -EINVAL;

	if (event->registered)
		return;

	desc = event->desc;
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
						  desc->probe_callback,
						  event);
		break;
	case LTTNG_KERNEL_SYSCALL:
		ret = lttng_syscall_filter_enable(event->chan,
			desc->name);
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_UPROBE:
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_NOOP:
		/* Registered at creation time; nothing to do here. */
		ret = 0;
		break;
	case LTTNG_KERNEL_FUNCTION:	/* Fall-through */
	default:
		WARN_ON_ONCE(1);
	}
	if (!ret)
		event->registered = 1;
}
849
850 /*
851 * Only used internally at session destruction.
852 */
853 int _lttng_event_unregister(struct lttng_event *event)
854 {
855 const struct lttng_event_desc *desc;
856 int ret = -EINVAL;
857
858 if (!event->registered)
859 return 0;
860
861 desc = event->desc;
862 switch (event->instrumentation) {
863 case LTTNG_KERNEL_TRACEPOINT:
864 ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
865 event->desc->probe_callback,
866 event);
867 break;
868 case LTTNG_KERNEL_KPROBE:
869 lttng_kprobes_unregister(event);
870 ret = 0;
871 break;
872 case LTTNG_KERNEL_KRETPROBE:
873 lttng_kretprobes_unregister(event);
874 ret = 0;
875 break;
876 case LTTNG_KERNEL_SYSCALL:
877 ret = lttng_syscall_filter_disable(event->chan,
878 desc->name);
879 break;
880 case LTTNG_KERNEL_NOOP:
881 ret = 0;
882 break;
883 case LTTNG_KERNEL_UPROBE:
884 lttng_uprobes_unregister(event);
885 ret = 0;
886 break;
887 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
888 default:
889 WARN_ON_ONCE(1);
890 }
891 if (!ret)
892 event->registered = 0;
893 return ret;
894 }
895
896 /*
897 * Only used internally at session destruction.
898 */
899 static
900 void _lttng_event_destroy(struct lttng_event *event)
901 {
902 switch (event->instrumentation) {
903 case LTTNG_KERNEL_TRACEPOINT:
904 lttng_event_put(event->desc);
905 break;
906 case LTTNG_KERNEL_KPROBE:
907 module_put(event->desc->owner);
908 lttng_kprobes_destroy_private(event);
909 break;
910 case LTTNG_KERNEL_KRETPROBE:
911 module_put(event->desc->owner);
912 lttng_kretprobes_destroy_private(event);
913 break;
914 case LTTNG_KERNEL_NOOP:
915 case LTTNG_KERNEL_SYSCALL:
916 break;
917 case LTTNG_KERNEL_UPROBE:
918 module_put(event->desc->owner);
919 lttng_uprobes_destroy_private(event);
920 break;
921 case LTTNG_KERNEL_FUNCTION: /* Fall-through */
922 default:
923 WARN_ON_ONCE(1);
924 }
925 list_del(&event->list);
926 lttng_destroy_context(event->ctx);
927 kmem_cache_free(event_cache, event);
928 }
929
930 struct lttng_id_tracker *get_tracker(struct lttng_session *session,
931 enum tracker_type tracker_type)
932 {
933 switch (tracker_type) {
934 case TRACKER_PID:
935 return &session->pid_tracker;
936 case TRACKER_VPID:
937 return &session->vpid_tracker;
938 case TRACKER_UID:
939 return &session->uid_tracker;
940 case TRACKER_VUID:
941 return &session->vuid_tracker;
942 case TRACKER_GID:
943 return &session->gid_tracker;
944 case TRACKER_VGID:
945 return &session->vgid_tracker;
946 default:
947 WARN_ON_ONCE(1);
948 return NULL;
949 }
950 }
951
952 int lttng_session_track_id(struct lttng_session *session,
953 enum tracker_type tracker_type, int id)
954 {
955 struct lttng_id_tracker *tracker;
956 int ret;
957
958 tracker = get_tracker(session, tracker_type);
959 if (!tracker)
960 return -EINVAL;
961 if (id < -1)
962 return -EINVAL;
963 mutex_lock(&sessions_mutex);
964 if (id == -1) {
965 /* track all ids: destroy tracker. */
966 lttng_id_tracker_destroy(tracker, true);
967 ret = 0;
968 } else {
969 ret = lttng_id_tracker_add(tracker, id);
970 }
971 mutex_unlock(&sessions_mutex);
972 return ret;
973 }
974
975 int lttng_session_untrack_id(struct lttng_session *session,
976 enum tracker_type tracker_type, int id)
977 {
978 struct lttng_id_tracker *tracker;
979 int ret;
980
981 tracker = get_tracker(session, tracker_type);
982 if (!tracker)
983 return -EINVAL;
984 if (id < -1)
985 return -EINVAL;
986 mutex_lock(&sessions_mutex);
987 if (id == -1) {
988 /* untrack all ids: replace by empty tracker. */
989 ret = lttng_id_tracker_empty_set(tracker);
990 } else {
991 ret = lttng_id_tracker_del(tracker, id);
992 }
993 mutex_unlock(&sessions_mutex);
994 return ret;
995 }
996
/*
 * seq_file .start: take the sessions mutex (released in id_list_stop)
 * and return the *pos-th hash node, or — when the tracker is disabled
 * (NULL snapshot) — the NULL snapshot itself as the single "track all"
 * record. Returns NULL at end of list.
 */
static
void *id_list_start(struct seq_file *m, loff_t *pos)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	mutex_lock(&sessions_mutex);
	if (id_tracker_p) {
		/* Walk every hash bucket, counting entries up to *pos. */
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *pos)
					return e;
			}
		}
	} else {
		/* ID tracker disabled. */
		if (iter >= *pos && iter == 0) {
			return id_tracker_p;	/* empty tracker */
		}
		iter++;
	}
	/* End of list */
	return NULL;
}
1025
/*
 * seq_file .next: advance *ppos and re-walk the table from the start
 * to find the entry at the new position (the hash table has no stable
 * cursor). Same disabled-tracker convention as id_list_start().
 * Called with sessions_mutex held.
 */
static
void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	(*ppos)++;
	if (id_tracker_p) {
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *ppos)
					return e;
			}
		}
	} else {
		/* ID tracker disabled. */
		if (iter >= *ppos && iter == 0)
			return p;	/* empty tracker */
		iter++;
	}

	/* End of list */
	return NULL;
}
1055
/* seq_file .stop: release the mutex taken by id_list_start(). */
static
void id_list_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&sessions_mutex);
}
1061
/*
 * seq_file .show: print one tracker record in the session's listing
 * format. A disabled tracker (p == NULL snapshot) prints id -1,
 * meaning "all IDs tracked".
 */
static
int id_list_show(struct seq_file *m, void *p)
{
	struct lttng_id_tracker *id_tracker = m->private;
	struct lttng_id_tracker_rcu *id_tracker_p = id_tracker->p;
	int id;

	if (p == id_tracker_p) {
		/* Tracker disabled. */
		id = -1;
	} else {
		const struct lttng_id_hash_node *e = p;

		id = lttng_id_tracker_get_node_id(e);
	}
	/* Field name in the output depends on the tracker type. */
	switch (id_tracker->tracker_type) {
	case TRACKER_PID:
		seq_printf(m,	"process { pid = %d; };\n", id);
		break;
	case TRACKER_VPID:
		seq_printf(m,	"process { vpid = %d; };\n", id);
		break;
	case TRACKER_UID:
		seq_printf(m,	"user { uid = %d; };\n", id);
		break;
	case TRACKER_VUID:
		seq_printf(m,	"user { vuid = %d; };\n", id);
		break;
	case TRACKER_GID:
		seq_printf(m,	"group { gid = %d; };\n", id);
		break;
	case TRACKER_VGID:
		seq_printf(m,	"group { vgid = %d; };\n", id);
		break;
	default:
		seq_printf(m,	"UNKNOWN { field = %d };\n", id);
	}
	return 0;
}
1101
/* seq_file iterator for listing the IDs held by a tracker. */
static
const struct seq_operations lttng_tracker_ids_list_seq_ops = {
	.start = id_list_start,
	.next = id_list_next,
	.stop = id_list_stop,
	.show = id_list_show,
};
1109
/* File open: bind the tracker-listing seq_file iterator to the file. */
static
int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &lttng_tracker_ids_list_seq_ops);
}
1115
/*
 * File release: tear down the seq_file and drop the session file
 * reference taken in lttng_session_list_tracker_ids().
 */
static
int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct lttng_id_tracker *id_tracker = m->private;
	int ret;

	WARN_ON_ONCE(!id_tracker);
	ret = seq_release(inode, file);
	if (!ret)
		fput(id_tracker->session->file);
	return ret;
}
1129
/* File operations for the tracker-listing anon file (read-only seq). */
const struct file_operations lttng_tracker_ids_list_fops = {
	.owner = THIS_MODULE,
	.open = lttng_tracker_ids_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = lttng_tracker_ids_list_release,
};
1137
/*
 * Create an anonymous file listing the IDs of one of the session's
 * trackers and return its fd. Pins the session file so the session
 * outlives the listing file (dropped in the release handler).
 * Error paths unwind in strict reverse order of acquisition.
 */
int lttng_session_list_tracker_ids(struct lttng_session *session,
		enum tracker_type tracker_type)
{
	struct file *tracker_ids_list_file;
	struct seq_file *m;
	int file_fd, ret;

	file_fd = lttng_get_unused_fd();
	if (file_fd < 0) {
		ret = file_fd;
		goto fd_error;
	}

	tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
					  &lttng_tracker_ids_list_fops,
					  NULL, O_RDWR);
	if (IS_ERR(tracker_ids_list_file)) {
		ret = PTR_ERR(tracker_ids_list_file);
		goto file_error;
	}
	/* Pin the session file; released by the file's release handler. */
	if (!atomic_long_add_unless(&session->file->f_count, 1, LONG_MAX)) {
		ret = -EOVERFLOW;
		goto refcount_error;
	}
	ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
	if (ret < 0)
		goto open_error;
	m = tracker_ids_list_file->private_data;

	m->private = get_tracker(session, tracker_type);
	BUG_ON(!m->private);
	/* Publish the fd only once everything is set up. */
	fd_install(file_fd, tracker_ids_list_file);

	return file_fd;

open_error:
	atomic_long_dec(&session->file->f_count);
refcount_error:
	fput(tracker_ids_list_file);
file_error:
	put_unused_fd(file_fd);
fd_error:
	return ret;
}
1182
1183 /*
1184 * Enabler management.
1185 */
1186 static
1187 int lttng_match_enabler_star_glob(const char *desc_name,
1188 const char *pattern)
1189 {
1190 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1191 desc_name, LTTNG_SIZE_MAX))
1192 return 0;
1193 return 1;
1194 }
1195
/*
 * Exact event name comparison. Returns 1 on match, 0 otherwise.
 */
static
int lttng_match_enabler_name(const char *desc_name,
		const char *name)
{
	return strcmp(desc_name, name) == 0;
}
1204
/*
 * Match an event descriptor against an enabler.
 * For syscall instrumentation, the "compat_" prefix and the
 * "syscall_entry_"/"syscall_exit_" prefixes are stripped from the
 * descriptor name before matching, so a single enabler pattern covers
 * entry/exit and compat variants alike.
 * Returns 1 on match, 0 on mismatch, negative error on invalid input.
 */
static
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	const char *desc_name, *enabler_name;

	enabler_name = enabler->event_param.name;
	switch (enabler->event_param.instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		desc_name = desc->name;
		break;
	case LTTNG_KERNEL_SYSCALL:
		desc_name = desc->name;
		/* Strip the optional "compat_" prefix. */
		if (!strncmp(desc_name, "compat_", strlen("compat_")))
			desc_name += strlen("compat_");
		if (!strncmp(desc_name, "syscall_exit_",
				strlen("syscall_exit_"))) {
			desc_name += strlen("syscall_exit_");
		} else if (!strncmp(desc_name, "syscall_entry_",
				strlen("syscall_entry_"))) {
			desc_name += strlen("syscall_entry_");
		} else {
			/* Syscall descriptors always carry one of the prefixes above. */
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	switch (enabler->type) {
	case LTTNG_ENABLER_STAR_GLOB:
		return lttng_match_enabler_star_glob(desc_name, enabler_name);
	case LTTNG_ENABLER_NAME:
		return lttng_match_enabler_name(desc_name, enabler_name);
	default:
		return -EINVAL;
	}
}
1244
1245 static
1246 int lttng_event_match_enabler(struct lttng_event *event,
1247 struct lttng_enabler *enabler)
1248 {
1249 if (enabler->event_param.instrumentation != event->instrumentation)
1250 return 0;
1251 if (lttng_desc_match_enabler(event->desc, enabler)
1252 && event->chan == enabler->chan)
1253 return 1;
1254 else
1255 return 0;
1256 }
1257
1258 static
1259 struct lttng_enabler_ref *lttng_event_enabler_ref(struct lttng_event *event,
1260 struct lttng_enabler *enabler)
1261 {
1262 struct lttng_enabler_ref *enabler_ref;
1263
1264 list_for_each_entry(enabler_ref,
1265 &event->enablers_ref_head, node) {
1266 if (enabler_ref->ref == enabler)
1267 return enabler_ref;
1268 }
1269 return NULL;
1270 }
1271
/*
 * Walk all registered probe descriptors and, for every event
 * descriptor that matches the enabler, create the corresponding
 * lttng_event on the enabler's channel if it does not already exist.
 * Must be called with sessions mutex held (protects the probe list
 * and session event hash table).
 */
static
void lttng_create_tracepoint_if_missing(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_probe_desc *probe_desc;
	const struct lttng_event_desc *desc;
	int i;
	struct list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	/*
	 * For each probe event, if we find that a probe event matches
	 * our enabler, create an associated lttng_event if not
	 * already present.
	 */
	list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			int found = 0;
			struct hlist_head *head;
			const char *event_name;
			size_t name_len;
			uint32_t hash;
			struct lttng_event *event;

			desc = probe_desc->event_desc[i];
			if (!lttng_desc_match_enabler(desc, enabler))
				continue;
			event_name = desc->name;
			name_len = strlen(event_name);

			/*
			 * Check if already created: look up the (name, channel)
			 * pair in the session's event hash table.
			 */
			hash = jhash(event_name, name_len, 0);
			head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
			lttng_hlist_for_each_entry(event, head, hlist) {
				if (event->desc == desc
						&& event->chan == enabler->chan)
					found = 1;
			}
			if (found)
				continue;

			/*
			 * We need to create an event for this
			 * event probe.
			 */
			event = _lttng_event_create(enabler->chan,
					NULL, NULL, desc,
					LTTNG_KERNEL_TRACEPOINT);
			if (!event) {
				/* Best effort: log and keep iterating. */
				printk(KERN_INFO "LTTng: Unable to create event %s\n",
					probe_desc->event_desc[i]->name);
			}
		}
	}
}
1329
1330 static
1331 void lttng_create_syscall_if_missing(struct lttng_enabler *enabler)
1332 {
1333 int ret;
1334
1335 ret = lttng_syscalls_register(enabler->chan, NULL);
1336 WARN_ON_ONCE(ret);
1337 }
1338
1339 /*
1340 * Create struct lttng_event if it is missing and present in the list of
1341 * tracepoint probes.
1342 * Should be called with sessions mutex held.
1343 */
1344 static
1345 void lttng_create_event_if_missing(struct lttng_enabler *enabler)
1346 {
1347 switch (enabler->event_param.instrumentation) {
1348 case LTTNG_KERNEL_TRACEPOINT:
1349 lttng_create_tracepoint_if_missing(enabler);
1350 break;
1351 case LTTNG_KERNEL_SYSCALL:
1352 lttng_create_syscall_if_missing(enabler);
1353 break;
1354 default:
1355 WARN_ON_ONCE(1);
1356 break;
1357 }
1358 }
1359
1360 /*
1361 * Create events associated with an enabler (if not already present),
1362 * and add backward reference from the event to the enabler.
1363 * Should be called with sessions mutex held.
1364 */
/*
 * Create events associated with an enabler (if not already present),
 * and add backward reference from the event to the enabler.
 * Should be called with sessions mutex held.
 * Returns 0 on success, -ENOMEM if a backward reference cannot be
 * allocated (earlier references remain attached).
 */
static
int lttng_enabler_ref_events(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_event *event;

	/* First ensure that probe events are created for this enabler. */
	lttng_create_event_if_missing(enabler);

	/* For each event matching enabler in session event list. */
	list_for_each_entry(event, &session->events, list) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_match_enabler(event, enabler))
			continue;
		enabler_ref = lttng_event_enabler_ref(event, enabler);
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event to enabler.
			 */
			enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
			if (!enabler_ref)
				return -ENOMEM;
			enabler_ref->ref = enabler;
			list_add(&enabler_ref->node,
				&event->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_event_link_bytecode(event, enabler);

		/* TODO: merge event context. */
	}
	return 0;
}
1403
1404 /*
1405 * Called at module load: connect the probe on all enablers matching
1406 * this event.
1407 * Called with sessions lock held.
1408 */
1409 int lttng_fix_pending_events(void)
1410 {
1411 struct lttng_session *session;
1412
1413 list_for_each_entry(session, &sessions, list)
1414 lttng_session_lazy_sync_enablers(session);
1415 return 0;
1416 }
1417
/*
 * Allocate a new enabler, register it on the channel's session, and
 * trigger a lazy sync so matching events are created/linked.
 * The enabler starts in the disabled state.
 * Returns the enabler, or NULL on allocation failure.
 */
struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
		struct lttng_kernel_event *event_param,
		struct lttng_channel *chan)
{
	struct lttng_enabler *enabler;

	enabler = kzalloc(sizeof(*enabler), GFP_KERNEL);
	if (!enabler)
		return NULL;
	enabler->type = type;
	INIT_LIST_HEAD(&enabler->filter_bytecode_head);
	memcpy(&enabler->event_param, event_param,
		sizeof(enabler->event_param));
	enabler->chan = chan;
	/* ctx left NULL */
	enabler->enabled = 0;
	enabler->evtype = LTTNG_TYPE_ENABLER;
	/* Publication on the session list requires the sessions mutex. */
	mutex_lock(&sessions_mutex);
	list_add(&enabler->node, &enabler->chan->session->enablers_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	mutex_unlock(&sessions_mutex);
	return enabler;
}
1441
1442 int lttng_enabler_enable(struct lttng_enabler *enabler)
1443 {
1444 mutex_lock(&sessions_mutex);
1445 enabler->enabled = 1;
1446 lttng_session_lazy_sync_enablers(enabler->chan->session);
1447 mutex_unlock(&sessions_mutex);
1448 return 0;
1449 }
1450
1451 int lttng_enabler_disable(struct lttng_enabler *enabler)
1452 {
1453 mutex_lock(&sessions_mutex);
1454 enabler->enabled = 0;
1455 lttng_session_lazy_sync_enablers(enabler->chan->session);
1456 mutex_unlock(&sessions_mutex);
1457 return 0;
1458 }
1459
1460 int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
1461 struct lttng_kernel_filter_bytecode __user *bytecode)
1462 {
1463 struct lttng_filter_bytecode_node *bytecode_node;
1464 uint32_t bytecode_len;
1465 int ret;
1466
1467 ret = get_user(bytecode_len, &bytecode->len);
1468 if (ret)
1469 return ret;
1470 bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
1471 GFP_KERNEL);
1472 if (!bytecode_node)
1473 return -ENOMEM;
1474 ret = copy_from_user(&bytecode_node->bc, bytecode,
1475 sizeof(*bytecode) + bytecode_len);
1476 if (ret)
1477 goto error_free;
1478 bytecode_node->enabler = enabler;
1479 /* Enforce length based on allocated size */
1480 bytecode_node->bc.len = bytecode_len;
1481 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
1482 lttng_session_lazy_sync_enablers(enabler->chan->session);
1483 return 0;
1484
1485 error_free:
1486 kfree(bytecode_node);
1487 return ret;
1488 }
1489
1490 int lttng_event_add_callsite(struct lttng_event *event,
1491 struct lttng_kernel_event_callsite __user *callsite)
1492 {
1493
1494 switch (event->instrumentation) {
1495 case LTTNG_KERNEL_UPROBE:
1496 return lttng_uprobes_add_callsite(event, callsite);
1497 default:
1498 return -EINVAL;
1499 }
1500 }
1501
/*
 * Attaching a context to an enabler is not implemented; the ctx field
 * of the enabler is always left NULL (see lttng_enabler_create).
 */
int lttng_enabler_attach_context(struct lttng_enabler *enabler,
		struct lttng_kernel_context *context_param)
{
	return -ENOSYS;
}
1507
/*
 * Tear down an enabler: free its filter bytecode programs, destroy its
 * contexts, and unlink it from the session's enabler list.
 * Caller must hold the sessions mutex (protects the enabler list).
 */
static
void lttng_enabler_destroy(struct lttng_enabler *enabler)
{
	struct lttng_filter_bytecode_node *filter_node, *tmp_filter_node;

	/* Destroy filter bytecode */
	list_for_each_entry_safe(filter_node, tmp_filter_node,
			&enabler->filter_bytecode_head, node) {
		kfree(filter_node);
	}

	/* Destroy contexts */
	lttng_destroy_context(enabler->ctx);

	list_del(&enabler->node);
	kfree(enabler);
}
1525
1526 /*
1527 * lttng_session_sync_enablers should be called just before starting a
1528 * session.
1529 * Should be called with sessions mutex held.
1530 */
/*
 * lttng_session_sync_enablers should be called just before starting a
 * session. Creates/links events for every enabler, then recomputes
 * each event's enabled state, tracepoint registration, and filter
 * bytecode state. Should be called with sessions mutex held.
 */
static
void lttng_session_sync_enablers(struct lttng_session *session)
{
	struct lttng_enabler *enabler;
	struct lttng_event *event;

	list_for_each_entry(enabler, &session->enablers_head, node)
		lttng_enabler_ref_events(enabler);
	/*
	 * For each event, if at least one of its enablers is enabled,
	 * and its channel and session transient states are enabled, we
	 * enable the event, else we disable it.
	 */
	list_for_each_entry(event, &session->events, list) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		switch (event->instrumentation) {
		case LTTNG_KERNEL_TRACEPOINT:
		case LTTNG_KERNEL_SYSCALL:
			/* Enable events */
			list_for_each_entry(enabler_ref,
					&event->enablers_ref_head, node) {
				if (enabler_ref->ref->enabled) {
					enabled = 1;
					break;
				}
			}
			break;
		default:
			/* Not handled with lazy sync. */
			continue;
		}
		/*
		 * Enabled state is based on union of enablers, with
		 * intesection of session and channel transient enable
		 * states.
		 */
		enabled = enabled && session->tstate && event->chan->tstate;

		WRITE_ONCE(event->enabled, enabled);
		/*
		 * Sync tracepoint registration with event enabled
		 * state.
		 */
		if (enabled) {
			register_event(event);
		} else {
			_lttng_event_unregister(event);
		}

		/* Check if has enablers without bytecode enabled */
		list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		/*
		 * An enabled enabler with no bytecode means the event must
		 * fire unconditionally, regardless of other filters.
		 */
		event->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters */
		list_for_each_entry(runtime,
				&event->bytecode_runtime_head, node)
			lttng_filter_sync_state(runtime);
	}
}
1601
1602 /*
1603 * Apply enablers to session events, adding events to session if need
1604 * be. It is required after each modification applied to an active
1605 * session, and right before session "start".
1606 * "lazy" sync means we only sync if required.
1607 * Should be called with sessions mutex held.
1608 */
1609 static
1610 void lttng_session_lazy_sync_enablers(struct lttng_session *session)
1611 {
1612 /* We can skip if session is not active */
1613 if (!session->active)
1614 return;
1615 lttng_session_sync_enablers(session);
1616 }
1617
1618 /*
1619 * Serialize at most one packet worth of metadata into a metadata
1620 * channel.
1621 * We grab the metadata cache mutex to get exclusive access to our metadata
1622 * buffer and to the metadata cache. Exclusive access to the metadata buffer
1623 * allows us to do racy operations such as looking for remaining space left in
1624 * packet and write, since mutual exclusion protects us from concurrent writes.
1625 * Mutual exclusion on the metadata cache allow us to read the cache content
1626 * without racing against reallocation of the cache by updates.
1627 * Returns the number of bytes written in the channel, 0 if no data
1628 * was written and a negative value on error.
1629 */
/*
 * Serialize at most one packet worth of metadata into a metadata
 * channel. Returns the number of bytes written in the channel, 0 if no
 * data was written, and a negative value on error. Also reports stream
 * coherency through *coherent when non-NULL.
 */
int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
		struct channel *chan, bool *coherent)
{
	struct lib_ring_buffer_ctx ctx;
	int ret = 0;
	size_t len, reserve_len;

	/*
	 * Ensure we support mutiple get_next / put sequences followed by
	 * put_next. The metadata cache lock protects reading the metadata
	 * cache. It can indeed be read concurrently by "get_next_subbuf" and
	 * "flush" operations on the buffer invoked by different processes.
	 * Moreover, since the metadata cache memory can be reallocated, we
	 * need to have exclusive access against updates even though we only
	 * read it.
	 */
	mutex_lock(&stream->metadata_cache->lock);
	WARN_ON(stream->metadata_in < stream->metadata_out);
	if (stream->metadata_in != stream->metadata_out)
		goto end;

	/* Metadata regenerated, change the version. */
	if (stream->metadata_cache->version != stream->version)
		stream->version = stream->metadata_cache->version;

	/* Bytes of cached metadata not yet pushed to this stream. */
	len = stream->metadata_cache->metadata_written -
		stream->metadata_in;
	if (!len)
		goto end;
	/* Clamp to the space left in the current packet. */
	reserve_len = min_t(size_t,
			stream->transport->ops.packet_avail_size(chan),
			len);
	lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
			sizeof(char), -1);
	/*
	 * If reservation failed, return an error to the caller.
	 */
	ret = stream->transport->ops.event_reserve(&ctx, 0);
	if (ret != 0) {
		printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
		stream->coherent = false;
		goto end;
	}
	stream->transport->ops.event_write(&ctx,
			stream->metadata_cache->data + stream->metadata_in,
			reserve_len);
	stream->transport->ops.event_commit(&ctx);
	stream->metadata_in += reserve_len;
	/* A partial write leaves the stream in a non-coherent state. */
	if (reserve_len < len)
		stream->coherent = false;
	else
		stream->coherent = true;
	ret = reserve_len;

end:
	if (coherent)
		*coherent = stream->coherent;
	mutex_unlock(&stream->metadata_cache->lock);
	return ret;
}
1690
/*
 * Open a metadata transaction: take the metadata cache lock on the
 * outermost "begin"; nested begins only bump the producing refcount.
 * Paired with lttng_metadata_end().
 */
static
void lttng_metadata_begin(struct lttng_session *session)
{
	if (atomic_inc_return(&session->metadata_cache->producing) == 1)
		mutex_lock(&session->metadata_cache->lock);
}
1697
/*
 * Close a metadata transaction: on the outermost "end", wake up all
 * metadata stream readers and release the metadata cache lock.
 * Paired with lttng_metadata_begin().
 */
static
void lttng_metadata_end(struct lttng_session *session)
{
	WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
	if (atomic_dec_return(&session->metadata_cache->producing) == 0) {
		struct lttng_metadata_stream *stream;

		list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
			wake_up_interruptible(&stream->read_wait);
		mutex_unlock(&session->metadata_cache->lock);
	}
}
1710
1711 /*
1712 * Write the metadata to the metadata cache.
1713 * Must be called with sessions_mutex held.
1714 * The metadata cache lock protects us from concurrent read access from
1715 * thread outputting metadata content to ring buffer.
1716 * The content of the printf is printed as a single atomic metadata
1717 * transaction.
1718 */
/*
 * Write the metadata to the metadata cache.
 * Must be called with sessions_mutex held, inside a
 * lttng_metadata_begin()/end() transaction (producing > 0).
 * Grows the cache (at least doubling) when the formatted string does
 * not fit. Returns 0 on success, -ENOMEM on allocation failure.
 */
int lttng_metadata_printf(struct lttng_session *session,
		const char *fmt, ...)
{
	char *str;
	size_t len;
	va_list ap;

	WARN_ON_ONCE(!READ_ONCE(session->active));

	va_start(ap, fmt);
	str = kvasprintf(GFP_KERNEL, fmt, ap);
	va_end(ap);
	if (!str)
		return -ENOMEM;

	len = strlen(str);
	WARN_ON_ONCE(!atomic_read(&session->metadata_cache->producing));
	if (session->metadata_cache->metadata_written + len >
			session->metadata_cache->cache_alloc) {
		char *tmp_cache_realloc;
		unsigned int tmp_cache_alloc_size;

		/* Grow to at least fit, at least doubling each time. */
		tmp_cache_alloc_size = max_t(unsigned int,
				session->metadata_cache->cache_alloc + len,
				session->metadata_cache->cache_alloc << 1);
		tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
		if (!tmp_cache_realloc)
			goto err;
		if (session->metadata_cache->data) {
			memcpy(tmp_cache_realloc,
				session->metadata_cache->data,
				session->metadata_cache->cache_alloc);
			vfree(session->metadata_cache->data);
		}

		session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
		session->metadata_cache->data = tmp_cache_realloc;
	}
	memcpy(session->metadata_cache->data +
			session->metadata_cache->metadata_written,
			str, len);
	session->metadata_cache->metadata_written += len;
	kfree(str);

	return 0;

err:
	kfree(str);
	return -ENOMEM;
}
1769
1770 static
1771 int print_tabs(struct lttng_session *session, size_t nesting)
1772 {
1773 size_t i;
1774
1775 for (i = 0; i < nesting; i++) {
1776 int ret;
1777
1778 ret = lttng_metadata_printf(session, " ");
1779 if (ret) {
1780 return ret;
1781 }
1782 }
1783 return 0;
1784 }
1785
/*
 * Emit the terminating "_<name>;" for a field declaration. All field
 * names are prefixed with '_' in the CTF metadata to avoid clashes
 * with TSDL reserved keywords.
 */
static
int lttng_field_name_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting)
{
	return lttng_metadata_printf(session, " _%s;\n", field->name);
}
1793
/*
 * Emit a CTF integer type declaration (size, alignment, signedness,
 * encoding, base, and byte order when it differs from the host's).
 * Must be called with sessions_mutex held.
 */
static
int _lttng_integer_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting)
{
	int ret;

	WARN_ON_ONCE(type->atype != atype_integer);
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	ret = lttng_metadata_printf(session,
		"integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
		type->u.integer.size,
		type->u.integer.alignment,
		type->u.integer.signedness,
		(type->u.integer.encoding == lttng_encode_none)
			? "none"
			: (type->u.integer.encoding == lttng_encode_UTF8)
				? "UTF8"
				: "ASCII",
		type->u.integer.base,
		/* Only emit byte_order when reversed from host endianness. */
#if __BYTE_ORDER == __BIG_ENDIAN
		type->u.integer.reverse_byte_order ? " byte_order = le;" : ""
#else
		type->u.integer.reverse_byte_order ? " byte_order = be;" : ""
#endif
	);
	return ret;
}
1824
1825 /*
1826 * Must be called with sessions_mutex held.
1827 */
/*
 * Emit a CTF struct type declaration, recursing into each member
 * field, with an optional align() attribute.
 * Must be called with sessions_mutex held.
 */
static
int _lttng_struct_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting)
{
	int ret;
	uint32_t i, nr_fields;
	unsigned int alignment;

	WARN_ON_ONCE(type->atype != atype_struct_nestable);

	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	ret = lttng_metadata_printf(session,
		"struct {\n");
	if (ret)
		return ret;
	nr_fields = type->u.struct_nestable.nr_fields;
	for (i = 0; i < nr_fields; i++) {
		const struct lttng_event_field *iter_field;

		iter_field = &type->u.struct_nestable.fields[i];
		ret = _lttng_field_statedump(session, iter_field, nesting + 1);
		if (ret)
			return ret;
	}
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	alignment = type->u.struct_nestable.alignment;
	/* A zero alignment means "natural": no align() attribute. */
	if (alignment) {
		ret = lttng_metadata_printf(session,
			"} align(%u)",
			alignment);
	} else {
		ret = lttng_metadata_printf(session,
			"}");
	}
	return ret;
}
1869
1870 /*
1871 * Must be called with sessions_mutex held.
1872 */
1873 static
1874 int _lttng_struct_field_statedump(struct lttng_session *session,
1875 const struct lttng_event_field *field,
1876 size_t nesting)
1877 {
1878 int ret;
1879
1880 ret = _lttng_struct_type_statedump(session,
1881 &field->type, nesting);
1882 if (ret)
1883 return ret;
1884 return lttng_field_name_statedump(session, field, nesting);
1885 }
1886
1887 /*
1888 * Must be called with sessions_mutex held.
1889 */
/*
 * Emit a CTF variant type declaration selected by its tag field,
 * recursing into each choice. Must be called with sessions_mutex held.
 */
static
int _lttng_variant_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting)
{
	int ret;
	uint32_t i, nr_choices;

	WARN_ON_ONCE(type->atype != atype_variant_nestable);
	/*
	 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
	 */
	if (type->u.variant_nestable.alignment != 0)
		return -EINVAL;
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	ret = lttng_metadata_printf(session,
		"variant <_%s> {\n",
		type->u.variant_nestable.tag_name);
	if (ret)
		return ret;
	nr_choices = type->u.variant_nestable.nr_choices;
	for (i = 0; i < nr_choices; i++) {
		const struct lttng_event_field *iter_field;

		iter_field = &type->u.variant_nestable.choices[i];
		ret = _lttng_field_statedump(session, iter_field, nesting + 1);
		if (ret)
			return ret;
	}
	ret = print_tabs(session, nesting);
	if (ret)
		return ret;
	ret = lttng_metadata_printf(session,
		"}");
	return ret;
}
1928
1929 /*
1930 * Must be called with sessions_mutex held.
1931 */
1932 static
1933 int _lttng_variant_field_statedump(struct lttng_session *session,
1934 const struct lttng_event_field *field,
1935 size_t nesting)
1936 {
1937 int ret;
1938
1939 ret = _lttng_variant_type_statedump(session,
1940 &field->type, nesting);
1941 if (ret)
1942 return ret;
1943 return lttng_field_name_statedump(session, field, nesting);
1944 }
1945
1946 /*
1947 * Must be called with sessions_mutex held.
1948 */
/*
 * Emit a fixed-length array field. A nonzero alignment is expressed
 * through an empty padding struct preceding the array (CTF 1.8 has no
 * array alignment attribute). Must be called with sessions_mutex held.
 */
static
int _lttng_array_field_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting)
{
	int ret;
	const struct lttng_type *elem_type;

	WARN_ON_ONCE(field->type.atype != atype_array_nestable);

	if (field->type.u.array_nestable.alignment) {
		ret = print_tabs(session, nesting);
		if (ret)
			return ret;
		ret = lttng_metadata_printf(session,
		"struct { } align(%u) _%s_padding;\n",
				field->type.u.array_nestable.alignment * CHAR_BIT,
				field->name);
		if (ret)
			return ret;
	}
	/*
	 * Nested compound types: Only array of structures and variants are
	 * currently supported.
	 */
	elem_type = field->type.u.array_nestable.elem_type;
	switch (elem_type->atype) {
	case atype_integer:
	case atype_struct_nestable:
	case atype_variant_nestable:
		ret = _lttng_type_statedump(session, elem_type, nesting);
		if (ret)
			return ret;
		break;

	default:
		return -EINVAL;
	}
	ret = lttng_metadata_printf(session,
		" _%s[%u];\n",
		field->name,
		field->type.u.array_nestable.length);
	return ret;
}
1993
1994 /*
1995 * Must be called with sessions_mutex held.
1996 */
/*
 * Emit a variable-length (sequence) field, whose length is given by a
 * previously-emitted length field. A nonzero alignment is expressed
 * through an empty padding struct, as for arrays.
 * Must be called with sessions_mutex held.
 */
static
int _lttng_sequence_field_statedump(struct lttng_session *session,
		const struct lttng_event_field *field,
		size_t nesting)
{
	int ret;
	const char *length_name;
	const struct lttng_type *elem_type;

	WARN_ON_ONCE(field->type.atype != atype_sequence_nestable);

	length_name = field->type.u.sequence_nestable.length_name;

	if (field->type.u.sequence_nestable.alignment) {
		ret = print_tabs(session, nesting);
		if (ret)
			return ret;
		ret = lttng_metadata_printf(session,
		"struct { } align(%u) _%s_padding;\n",
				field->type.u.sequence_nestable.alignment * CHAR_BIT,
				field->name);
		if (ret)
			return ret;
	}

	/*
	 * Nested compound types: Only array of structures and variants are
	 * currently supported.
	 */
	elem_type = field->type.u.sequence_nestable.elem_type;
	switch (elem_type->atype) {
	case atype_integer:
	case atype_struct_nestable:
	case atype_variant_nestable:
		ret = _lttng_type_statedump(session, elem_type, nesting);
		if (ret)
			return ret;
		break;

	default:
		return -EINVAL;
	}
	ret = lttng_metadata_printf(session,
		" _%s[ _%s ];\n",
		field->name,
		field->type.u.sequence_nestable.length_name);
	return ret;
}
2045
2046 /*
2047 * Must be called with sessions_mutex held.
2048 */
/*
 * Emit a CTF enumeration type over its integer container type, dumping
 * every entry with its label (with '"' and '\' escaped) and its value
 * or value range. Must be called with sessions_mutex held.
 */
static
int _lttng_enum_type_statedump(struct lttng_session *session,
		const struct lttng_type *type,
		size_t nesting)
{
	const struct lttng_enum_desc *enum_desc;
	const struct lttng_type *container_type;
	int ret;
	unsigned int i, nr_entries;

	container_type = type->u.enum_nestable.container_type;
	/* CTF enums must be backed by an integer container type. */
	if (container_type->atype != atype_integer) {
		ret = -EINVAL;
		goto end;
	}
	enum_desc = type->u.enum_nestable.desc;
	nr_entries = enum_desc->nr_entries;

	ret = print_tabs(session, nesting);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, "enum : ");
	if (ret)
		goto end;
	ret = _lttng_integer_type_statedump(session, container_type, 0);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, " {\n");
	if (ret)
		goto end;
	/* Dump all entries */
	for (i = 0; i < nr_entries; i++) {
		const struct lttng_enum_entry *entry = &enum_desc->entries[i];
		int j, len;

		ret = print_tabs(session, nesting + 1);
		if (ret)
			goto end;
		ret = lttng_metadata_printf(session,
				"\"");
		if (ret)
			goto end;
		len = strlen(entry->string);
		/* Escape the character '"' */
		for (j = 0; j < len; j++) {
			char c = entry->string[j];

			switch (c) {
			case '"':
				ret = lttng_metadata_printf(session,
						"\\\"");
				break;
			case '\\':
				ret = lttng_metadata_printf(session,
						"\\\\");
				break;
			default:
				ret = lttng_metadata_printf(session,
						"%c", c);
				break;
			}
			if (ret)
				goto end;
		}
		ret = lttng_metadata_printf(session, "\"");
		if (ret)
			goto end;

		/* Auto-valued entries get no explicit value assignment. */
		if (entry->options.is_auto) {
			ret = lttng_metadata_printf(session, ",\n");
			if (ret)
				goto end;
		} else {
			ret = lttng_metadata_printf(session,
					" = ");
			if (ret)
				goto end;
			if (entry->start.signedness)
				ret = lttng_metadata_printf(session,
					"%lld", (long long) entry->start.value);
			else
				ret = lttng_metadata_printf(session,
					"%llu", entry->start.value);
			if (ret)
				goto end;
			/* Single value vs. "start ... end" range. */
			if (entry->start.signedness == entry->end.signedness &&
					entry->start.value
						== entry->end.value) {
				ret = lttng_metadata_printf(session,
					",\n");
			} else {
				if (entry->end.signedness) {
					ret = lttng_metadata_printf(session,
						" ... %lld,\n",
						(long long) entry->end.value);
				} else {
					ret = lttng_metadata_printf(session,
						" ... %llu,\n",
						entry->end.value);
				}
			}
			if (ret)
				goto end;
		}
	}
	ret = print_tabs(session, nesting);
	if (ret)
		goto end;
	ret = lttng_metadata_printf(session, "}");
end:
	return ret;
}
2161
2162 /*
2163 * Must be called with sessions_mutex held.
2164 */
2165 static
2166 int _lttng_enum_field_statedump(struct lttng_session *session,
2167 const struct lttng_event_field *field,
2168 size_t nesting)
2169 {
2170 int ret;
2171
2172 ret = _lttng_enum_type_statedump(session, &field->type, nesting);
2173 if (ret)
2174 return ret;
2175 return lttng_field_name_statedump(session, field, nesting);
2176 }
2177
2178 static
2179 int _lttng_integer_field_statedump(struct lttng_session *session,
2180 const struct lttng_event_field *field,
2181 size_t nesting)
2182 {
2183 int ret;
2184
2185 ret = _lttng_integer_type_statedump(session, &field->type, nesting);
2186 if (ret)
2187 return ret;
2188 return lttng_field_name_statedump(session, field, nesting);
2189 }
2190
2191 static
2192 int _lttng_string_type_statedump(struct lttng_session *session,
2193 const struct lttng_type *type,
2194 size_t nesting)
2195 {
2196 int ret;
2197
2198 WARN_ON_ONCE(type->atype != atype_string);
2199 /* Default encoding is UTF8 */
2200 ret = print_tabs(session, nesting);
2201 if (ret)
2202 return ret;
2203 ret = lttng_metadata_printf(session,
2204 "string%s",
2205 type->u.string.encoding == lttng_encode_ASCII ?
2206 " { encoding = ASCII; }" : "");
2207 return ret;
2208 }
2209
2210 static
2211 int _lttng_string_field_statedump(struct lttng_session *session,
2212 const struct lttng_event_field *field,
2213 size_t nesting)
2214 {
2215 int ret;
2216
2217 WARN_ON_ONCE(field->type.atype != atype_string);
2218 ret = _lttng_string_type_statedump(session, &field->type, nesting);
2219 if (ret)
2220 return ret;
2221 return lttng_field_name_statedump(session, field, nesting);
2222 }
2223
2224 /*
2225 * Must be called with sessions_mutex held.
2226 */
2227 static
2228 int _lttng_type_statedump(struct lttng_session *session,
2229 const struct lttng_type *type,
2230 size_t nesting)
2231 {
2232 int ret = 0;
2233
2234 switch (type->atype) {
2235 case atype_integer:
2236 ret = _lttng_integer_type_statedump(session, type, nesting);
2237 break;
2238 case atype_enum_nestable:
2239 ret = _lttng_enum_type_statedump(session, type, nesting);
2240 break;
2241 case atype_string:
2242 ret = _lttng_string_type_statedump(session, type, nesting);
2243 break;
2244 case atype_struct_nestable:
2245 ret = _lttng_struct_type_statedump(session, type, nesting);
2246 break;
2247 case atype_variant_nestable:
2248 ret = _lttng_variant_type_statedump(session, type, nesting);
2249 break;
2250
2251 /* Nested arrays and sequences are not supported yet. */
2252 case atype_array_nestable:
2253 case atype_sequence_nestable:
2254 default:
2255 WARN_ON_ONCE(1);
2256 return -EINVAL;
2257 }
2258 return ret;
2259 }
2260
2261 /*
2262 * Must be called with sessions_mutex held.
2263 */
2264 static
2265 int _lttng_field_statedump(struct lttng_session *session,
2266 const struct lttng_event_field *field,
2267 size_t nesting)
2268 {
2269 int ret = 0;
2270
2271 switch (field->type.atype) {
2272 case atype_integer:
2273 ret = _lttng_integer_field_statedump(session, field, nesting);
2274 break;
2275 case atype_enum_nestable:
2276 ret = _lttng_enum_field_statedump(session, field, nesting);
2277 break;
2278 case atype_string:
2279 ret = _lttng_string_field_statedump(session, field, nesting);
2280 break;
2281 case atype_struct_nestable:
2282 ret = _lttng_struct_field_statedump(session, field, nesting);
2283 break;
2284 case atype_array_nestable:
2285 ret = _lttng_array_field_statedump(session, field, nesting);
2286 break;
2287 case atype_sequence_nestable:
2288 ret = _lttng_sequence_field_statedump(session, field, nesting);
2289 break;
2290 case atype_variant_nestable:
2291 ret = _lttng_variant_field_statedump(session, field, nesting);
2292 break;
2293
2294 default:
2295 WARN_ON_ONCE(1);
2296 return -EINVAL;
2297 }
2298 return ret;
2299 }
2300
2301 static
2302 int _lttng_context_metadata_statedump(struct lttng_session *session,
2303 struct lttng_ctx *ctx)
2304 {
2305 int ret = 0;
2306 int i;
2307
2308 if (!ctx)
2309 return 0;
2310 for (i = 0; i < ctx->nr_fields; i++) {
2311 const struct lttng_ctx_field *field = &ctx->fields[i];
2312
2313 ret = _lttng_field_statedump(session, &field->event_field, 2);
2314 if (ret)
2315 return ret;
2316 }
2317 return ret;
2318 }
2319
2320 static
2321 int _lttng_fields_metadata_statedump(struct lttng_session *session,
2322 struct lttng_event *event)
2323 {
2324 const struct lttng_event_desc *desc = event->desc;
2325 int ret = 0;
2326 int i;
2327
2328 for (i = 0; i < desc->nr_fields; i++) {
2329 const struct lttng_event_field *field = &desc->fields[i];
2330
2331 ret = _lttng_field_statedump(session, field, 2);
2332 if (ret)
2333 return ret;
2334 }
2335 return ret;
2336 }
2337
2338 /*
2339 * Must be called with sessions_mutex held.
2340 * The entire event metadata is printed as a single atomic metadata
2341 * transaction.
2342 */
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
		struct lttng_channel *chan,
		struct lttng_event *event)
{
	int ret = 0;

	/* Dump each event at most once, and only for active sessions. */
	if (event->metadata_dumped || !READ_ONCE(session->active))
		return 0;
	/* The metadata channel itself is not described in the metadata. */
	if (chan->channel_type == METADATA_CHANNEL)
		return 0;

	lttng_metadata_begin(session);

	ret = lttng_metadata_printf(session,
		"event {\n"
		"	name = \"%s\";\n"
		"	id = %u;\n"
		"	stream_id = %u;\n",
		event->desc->name,
		event->id,
		event->chan->id);
	if (ret)
		goto end;

	/* Optional per-event context fields, wrapped in a struct. */
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			"	context := struct {\n");
		if (ret)
			goto end;
	}
	ret = _lttng_context_metadata_statedump(session, event->ctx);
	if (ret)
		goto end;
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			"	};\n");
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		"	fields := struct {\n"
		);
	if (ret)
		goto end;

	ret = _lttng_fields_metadata_statedump(session, event);
	if (ret)
		goto end;

	/*
	 * LTTng space reservation can only reserve multiples of the
	 * byte size.
	 */
	ret = lttng_metadata_printf(session,
		"	};\n"
		"};\n\n");
	if (ret)
		goto end;

	/* Mark dumped only once the whole description was emitted. */
	event->metadata_dumped = 1;
end:
	lttng_metadata_end(session);
	return ret;

}
2410
2411 /*
2412 * Must be called with sessions_mutex held.
2413 * The entire channel metadata is printed as a single atomic metadata
2414 * transaction.
2415 */
2416 static
2417 int _lttng_channel_metadata_statedump(struct lttng_session *session,
2418 struct lttng_channel *chan)
2419 {
2420 int ret = 0;
2421
2422 if (chan->metadata_dumped || !READ_ONCE(session->active))
2423 return 0;
2424
2425 if (chan->channel_type == METADATA_CHANNEL)
2426 return 0;
2427
2428 lttng_metadata_begin(session);
2429
2430 WARN_ON_ONCE(!chan->header_type);
2431 ret = lttng_metadata_printf(session,
2432 "stream {\n"
2433 " id = %u;\n"
2434 " event.header := %s;\n"
2435 " packet.context := struct packet_context;\n",
2436 chan->id,
2437 chan->header_type == 1 ? "struct event_header_compact" :
2438 "struct event_header_large");
2439 if (ret)
2440 goto end;
2441
2442 if (chan->ctx) {
2443 ret = lttng_metadata_printf(session,
2444 " event.context := struct {\n");
2445 if (ret)
2446 goto end;
2447 }
2448 ret = _lttng_context_metadata_statedump(session, chan->ctx);
2449 if (ret)
2450 goto end;
2451 if (chan->ctx) {
2452 ret = lttng_metadata_printf(session,
2453 " };\n");
2454 if (ret)
2455 goto end;
2456 }
2457
2458 ret = lttng_metadata_printf(session,
2459 "};\n\n");
2460
2461 chan->metadata_dumped = 1;
2462 end:
2463 lttng_metadata_end(session);
2464 return ret;
2465 }
2466
2467 /*
2468 * Must be called with sessions_mutex held.
2469 */
/*
 * Emit the packet context layout shared by all data streams.
 * Must be called with sessions_mutex held.
 */
static
int _lttng_stream_packet_context_declare(struct lttng_session *session)
{
	static const char * const packet_context_decl =
		"struct packet_context {\n"
		"	uint64_clock_monotonic_t timestamp_begin;\n"
		"	uint64_clock_monotonic_t timestamp_end;\n"
		"	uint64_t content_size;\n"
		"	uint64_t packet_size;\n"
		"	uint64_t packet_seq_num;\n"
		"	unsigned long events_discarded;\n"
		"	uint32_t cpu_id;\n"
		"};\n\n";

	return lttng_metadata_printf(session, "%s", packet_context_decl);
}
2485
2486 /*
2487 * Compact header:
2488 * id: range: 0 - 30.
2489 * id 31 is reserved to indicate an extended header.
2490 *
2491 * Large header:
2492 * id: range: 0 - 65534.
2493 * id 65535 is reserved to indicate an extended header.
2494 *
2495 * Must be called with sessions_mutex held.
2496 */
2497 static
2498 int _lttng_event_header_declare(struct lttng_session *session)
2499 {
2500 return lttng_metadata_printf(session,
2501 "struct event_header_compact {\n"
2502 " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
2503 " variant <id> {\n"
2504 " struct {\n"
2505 " uint27_clock_monotonic_t timestamp;\n"
2506 " } compact;\n"
2507 " struct {\n"
2508 " uint32_t id;\n"
2509 " uint64_clock_monotonic_t timestamp;\n"
2510 " } extended;\n"
2511 " } v;\n"
2512 "} align(%u);\n"
2513 "\n"
2514 "struct event_header_large {\n"
2515 " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
2516 " variant <id> {\n"
2517 " struct {\n"
2518 " uint32_clock_monotonic_t timestamp;\n"
2519 " } compact;\n"
2520 " struct {\n"
2521 " uint32_t id;\n"
2522 " uint64_clock_monotonic_t timestamp;\n"
2523 " } extended;\n"
2524 " } v;\n"
2525 "} align(%u);\n\n",
2526 lttng_alignof(uint32_t) * CHAR_BIT,
2527 lttng_alignof(uint16_t) * CHAR_BIT
2528 );
2529 }
2530
2531 /*
2532 * Approximation of NTP time of day to clock monotonic correlation,
2533 * taken at start of trace.
2534 * Yes, this is only an approximation. Yes, we can (and will) do better
2535 * in future versions.
2536 * This function may return a negative offset. It may happen if the
2537 * system sets the REALTIME clock to 0 after boot.
2538 *
2539 * Use 64bit timespec on kernels that have it, this makes 32bit arch
2540 * y2038 compliant.
2541 */
2542 static
2543 int64_t measure_clock_offset(void)
2544 {
2545 uint64_t monotonic_avg, monotonic[2], realtime;
2546 uint64_t tcf = trace_clock_freq();
2547 int64_t offset;
2548 unsigned long flags;
2549 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
2550 struct timespec64 rts = { 0, 0 };
2551 #else
2552 struct timespec rts = { 0, 0 };
2553 #endif
2554
2555 /* Disable interrupts to increase correlation precision. */
2556 local_irq_save(flags);
2557 monotonic[0] = trace_clock_read64();
2558 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
2559 ktime_get_real_ts64(&rts);
2560 #else
2561 getnstimeofday(&rts);
2562 #endif
2563 monotonic[1] = trace_clock_read64();
2564 local_irq_restore(flags);
2565
2566 monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
2567 realtime = (uint64_t) rts.tv_sec * tcf;
2568 if (tcf == NSEC_PER_SEC) {
2569 realtime += rts.tv_nsec;
2570 } else {
2571 uint64_t n = rts.tv_nsec * tcf;
2572
2573 do_div(n, NSEC_PER_SEC);
2574 realtime += n;
2575 }
2576 offset = (int64_t) realtime - monotonic_avg;
2577 return offset;
2578 }
2579
/*
 * Print a string into the metadata, escaping characters that would
 * break a double-quoted CTF (TSDL) string literal.
 */
static
int print_escaped_ctf_string(struct lttng_session *session, const char *string)
{
	const char *p;
	int ret = 0;

	for (p = string; *p != '\0'; p++) {
		switch (*p) {
		case '\n':
			/* Emit a literal backslash-n sequence. */
			ret = lttng_metadata_printf(session, "%s", "\\n");
			break;
		case '\\':
		case '"':
			/* Prefix with a backslash... */
			ret = lttng_metadata_printf(session, "%c", '\\');
			if (ret)
				goto error;
			/* ...then still print the current char */
			/* Fallthrough */
		default:
			ret = lttng_metadata_printf(session, "%c", *p);
			break;
		}

		if (ret)
			goto error;
	}
error:
	return ret;
}
2614
/*
 * Emit one `field = "value";` entry into the metadata, escaping the
 * value so it remains a valid quoted string.
 */
static
int print_metadata_escaped_field(struct lttng_session *session, const char *field,
		const char *field_value)
{
	int ret = lttng_metadata_printf(session, "	%s = \"", field);

	if (ret)
		goto error;
	ret = print_escaped_ctf_string(session, field_value);
	if (ret)
		goto error;
	/* Close the quoted value and terminate the statement. */
	ret = lttng_metadata_printf(session, "\";\n");
error:
	return ret;
}
2634
2635 /*
2636 * Output metadata into this session's metadata buffers.
2637 * Must be called with sessions_mutex held.
2638 */
static
int _lttng_session_metadata_statedump(struct lttng_session *session)
{
	unsigned char *uuid_c = session->uuid.b;
	unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
	const char *product_uuid;
	struct lttng_channel *chan;
	struct lttng_event *event;
	int ret = 0;

	if (!READ_ONCE(session->active))
		return 0;

	lttng_metadata_begin(session);

	/* The session-wide preamble below is emitted only once. */
	if (session->metadata_dumped)
		goto skip_session;

	/* Render the 16-byte binary trace UUID as canonical text. */
	snprintf(uuid_s, sizeof(uuid_s),
		"%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
		uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
		uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
		uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
		uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);

	/* Base integer typealiases and the top-level trace block. */
	ret = lttng_metadata_printf(session,
		"typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
		"typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
		"typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
		"typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
		"typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
		"typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
		"typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
		"\n"
		"trace {\n"
		"	major = %u;\n"
		"	minor = %u;\n"
		"	uuid = \"%s\";\n"
		"	byte_order = %s;\n"
		"	packet.header := struct {\n"
		"		uint32_t magic;\n"
		"		uint8_t uuid[16];\n"
		"		uint32_t stream_id;\n"
		"		uint64_t stream_instance_id;\n"
		"	};\n"
		"};\n\n",
		lttng_alignof(uint8_t) * CHAR_BIT,
		lttng_alignof(uint16_t) * CHAR_BIT,
		lttng_alignof(uint32_t) * CHAR_BIT,
		lttng_alignof(uint64_t) * CHAR_BIT,
		sizeof(unsigned long) * CHAR_BIT,
		lttng_alignof(unsigned long) * CHAR_BIT,
		CTF_SPEC_MAJOR,
		CTF_SPEC_MINOR,
		uuid_s,
#if __BYTE_ORDER == __BIG_ENDIAN
		"be"
#else
		"le"
#endif
		);
	if (ret)
		goto end;

	/* Environment block: tracer identity and host information. */
	ret = lttng_metadata_printf(session,
		"env {\n"
		"	hostname = \"%s\";\n"
		"	domain = \"kernel\";\n"
		"	sysname = \"%s\";\n"
		"	kernel_release = \"%s\";\n"
		"	kernel_version = \"%s\";\n"
		"	tracer_name = \"lttng-modules\";\n"
		"	tracer_major = %d;\n"
		"	tracer_minor = %d;\n"
		"	tracer_patchlevel = %d;\n"
		"	trace_buffering_scheme = \"global\";\n",
		current->nsproxy->uts_ns->name.nodename,
		utsname()->sysname,
		utsname()->release,
		utsname()->version,
		LTTNG_MODULES_MAJOR_VERSION,
		LTTNG_MODULES_MINOR_VERSION,
		LTTNG_MODULES_PATCHLEVEL_VERSION
		);
	if (ret)
		goto end;

	/* User-provided strings are escaped so they stay valid TSDL. */
	ret = print_metadata_escaped_field(session, "trace_name", session->name);
	if (ret)
		goto end;
	ret = print_metadata_escaped_field(session, "trace_creation_datetime",
			session->creation_time);
	if (ret)
		goto end;

	/* Add the product UUID to the 'env' section */
	product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
	if (product_uuid) {
		ret = lttng_metadata_printf(session,
				"	product_uuid = \"%s\";\n",
				product_uuid
				);
		if (ret)
			goto end;
	}

	/* Close the 'env' section */
	ret = lttng_metadata_printf(session, "};\n\n");
	if (ret)
		goto end;

	/* Clock description, including the measured REALTIME offset. */
	ret = lttng_metadata_printf(session,
		"clock {\n"
		"	name = \"%s\";\n",
		trace_clock_name()
		);
	if (ret)
		goto end;

	/* The clock UUID is optional; skip it when unavailable. */
	if (!trace_clock_uuid(clock_uuid_s)) {
		ret = lttng_metadata_printf(session,
			"	uuid = \"%s\";\n",
			clock_uuid_s
			);
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		"	description = \"%s\";\n"
		"	freq = %llu; /* Frequency, in Hz */\n"
		"	/* clock value offset from Epoch is: offset * (1/freq) */\n"
		"	offset = %lld;\n"
		"};\n\n",
		trace_clock_description(),
		(unsigned long long) trace_clock_freq(),
		(long long) measure_clock_offset()
		);
	if (ret)
		goto end;

	/* Clock-mapped timestamp typealiases used by the event headers. */
	ret = lttng_metadata_printf(session,
		"typealias integer {\n"
		"	size = 27; align = 1; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint27_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		"	size = 32; align = %u; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint32_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		"	size = 64; align = %u; signed = false;\n"
		"	map = clock.%s.value;\n"
		"} := uint64_clock_monotonic_t;\n\n",
		trace_clock_name(),
		lttng_alignof(uint32_t) * CHAR_BIT,
		trace_clock_name(),
		lttng_alignof(uint64_t) * CHAR_BIT,
		trace_clock_name()
		);
	if (ret)
		goto end;

	ret = _lttng_stream_packet_context_declare(session);
	if (ret)
		goto end;

	ret = _lttng_event_header_declare(session);
	if (ret)
		goto end;

skip_session:
	/* Per-channel and per-event descriptions, dumped at most once each. */
	list_for_each_entry(chan, &session->chan, list) {
		ret = _lttng_channel_metadata_statedump(session, chan);
		if (ret)
			goto end;
	}

	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_metadata_statedump(session, event->chan, event);
		if (ret)
			goto end;
	}
	session->metadata_dumped = 1;
end:
	lttng_metadata_end(session);
	return ret;
}
2829
2830 /**
2831 * lttng_transport_register - LTT transport registration
2832 * @transport: transport structure
2833 *
2834 * Registers a transport which can be used as output to extract the data out of
2835 * LTTng. The module calling this registration function must ensure that no
2836 * trap-inducing code will be executed by the transport functions. E.g.
2837 * vmalloc_sync_mappings() must be called between a vmalloc and the moment the memory
2838 * is made visible to the transport function. This registration acts as a
2839 * vmalloc_sync_mappings. Therefore, only if the module allocates virtual memory
2840 * after its registration must it synchronize the TLBs.
2841 */
void lttng_transport_register(struct lttng_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_mappings() in each module's init.
	 */
	wrapper_vmalloc_sync_mappings();

	/* sessions_mutex protects the global transport list. */
	mutex_lock(&sessions_mutex);
	list_add_tail(&transport->node, &lttng_transport_list);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_register);
2856
2857 /**
2858 * lttng_transport_unregister - LTT transport unregistration
2859 * @transport: transport structure
2860 */
void lttng_transport_unregister(struct lttng_transport *transport)
{
	/* Unlink under sessions_mutex, which protects the transport list. */
	mutex_lock(&sessions_mutex);
	list_del(&transport->node);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_unregister);
2868
2869 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
2870
2871 enum cpuhp_state lttng_hp_prepare;
2872 enum cpuhp_state lttng_hp_online;
2873
2874 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
2875 {
2876 struct lttng_cpuhp_node *lttng_node;
2877
2878 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
2879 switch (lttng_node->component) {
2880 case LTTNG_RING_BUFFER_FRONTEND:
2881 return 0;
2882 case LTTNG_RING_BUFFER_BACKEND:
2883 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
2884 case LTTNG_RING_BUFFER_ITER:
2885 return 0;
2886 case LTTNG_CONTEXT_PERF_COUNTERS:
2887 return 0;
2888 default:
2889 return -EINVAL;
2890 }
2891 }
2892
2893 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
2894 {
2895 struct lttng_cpuhp_node *lttng_node;
2896
2897 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
2898 switch (lttng_node->component) {
2899 case LTTNG_RING_BUFFER_FRONTEND:
2900 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
2901 case LTTNG_RING_BUFFER_BACKEND:
2902 return 0;
2903 case LTTNG_RING_BUFFER_ITER:
2904 return 0;
2905 case LTTNG_CONTEXT_PERF_COUNTERS:
2906 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
2907 default:
2908 return -EINVAL;
2909 }
2910 }
2911
2912 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
2913 {
2914 struct lttng_cpuhp_node *lttng_node;
2915
2916 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
2917 switch (lttng_node->component) {
2918 case LTTNG_RING_BUFFER_FRONTEND:
2919 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
2920 case LTTNG_RING_BUFFER_BACKEND:
2921 return 0;
2922 case LTTNG_RING_BUFFER_ITER:
2923 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
2924 case LTTNG_CONTEXT_PERF_COUNTERS:
2925 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
2926 default:
2927 return -EINVAL;
2928 }
2929 }
2930
2931 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
2932 {
2933 struct lttng_cpuhp_node *lttng_node;
2934
2935 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
2936 switch (lttng_node->component) {
2937 case LTTNG_RING_BUFFER_FRONTEND:
2938 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
2939 case LTTNG_RING_BUFFER_BACKEND:
2940 return 0;
2941 case LTTNG_RING_BUFFER_ITER:
2942 return 0;
2943 case LTTNG_CONTEXT_PERF_COUNTERS:
2944 return 0;
2945 default:
2946 return -EINVAL;
2947 }
2948 }
2949
2950 static int __init lttng_init_cpu_hotplug(void)
2951 {
2952 int ret;
2953
2954 ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
2955 lttng_hotplug_prepare,
2956 lttng_hotplug_dead);
2957 if (ret < 0) {
2958 return ret;
2959 }
2960 lttng_hp_prepare = ret;
2961 lttng_rb_set_hp_prepare(ret);
2962
2963 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
2964 lttng_hotplug_online,
2965 lttng_hotplug_offline);
2966 if (ret < 0) {
2967 cpuhp_remove_multi_state(lttng_hp_prepare);
2968 lttng_hp_prepare = 0;
2969 return ret;
2970 }
2971 lttng_hp_online = ret;
2972 lttng_rb_set_hp_online(ret);
2973
2974 return 0;
2975 }
2976
static void __exit lttng_exit_cpu_hotplug(void)
{
	/*
	 * Tear down in reverse order of setup: clear each state handle
	 * published to the ring buffer before removing the cpuhp state,
	 * online states first, then prepare states.
	 */
	lttng_rb_set_hp_online(0);
	cpuhp_remove_multi_state(lttng_hp_online);
	lttng_rb_set_hp_prepare(0);
	cpuhp_remove_multi_state(lttng_hp_prepare);
}
2984
2985 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
/* CPU hotplug multi-state API unavailable (< 4.10): nothing to set up. */
static int lttng_init_cpu_hotplug(void)
{
	return 0;
}
/* CPU hotplug multi-state API unavailable (< 4.10): nothing to tear down. */
static void lttng_exit_cpu_hotplug(void)
{
}
2993 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
2994
2995
static int __init lttng_events_init(void)
{
	int ret;

	/* Resolve kernel symbols needed by the wrapper layers first. */
	ret = wrapper_lttng_fixup_sig(THIS_MODULE);
	if (ret)
		return ret;
	ret = wrapper_get_pfnblock_flags_mask_init();
	if (ret)
		return ret;
	ret = wrapper_get_pageblock_flags_mask_init();
	if (ret)
		return ret;
	ret = lttng_probes_init();
	if (ret)
		return ret;
	ret = lttng_context_init();
	if (ret)
		return ret;
	ret = lttng_tracepoint_init();
	if (ret)
		goto error_tp;
	/* Slab cache backing struct lttng_event allocations. */
	event_cache = KMEM_CACHE(lttng_event, 0);
	if (!event_cache) {
		ret = -ENOMEM;
		goto error_kmem;
	}
	ret = lttng_abi_init();
	if (ret)
		goto error_abi;
	ret = lttng_logger_init();
	if (ret)
		goto error_logger;
	ret = lttng_init_cpu_hotplug();
	if (ret)
		goto error_hotplug;
	printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return 0;

	/* Unwind in reverse order of initialization. */
error_hotplug:
	lttng_logger_exit();
error_logger:
	lttng_abi_exit();
error_abi:
	kmem_cache_destroy(event_cache);
error_kmem:
	lttng_tracepoint_exit();
error_tp:
	lttng_context_exit();
	printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
	return ret;
}

module_init(lttng_events_init);
3080
static void __exit lttng_events_exit(void)
{
	struct lttng_session *session, *tmpsession;

	/* Tear down in reverse order of lttng_events_init(). */
	lttng_exit_cpu_hotplug();
	lttng_logger_exit();
	lttng_abi_exit();
	/* Destroy remaining sessions before freeing the event cache. */
	list_for_each_entry_safe(session, tmpsession, &sessions, list)
		lttng_session_destroy(session);
	kmem_cache_destroy(event_cache);
	lttng_tracepoint_exit();
	lttng_context_exit();
	printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
		__stringify(LTTNG_MODULES_MAJOR_VERSION),
		__stringify(LTTNG_MODULES_MINOR_VERSION),
		__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
		LTTNG_MODULES_EXTRAVERSION,
		LTTNG_VERSION_NAME,
#ifdef LTTNG_EXTRA_VERSION_GIT
		LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
#else
		"",
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
		LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
#else
		"");
#endif
}

module_exit(lttng_events_exit);
3112
/* Module metadata: version, optional extra-version info, license, author. */
#include <generated/patches.h>
#ifdef LTTNG_EXTRA_VERSION_GIT
MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
#endif
#ifdef LTTNG_EXTRA_VERSION_NAME
MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
#endif
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
MODULE_DESCRIPTION("LTTng tracer");
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);
This page took 0.170278 seconds and 4 git commands to generate.