Fix: integer endianness metadata generation
[lttng-modules.git] / lttng-events.c
1 /*
2 * lttng-events.c
3 *
4 * Holds LTTng per-session event registry.
5 *
6 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 /*
24 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
25 * overrides a function with a define.
26 */
27 #include "wrapper/page_alloc.h"
28
29 #include <linux/module.h>
30 #include <linux/mutex.h>
31 #include <linux/sched.h>
32 #include <linux/slab.h>
33 #include <linux/jiffies.h>
34 #include <linux/utsname.h>
35 #include <linux/err.h>
36 #include <linux/seq_file.h>
37 #include <linux/file.h>
38 #include <linux/anon_inodes.h>
39 #include "wrapper/file.h"
40 #include <linux/jhash.h>
41 #include <linux/uaccess.h>
42 #include <linux/vmalloc.h>
43
44 #include "wrapper/uuid.h"
45 #include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
46 #include "wrapper/random.h"
47 #include "wrapper/tracepoint.h"
48 #include "wrapper/list.h"
49 #include "lttng-kernel-version.h"
50 #include "lttng-events.h"
51 #include "lttng-tracer.h"
52 #include "lttng-abi-old.h"
53 #include "lttng-endian.h"
54 #include "wrapper/vzalloc.h"
55
/* Default size, in bytes, of a newly allocated metadata cache buffer. */
#define METADATA_CACHE_DEFAULT_SIZE 4096

/* Global registry of all tracing sessions. */
static LIST_HEAD(sessions);
/* Global registry of available ring-buffer transports. */
static LIST_HEAD(lttng_transport_list);
/*
 * Protect the sessions and metadata caches.
 */
static DEFINE_MUTEX(sessions_mutex);
/* Slab cache for struct lttng_event allocations. */
static struct kmem_cache *event_cache;
65
/* Forward declarations of file-local helpers defined below. */
static void lttng_session_lazy_sync_enablers(struct lttng_session *session);
static void lttng_session_sync_enablers(struct lttng_session *session);
static void lttng_enabler_destroy(struct lttng_enabler *enabler);

static void _lttng_event_destroy(struct lttng_event *event);
static void _lttng_channel_destroy(struct lttng_channel *chan);
static int _lttng_event_unregister(struct lttng_event *event);
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
				  struct lttng_channel *chan,
				  struct lttng_event *event);
static
int _lttng_session_metadata_statedump(struct lttng_session *session);
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
81
/*
 * Wait for all currently-executing probe handlers to complete.
 * On PREEMPT_RT(_FULL) kernels tracepoint probes may run in preemptible
 * context, so an additional synchronize_rcu() is needed on top of
 * synchronize_sched().  The config symbol name changed across kernel
 * versions, hence the version-gated #ifdefs.
 */
void synchronize_trace(void)
{
	synchronize_sched();
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
#ifdef CONFIG_PREEMPT_RT_FULL
	synchronize_rcu();
#endif
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
#ifdef CONFIG_PREEMPT_RT
	synchronize_rcu();
#endif
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
}
95
/* Acquire the global sessions mutex (exported locking entry point). */
void lttng_lock_sessions(void)
{
	mutex_lock(&sessions_mutex);
}
100
/* Release the global sessions mutex. */
void lttng_unlock_sessions(void)
{
	mutex_unlock(&sessions_mutex);
}
105
106 /*
107 * Called with sessions lock held.
108 */
109 int lttng_session_active(void)
110 {
111 struct lttng_session *iter;
112
113 list_for_each_entry(iter, &sessions, list) {
114 if (iter->active)
115 return 1;
116 }
117 return 0;
118 }
119
/*
 * Allocate, initialize and register a new tracing session along with
 * its metadata cache.  Returns the new session, or NULL on allocation
 * failure.  Takes the sessions mutex internally.
 */
struct lttng_session *lttng_session_create(void)
{
	struct lttng_session *session;
	struct lttng_metadata_cache *metadata_cache;
	int i;

	mutex_lock(&sessions_mutex);
	session = kzalloc(sizeof(struct lttng_session), GFP_KERNEL);
	if (!session)
		goto err;
	INIT_LIST_HEAD(&session->chan);
	INIT_LIST_HEAD(&session->events);
	uuid_le_gen(&session->uuid);

	metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
			GFP_KERNEL);
	if (!metadata_cache)
		goto err_free_session;
	/* Metadata text buffer; starts at the default size. */
	metadata_cache->data = lttng_vzalloc(METADATA_CACHE_DEFAULT_SIZE);
	if (!metadata_cache->data)
		goto err_free_cache;
	metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
	kref_init(&metadata_cache->refcount);
	mutex_init(&metadata_cache->lock);
	session->metadata_cache = metadata_cache;
	INIT_LIST_HEAD(&metadata_cache->metadata_stream);
	/* The cache keeps a copy of the session UUID for the trace header. */
	memcpy(&metadata_cache->uuid, &session->uuid,
		sizeof(metadata_cache->uuid));
	INIT_LIST_HEAD(&session->enablers_head);
	for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
		INIT_HLIST_HEAD(&session->events_ht.table[i]);
	list_add(&session->list, &sessions);
	mutex_unlock(&sessions_mutex);
	return session;

err_free_cache:
	kfree(metadata_cache);
err_free_session:
	kfree(session);
err:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
163
164 void metadata_cache_destroy(struct kref *kref)
165 {
166 struct lttng_metadata_cache *cache =
167 container_of(kref, struct lttng_metadata_cache, refcount);
168 vfree(cache->data);
169 kfree(cache);
170 }
171
/*
 * Tear down a session: unregister all instrumentation, wait for
 * in-flight probes to drain, destroy enablers, events and channels,
 * hang up metadata streams, and drop the metadata cache reference.
 */
void lttng_session_destroy(struct lttng_session *session)
{
	struct lttng_channel *chan, *tmpchan;
	struct lttng_event *event, *tmpevent;
	struct lttng_metadata_stream *metadata_stream;
	struct lttng_enabler *enabler, *tmpenabler;
	int ret;

	mutex_lock(&sessions_mutex);
	ACCESS_ONCE(session->active) = 0;
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_unregister(chan);
		WARN_ON(ret);
	}
	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_unregister(event);
		WARN_ON(ret);
	}
	synchronize_trace(); /* Wait for in-flight events to complete */
	list_for_each_entry_safe(enabler, tmpenabler,
			&session->enablers_head, node)
		lttng_enabler_destroy(enabler);
	list_for_each_entry_safe(event, tmpevent, &session->events, list)
		_lttng_event_destroy(event);
	list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
		/* Metadata channels are released through their own path. */
		BUG_ON(chan->channel_type == METADATA_CHANNEL);
		_lttng_channel_destroy(chan);
	}
	list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
		_lttng_metadata_channel_hangup(metadata_stream);
	if (session->pid_tracker)
		lttng_pid_tracker_destroy(session->pid_tracker);
	kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
	list_del(&session->list);
	mutex_unlock(&sessions_mutex);
	kfree(session);
}
209
/*
 * Activate a tracing session: pick the event header type per channel,
 * sync enablers, mark the session active, then emit the metadata and
 * kick off the statedump.  Returns 0 on success, -EBUSY if the session
 * is already active, or the statedump error code.
 */
int lttng_session_enable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (session->active) {
		ret = -EBUSY;
		goto end;
	}

	/* Set transient enabler state to "enabled" */
	session->tstate = 1;

	/*
	 * Snapshot the number of events per channel to know the type of header
	 * we need to use.
	 */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->header_type)
			continue;		/* don't change it if session stop/restart */
		/* Few event IDs fit in the compact header encoding. */
		if (chan->free_event_id < 31)
			chan->header_type = 1;	/* compact */
		else
			chan->header_type = 2;	/* large */
	}

	/* We need to sync enablers with session before activation. */
	lttng_session_sync_enablers(session);

	ACCESS_ONCE(session->active) = 1;
	ACCESS_ONCE(session->been_active) = 1;
	ret = _lttng_session_metadata_statedump(session);
	if (ret) {
		/* Roll back activation on metadata failure. */
		ACCESS_ONCE(session->active) = 0;
		goto end;
	}
	ret = lttng_statedump_start(session);
	if (ret)
		ACCESS_ONCE(session->active) = 0;
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
254
255 int lttng_session_disable(struct lttng_session *session)
256 {
257 int ret = 0;
258
259 mutex_lock(&sessions_mutex);
260 if (!session->active) {
261 ret = -EBUSY;
262 goto end;
263 }
264 ACCESS_ONCE(session->active) = 0;
265
266 /* Set transient enabler state to "disabled" */
267 session->tstate = 0;
268 lttng_session_sync_enablers(session);
269 end:
270 mutex_unlock(&sessions_mutex);
271 return ret;
272 }
273
274 int lttng_channel_enable(struct lttng_channel *channel)
275 {
276 int ret = 0;
277
278 mutex_lock(&sessions_mutex);
279 if (channel->channel_type == METADATA_CHANNEL) {
280 ret = -EPERM;
281 goto end;
282 }
283 if (channel->enabled) {
284 ret = -EEXIST;
285 goto end;
286 }
287 /* Set transient enabler state to "enabled" */
288 channel->tstate = 1;
289 lttng_session_sync_enablers(channel->session);
290 /* Set atomically the state to "enabled" */
291 ACCESS_ONCE(channel->enabled) = 1;
292 end:
293 mutex_unlock(&sessions_mutex);
294 return ret;
295 }
296
297 int lttng_channel_disable(struct lttng_channel *channel)
298 {
299 int ret = 0;
300
301 mutex_lock(&sessions_mutex);
302 if (channel->channel_type == METADATA_CHANNEL) {
303 ret = -EPERM;
304 goto end;
305 }
306 if (!channel->enabled) {
307 ret = -EEXIST;
308 goto end;
309 }
310 /* Set atomically the state to "disabled" */
311 ACCESS_ONCE(channel->enabled) = 0;
312 /* Set transient enabler state to "enabled" */
313 channel->tstate = 0;
314 lttng_session_sync_enablers(channel->session);
315 end:
316 mutex_unlock(&sessions_mutex);
317 return ret;
318 }
319
/*
 * Enable a single event.  Tracepoint and syscall events are controlled
 * exclusively through enablers, so direct enabling returns -EINVAL for
 * them.  Returns -EPERM for metadata-channel events, -EEXIST if the
 * event is already enabled.
 */
int lttng_event_enable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		/* State is managed by enabler sync, not directly. */
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
		ACCESS_ONCE(event->enabled) = 1;
		break;
	case LTTNG_KERNEL_KRETPROBE:
		/* Propagates state to both entry and return events. */
		ret = lttng_kretprobes_event_enable_state(event, 1);
		break;
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
354
/*
 * Disable a single event.  Mirrors lttng_event_enable(): tracepoint
 * and syscall events are enabler-controlled (-EINVAL), metadata-channel
 * events are off-limits (-EPERM), already-disabled events yield -EEXIST.
 */
int lttng_event_disable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (!event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		/* State is managed by enabler sync, not directly. */
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
		ACCESS_ONCE(event->enabled) = 0;
		break;
	case LTTNG_KERNEL_KRETPROBE:
		/* Propagates state to both entry and return events. */
		ret = lttng_kretprobes_event_enable_state(event, 0);
		break;
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
389
390 static struct lttng_transport *lttng_transport_find(const char *name)
391 {
392 struct lttng_transport *transport;
393
394 list_for_each_entry(transport, &lttng_transport_list, node) {
395 if (!strcmp(transport->name, name))
396 return transport;
397 }
398 return NULL;
399 }
400
/*
 * Create a channel within a session using the named transport.
 * Refuses to add non-metadata channels to a session that has already
 * been active.  Holds a module reference on the transport for the
 * channel's lifetime.  Returns the channel, or NULL on failure.
 */
struct lttng_channel *lttng_channel_create(struct lttng_session *session,
				       const char *transport_name,
				       void *buf_addr,
				       size_t subbuf_size, size_t num_subbuf,
				       unsigned int switch_timer_interval,
				       unsigned int read_timer_interval,
				       enum channel_type channel_type)
{
	struct lttng_channel *chan;
	struct lttng_transport *transport = NULL;

	mutex_lock(&sessions_mutex);
	if (session->been_active && channel_type != METADATA_CHANNEL)
		goto active;	/* Refuse to add channel to active session */
	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTT : Can't lock transport module.\n");
		goto notransport;
	}
	chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
	if (!chan)
		goto nomem;
	chan->session = session;
	chan->id = session->free_chan_id++;
	chan->ops = &transport->ops;
	/*
	 * Note: the channel creation op already writes into the packet
	 * headers. Therefore the "chan" information used as input
	 * should be already accessible.
	 */
	chan->chan = transport->ops.channel_create(transport_name,
			chan, buf_addr, subbuf_size, num_subbuf,
			switch_timer_interval, read_timer_interval);
	if (!chan->chan)
		goto create_error;
	chan->tstate = 1;
	chan->enabled = 1;
	chan->transport = transport;
	chan->channel_type = channel_type;
	list_add(&chan->list, &session->chan);
	mutex_unlock(&sessions_mutex);
	return chan;

create_error:
	kfree(chan);
nomem:
	if (transport)
		module_put(transport->owner);
notransport:
active:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
459
/*
 * Only used internally at session destruction for per-cpu channels, and
 * when metadata channel is released.
 * Needs to be called with sessions mutex held.
 */
static
void _lttng_channel_destroy(struct lttng_channel *chan)
{
	chan->ops->channel_destroy(chan->chan);
	/* Drop the transport module reference taken at channel creation. */
	module_put(chan->transport->owner);
	list_del(&chan->list);
	lttng_destroy_context(chan->ctx);
	kfree(chan);
}
474
/* Destroy a metadata channel; only valid for METADATA_CHANNEL type. */
void lttng_metadata_channel_destroy(struct lttng_channel *chan)
{
	BUG_ON(chan->channel_type != METADATA_CHANNEL);

	/* Protect the metadata cache with the sessions_mutex. */
	mutex_lock(&sessions_mutex);
	_lttng_channel_destroy(chan);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
485
/* Mark a metadata stream finalized and wake up any blocked readers. */
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
{
	stream->finalized = 1;
	wake_up_interruptible(&stream->read_wait);
}
492
/*
 * Supports event creation while tracing session is active.
 * Needs to be called with sessions mutex held.
 *
 * Allocates a new lttng_event in @chan, registers its instrumentation
 * (tracepoint lookup, kprobe/kretprobe/ftrace registration, or
 * direct descriptor for noop/syscall), emits its metadata, and links
 * it into the session's event list and hash table.
 * Returns the event, or an ERR_PTR() error value on failure (never NULL).
 */
struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
				struct lttng_kernel_event *event_param,
				void *filter,
				const struct lttng_event_desc *event_desc,
				enum lttng_kernel_instrumentation itype)
{
	struct lttng_session *session = chan->session;
	struct lttng_event *event;
	const char *event_name;
	struct hlist_head *head;
	size_t name_len;
	uint32_t hash;
	int ret;

	/* Event IDs are allocated sequentially; -1U means exhausted. */
	if (chan->free_event_id == -1U) {
		ret = -EMFILE;
		goto full;
	}

	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		event_name = event_desc->name;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		event_name = event_param->name;
		break;
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto type_error;
	}
	/* Refuse duplicate (name, channel) pairs via the session hash table. */
	name_len = strlen(event_name);
	hash = jhash(event_name, name_len, 0);
	head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
	lttng_hlist_for_each_entry(event, head, hlist) {
		WARN_ON_ONCE(!event->desc);
		if (!strncmp(event->desc->name, event_name,
					LTTNG_KERNEL_SYM_NAME_LEN - 1)
				&& chan == event->chan) {
			ret = -EEXIST;
			goto exist;
		}
	}

	event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
	if (!event) {
		ret = -ENOMEM;
		goto cache_error;
	}
	event->chan = chan;
	event->filter = filter;
	event->id = chan->free_event_id++;
	event->instrumentation = itype;
	event->evtype = LTTNG_TYPE_EVENT;
	INIT_LIST_HEAD(&event->bytecode_runtime_head);
	INIT_LIST_HEAD(&event->enablers_ref_head);

	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Event will be enabled by enabler sync. */
		event->enabled = 0;
		event->registered = 0;
		event->desc = lttng_event_get(event_name);
		if (!event->desc) {
			ret = -ENOENT;
			goto register_error;
		}
		/* Populate lttng_event structure before event registration. */
		smp_wmb();
		break;
	case LTTNG_KERNEL_KPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();
		ret = lttng_kprobes_register(event_name,
				event_param->u.kprobe.symbol_name,
				event_param->u.kprobe.offset,
				event_param->u.kprobe.addr,
				event);
		if (ret) {
			ret = -EINVAL;
			goto register_error;
		}
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_KRETPROBE:
	{
		struct lttng_event *event_return;

		/* kretprobe defines 2 events */
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		event_return =
			kmem_cache_zalloc(event_cache, GFP_KERNEL);
		if (!event_return) {
			ret = -ENOMEM;
			goto register_error;
		}
		event_return->chan = chan;
		event_return->filter = filter;
		event_return->id = chan->free_event_id++;
		event_return->enabled = 0;
		event_return->registered = 1;
		event_return->instrumentation = itype;
		/*
		 * Populate lttng_event structure before kretprobe registration.
		 */
		smp_wmb();
		ret = lttng_kretprobes_register(event_name,
				event_param->u.kretprobe.symbol_name,
				event_param->u.kretprobe.offset,
				event_param->u.kretprobe.addr,
				event, event_return);
		if (ret) {
			kmem_cache_free(event_cache, event_return);
			ret = -EINVAL;
			goto register_error;
		}
		/* Take 2 refs on the module: one per event. */
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = _lttng_event_metadata_statedump(chan->session, chan,
						    event_return);
		WARN_ON_ONCE(ret > 0);
		if (ret) {
			/* Roll back the return event and its module refs. */
			kmem_cache_free(event_cache, event_return);
			module_put(event->desc->owner);
			module_put(event->desc->owner);
			goto statedump_error;
		}
		list_add(&event_return->list, &chan->session->events);
		break;
	}
	case LTTNG_KERNEL_FUNCTION:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();
		ret = lttng_ftrace_register(event_name,
				event_param->u.ftrace.symbol_name,
				event);
		if (ret) {
			goto register_error;
		}
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 0;
		event->desc = event_desc;
		if (!event->desc) {
			ret = -EINVAL;
			goto register_error;
		}
		break;
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto register_error;
	}
	ret = _lttng_event_metadata_statedump(chan->session, chan, event);
	WARN_ON_ONCE(ret > 0);
	if (ret) {
		goto statedump_error;
	}
	hlist_add_head(&event->hlist, head);
	list_add(&event->list, &chan->session->events);
	return event;

statedump_error:
	/* If a statedump error occurs, events will not be readable. */
register_error:
	kmem_cache_free(event_cache, event);
cache_error:
exist:
type_error:
full:
	return ERR_PTR(ret);
}
708
709 struct lttng_event *lttng_event_create(struct lttng_channel *chan,
710 struct lttng_kernel_event *event_param,
711 void *filter,
712 const struct lttng_event_desc *event_desc,
713 enum lttng_kernel_instrumentation itype)
714 {
715 struct lttng_event *event;
716
717 mutex_lock(&sessions_mutex);
718 event = _lttng_event_create(chan, event_param, filter, event_desc,
719 itype);
720 mutex_unlock(&sessions_mutex);
721 return event;
722 }
723
/* Only used for tracepoints for now. */
/*
 * Attach the event's probe to its instrumentation source if not
 * already registered.  Kprobe/kretprobe/function/noop events register
 * at creation time, so they are treated as a no-op success here.
 */
static
void register_event(struct lttng_event *event)
{
	const struct lttng_event_desc *desc;
	int ret = -EINVAL;

	if (event->registered)
		return;

	desc = event->desc;
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
						  desc->probe_callback,
						  event);
		break;
	case LTTNG_KERNEL_SYSCALL:
		ret = lttng_syscall_filter_enable(event->chan,
			desc->name);
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
		ret = 0;
		break;
	default:
		WARN_ON_ONCE(1);
	}
	if (!ret)
		event->registered = 1;
}
757
/*
 * Only used internally at session destruction.
 *
 * Detach the event's probe from its instrumentation source.
 * Returns 0 on success (or if already unregistered), negative error
 * otherwise.
 */
int _lttng_event_unregister(struct lttng_event *event)
{
	const struct lttng_event_desc *desc;
	int ret = -EINVAL;

	if (!event->registered)
		return 0;

	desc = event->desc;
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
						  event->desc->probe_callback,
						  event);
		break;
	case LTTNG_KERNEL_KPROBE:
		lttng_kprobes_unregister(event);
		ret = 0;
		break;
	case LTTNG_KERNEL_KRETPROBE:
		lttng_kretprobes_unregister(event);
		ret = 0;
		break;
	case LTTNG_KERNEL_FUNCTION:
		lttng_ftrace_unregister(event);
		ret = 0;
		break;
	case LTTNG_KERNEL_SYSCALL:
		ret = lttng_syscall_filter_disable(event->chan,
			desc->name);
		break;
	case LTTNG_KERNEL_NOOP:
		ret = 0;
		break;
	default:
		WARN_ON_ONCE(1);
	}
	if (!ret)
		event->registered = 0;
	return ret;
}
802
/*
 * Only used internally at session destruction.
 *
 * Release the per-instrumentation resources held by an event
 * (descriptor reference or module reference plus private data),
 * then unlink and free the event itself.
 */
static
void _lttng_event_destroy(struct lttng_event *event)
{
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Drop the descriptor reference taken by lttng_event_get(). */
		lttng_event_put(event->desc);
		break;
	case LTTNG_KERNEL_KPROBE:
		module_put(event->desc->owner);
		lttng_kprobes_destroy_private(event);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		module_put(event->desc->owner);
		lttng_kretprobes_destroy_private(event);
		break;
	case LTTNG_KERNEL_FUNCTION:
		module_put(event->desc->owner);
		lttng_ftrace_destroy_private(event);
		break;
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		break;
	default:
		WARN_ON_ONCE(1);
	}
	list_del(&event->list);
	lttng_destroy_context(event->ctx);
	kmem_cache_free(event_cache, event);
}
835
/*
 * Add @pid to the session's PID tracker.  pid == -1 means "track all
 * PIDs", implemented by destroying the tracker entirely.
 * Returns 0 on success or a negative error code.
 */
int lttng_session_track_pid(struct lttng_session *session, int pid)
{
	int ret;

	if (pid < -1)
		return -EINVAL;
	mutex_lock(&sessions_mutex);
	if (pid == -1) {
		/* track all pids: destroy tracker. */
		if (session->pid_tracker) {
			struct lttng_pid_tracker *lpf;

			lpf = session->pid_tracker;
			rcu_assign_pointer(session->pid_tracker, NULL);
			/* Wait for readers before freeing the old tracker. */
			synchronize_trace();
			lttng_pid_tracker_destroy(lpf);
		}
		ret = 0;
	} else {
		if (!session->pid_tracker) {
			struct lttng_pid_tracker *lpf;

			lpf = lttng_pid_tracker_create();
			if (!lpf) {
				ret = -ENOMEM;
				goto unlock;
			}
			ret = lttng_pid_tracker_add(lpf, pid);
			/*
			 * NOTE(review): the tracker is published even if
			 * lttng_pid_tracker_add() failed — confirm whether an
			 * empty tracker is the intended outcome on failure.
			 */
			rcu_assign_pointer(session->pid_tracker, lpf);
		} else {
			ret = lttng_pid_tracker_add(session->pid_tracker, pid);
		}
	}
unlock:
	mutex_unlock(&sessions_mutex);
	return ret;
}
873
/*
 * Remove @pid from the session's PID tracker.  pid == -1 means
 * "untrack all", implemented by swapping in a fresh empty tracker.
 * Returns 0 on success or a negative error code.
 */
int lttng_session_untrack_pid(struct lttng_session *session, int pid)
{
	int ret;

	if (pid < -1)
		return -EINVAL;
	mutex_lock(&sessions_mutex);
	if (pid == -1) {
		/* untrack all pids: replace by empty tracker. */
		struct lttng_pid_tracker *old_lpf = session->pid_tracker;
		struct lttng_pid_tracker *lpf;

		lpf = lttng_pid_tracker_create();
		if (!lpf) {
			ret = -ENOMEM;
			goto unlock;
		}
		rcu_assign_pointer(session->pid_tracker, lpf);
		/* Wait for readers before freeing the old tracker. */
		synchronize_trace();
		if (old_lpf)
			lttng_pid_tracker_destroy(old_lpf);
		ret = 0;
	} else {
		if (!session->pid_tracker) {
			ret = -ENOENT;
			goto unlock;
		}
		ret = lttng_pid_tracker_del(session->pid_tracker, pid);
	}
unlock:
	mutex_unlock(&sessions_mutex);
	return ret;
}
907
/*
 * seq_file start callback: take the sessions mutex (released in
 * pid_list_stop) and return the element at position *pos.  When the
 * tracker is disabled, the session pointer itself is returned as a
 * sentinel meaning "all PIDs tracked".
 */
static
void *pid_list_start(struct seq_file *m, loff_t *pos)
{
	struct lttng_session *session = m->private;
	struct lttng_pid_tracker *lpf;
	struct lttng_pid_hash_node *e;
	int iter = 0, i;

	mutex_lock(&sessions_mutex);
	lpf = session->pid_tracker;
	if (lpf) {
		/* Linear scan of the hash table up to position *pos. */
		for (i = 0; i < LTTNG_PID_TABLE_SIZE; i++) {
			struct hlist_head *head = &lpf->pid_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *pos)
					return e;
			}
		}
	} else {
		/* PID tracker disabled. */
		if (iter >= *pos && iter == 0) {
			return session; /* empty tracker */
		}
		iter++;
	}
	/* End of list */
	return NULL;
}
937
/* Called with sessions_mutex held. */
/*
 * seq_file next callback: advance *ppos and re-scan from the start of
 * the hash table for the element at the new position.
 */
static
void *pid_list_next(struct seq_file *m, void *p, loff_t *ppos)
{
	struct lttng_session *session = m->private;
	struct lttng_pid_tracker *lpf;
	struct lttng_pid_hash_node *e;
	int iter = 0, i;

	(*ppos)++;
	lpf = session->pid_tracker;
	if (lpf) {
		for (i = 0; i < LTTNG_PID_TABLE_SIZE; i++) {
			struct hlist_head *head = &lpf->pid_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *ppos)
					return e;
			}
		}
	} else {
		/* PID tracker disabled: the single sentinel entry was already
		 * emitted at position 0, so this can only end the iteration. */
		if (iter >= *ppos && iter == 0)
			return session; /* empty tracker */
		iter++;
	}

	/* End of list */
	return NULL;
}
968
/* seq_file stop callback: release the mutex taken in pid_list_start. */
static
void pid_list_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&sessions_mutex);
}
974
975 static
976 int pid_list_show(struct seq_file *m, void *p)
977 {
978 int pid;
979
980 if (p == m->private) {
981 /* Tracker disabled. */
982 pid = -1;
983 } else {
984 const struct lttng_pid_hash_node *e = p;
985
986 pid = lttng_pid_tracker_get_node_pid(e);
987 }
988 seq_printf(m, "process { pid = %d; };\n", pid);
989 return 0;
990 }
991
/* seq_file iterator hooks for the tracked-PIDs listing. */
static
const struct seq_operations lttng_tracker_pids_list_seq_ops = {
	.start = pid_list_start,
	.next = pid_list_next,
	.stop = pid_list_stop,
	.show = pid_list_show,
};
999
/* Open hook: attach the tracked-PIDs seq_file iterator to the file. */
static
int lttng_tracker_pids_list_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &lttng_tracker_pids_list_seq_ops);
}
1005
/*
 * Release hook: tear down the seq_file and drop the session file
 * reference taken when the listing was created.
 */
static
int lttng_tracker_pids_list_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct lttng_session *session = m->private;
	int ret;

	WARN_ON_ONCE(!session);
	ret = seq_release(inode, file);
	if (!ret && session)
		fput(session->file);
	return ret;
}
1019
/* File operations for the anonymous tracked-PIDs listing fd. */
const struct file_operations lttng_tracker_pids_list_fops = {
	.owner = THIS_MODULE,
	.open = lttng_tracker_pids_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = lttng_tracker_pids_list_release,
};
1027
1028 int lttng_session_list_tracker_pids(struct lttng_session *session)
1029 {
1030 struct file *tracker_pids_list_file;
1031 struct seq_file *m;
1032 int file_fd, ret;
1033
1034 file_fd = lttng_get_unused_fd();
1035 if (file_fd < 0) {
1036 ret = file_fd;
1037 goto fd_error;
1038 }
1039
1040 tracker_pids_list_file = anon_inode_getfile("[lttng_tracker_pids_list]",
1041 &lttng_tracker_pids_list_fops,
1042 NULL, O_RDWR);
1043 if (IS_ERR(tracker_pids_list_file)) {
1044 ret = PTR_ERR(tracker_pids_list_file);
1045 goto file_error;
1046 }
1047 if (atomic_long_add_unless(&session->file->f_count,
1048 1, INT_MAX) == INT_MAX) {
1049 goto refcount_error;
1050 }
1051 ret = lttng_tracker_pids_list_fops.open(NULL, tracker_pids_list_file);
1052 if (ret < 0)
1053 goto open_error;
1054 m = tracker_pids_list_file->private_data;
1055 m->private = session;
1056 fd_install(file_fd, tracker_pids_list_file);
1057
1058 return file_fd;
1059
1060 open_error:
1061 atomic_long_dec(&session->file->f_count);
1062 refcount_error:
1063 fput(tracker_pids_list_file);
1064 file_error:
1065 put_unused_fd(file_fd);
1066 fd_error:
1067 return ret;
1068 }
1069
1070 /*
1071 * Enabler management.
1072 */
/*
 * Match an event description name against a wildcard enabler pattern
 * ("prefix*"): compare only the prefix before the final '*'.
 * Returns 1 on match, 0 otherwise.
 */
static
int lttng_match_enabler_wildcard(const char *desc_name,
		const char *name)
{
	size_t prefix_len = strlen(name) - 1;	/* exclude final '*' */

	return strncmp(desc_name, name, prefix_len) == 0;
}
1082
/*
 * Exact-name match between an event description name and an enabler
 * name.  Returns 1 on match, 0 otherwise.
 */
static
int lttng_match_enabler_name(const char *desc_name,
		const char *name)
{
	return strcmp(desc_name, name) == 0;
}
1091
/*
 * Check whether an event description matches an enabler's name
 * pattern.  For syscall enablers, the "compat_" prefix and then the
 * mandatory "syscall_entry_"/"syscall_exit_" prefix are stripped from
 * the description name before matching.
 * Returns 1 on match, 0 on mismatch, -EINVAL on unexpected
 * instrumentation or enabler type.
 */
static
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	const char *desc_name, *enabler_name;

	enabler_name = enabler->event_param.name;
	switch (enabler->event_param.instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		desc_name = desc->name;
		break;
	case LTTNG_KERNEL_SYSCALL:
		desc_name = desc->name;
		if (!strncmp(desc_name, "compat_", strlen("compat_")))
			desc_name += strlen("compat_");
		if (!strncmp(desc_name, "syscall_exit_",
				strlen("syscall_exit_"))) {
			desc_name += strlen("syscall_exit_");
		} else if (!strncmp(desc_name, "syscall_entry_",
				strlen("syscall_entry_"))) {
			desc_name += strlen("syscall_entry_");
		} else {
			/* Syscall descriptions always carry one of the prefixes. */
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	switch (enabler->type) {
	case LTTNG_ENABLER_WILDCARD:
		return lttng_match_enabler_wildcard(desc_name, enabler_name);
	case LTTNG_ENABLER_NAME:
		return lttng_match_enabler_name(desc_name, enabler_name);
	default:
		return -EINVAL;
	}
}
1131
1132 static
1133 int lttng_event_match_enabler(struct lttng_event *event,
1134 struct lttng_enabler *enabler)
1135 {
1136 if (enabler->event_param.instrumentation != event->instrumentation)
1137 return 0;
1138 if (lttng_desc_match_enabler(event->desc, enabler)
1139 && event->chan == enabler->chan)
1140 return 1;
1141 else
1142 return 0;
1143 }
1144
1145 static
1146 struct lttng_enabler_ref *lttng_event_enabler_ref(struct lttng_event *event,
1147 struct lttng_enabler *enabler)
1148 {
1149 struct lttng_enabler_ref *enabler_ref;
1150
1151 list_for_each_entry(enabler_ref,
1152 &event->enablers_ref_head, node) {
1153 if (enabler_ref->ref == enabler)
1154 return enabler_ref;
1155 }
1156 return NULL;
1157 }
1158
/*
 * Walk the global tracepoint probe list and, for every probe event
 * description matching @enabler, create the corresponding
 * lttng_event in the enabler's channel if it does not exist yet.
 * Event creation is best-effort: a failure is logged and iteration
 * continues with the next probe event.
 */
static
void lttng_create_tracepoint_if_missing(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_probe_desc *probe_desc;
	const struct lttng_event_desc *desc;
	int i;
	struct list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	/*
	 * For each probe event, if we find that a probe event matches
	 * our enabler, create an associated lttng_event if not
	 * already present.
	 */
	list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			int found = 0;
			struct hlist_head *head;
			const char *event_name;
			size_t name_len;
			uint32_t hash;
			struct lttng_event *event;

			desc = probe_desc->event_desc[i];
			if (!lttng_desc_match_enabler(desc, enabler))
				continue;
			event_name = desc->name;
			name_len = strlen(event_name);

			/*
			 * Check if already created: lookup the session's
			 * event hash table bucket for this event name, then
			 * compare descriptor and channel pointers.
			 */
			hash = jhash(event_name, name_len, 0);
			head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
			lttng_hlist_for_each_entry(event, head, hlist) {
				if (event->desc == desc
					&& event->chan == enabler->chan)
					found = 1;
			}
			if (found)
				continue;

			/*
			 * We need to create an event for this
			 * event probe. Failure is only logged: other
			 * matching events are still created.
			 */
			event = _lttng_event_create(enabler->chan,
					NULL, NULL, desc,
					LTTNG_KERNEL_TRACEPOINT);
			if (!event) {
				printk(KERN_INFO "Unable to create event %s\n",
					probe_desc->event_desc[i]->name);
			}
		}
	}
}
1216
/*
 * Ensure system call instrumentation is registered on the enabler's
 * channel. A registration failure is reported with a one-shot kernel
 * warning rather than propagated to the caller.
 */
static
void lttng_create_syscall_if_missing(struct lttng_enabler *enabler)
{
	int ret;

	ret = lttng_syscalls_register(enabler->chan, NULL);
	WARN_ON_ONCE(ret);
}
1225
1226 /*
1227 * Create struct lttng_event if it is missing and present in the list of
1228 * tracepoint probes.
1229 * Should be called with sessions mutex held.
1230 */
1231 static
1232 void lttng_create_event_if_missing(struct lttng_enabler *enabler)
1233 {
1234 switch (enabler->event_param.instrumentation) {
1235 case LTTNG_KERNEL_TRACEPOINT:
1236 lttng_create_tracepoint_if_missing(enabler);
1237 break;
1238 case LTTNG_KERNEL_SYSCALL:
1239 lttng_create_syscall_if_missing(enabler);
1240 break;
1241 default:
1242 WARN_ON_ONCE(1);
1243 break;
1244 }
1245 }
1246
1247 /*
1248 * Create events associated with an enabler (if not already present),
1249 * and add backward reference from the event to the enabler.
1250 * Should be called with sessions mutex held.
1251 */
1252 static
1253 int lttng_enabler_ref_events(struct lttng_enabler *enabler)
1254 {
1255 struct lttng_session *session = enabler->chan->session;
1256 struct lttng_event *event;
1257
1258 /* First ensure that probe events are created for this enabler. */
1259 lttng_create_event_if_missing(enabler);
1260
1261 /* For each event matching enabler in session event list. */
1262 list_for_each_entry(event, &session->events, list) {
1263 struct lttng_enabler_ref *enabler_ref;
1264
1265 if (!lttng_event_match_enabler(event, enabler))
1266 continue;
1267 enabler_ref = lttng_event_enabler_ref(event, enabler);
1268 if (!enabler_ref) {
1269 /*
1270 * If no backward ref, create it.
1271 * Add backward ref from event to enabler.
1272 */
1273 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
1274 if (!enabler_ref)
1275 return -ENOMEM;
1276 enabler_ref->ref = enabler;
1277 list_add(&enabler_ref->node,
1278 &event->enablers_ref_head);
1279 }
1280
1281 /*
1282 * Link filter bytecodes if not linked yet.
1283 */
1284 lttng_enabler_event_link_bytecode(event, enabler);
1285
1286 /* TODO: merge event context. */
1287 }
1288 return 0;
1289 }
1290
1291 /*
1292 * Called at module load: connect the probe on all enablers matching
1293 * this event.
1294 * Called with sessions lock held.
1295 */
1296 int lttng_fix_pending_events(void)
1297 {
1298 struct lttng_session *session;
1299
1300 list_for_each_entry(session, &sessions, list)
1301 lttng_session_lazy_sync_enablers(session);
1302 return 0;
1303 }
1304
/*
 * Allocate a new enabler of @type (name or wildcard match) for
 * @event_param on channel @chan, link it into its session's enabler
 * list, and lazily sync so it takes effect if the session is active.
 * The enabler starts in the disabled state.
 * Returns the new enabler, or NULL on allocation failure.
 */
struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
		struct lttng_kernel_event *event_param,
		struct lttng_channel *chan)
{
	struct lttng_enabler *enabler;

	enabler = kzalloc(sizeof(*enabler), GFP_KERNEL);
	if (!enabler)
		return NULL;
	enabler->type = type;
	INIT_LIST_HEAD(&enabler->filter_bytecode_head);
	/* Keep a copy of the requested event parameters. */
	memcpy(&enabler->event_param, event_param,
		sizeof(enabler->event_param));
	enabler->chan = chan;
	/* ctx left NULL */
	enabler->enabled = 0;
	enabler->evtype = LTTNG_TYPE_ENABLER;
	/* Session enabler list and sync are protected by sessions_mutex. */
	mutex_lock(&sessions_mutex);
	list_add(&enabler->node, &enabler->chan->session->enablers_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	mutex_unlock(&sessions_mutex);
	return enabler;
}
1328
/*
 * Enable @enabler and re-sync its session's events so matching events
 * get enabled. Always returns 0.
 */
int lttng_enabler_enable(struct lttng_enabler *enabler)
{
	mutex_lock(&sessions_mutex);
	enabler->enabled = 1;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	mutex_unlock(&sessions_mutex);
	return 0;
}
1337
/*
 * Disable @enabler and re-sync its session's events so events no longer
 * covered by an enabled enabler get disabled. Always returns 0.
 */
int lttng_enabler_disable(struct lttng_enabler *enabler)
{
	mutex_lock(&sessions_mutex);
	enabler->enabled = 0;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	mutex_unlock(&sessions_mutex);
	return 0;
}
1346
1347 int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
1348 struct lttng_kernel_filter_bytecode __user *bytecode)
1349 {
1350 struct lttng_filter_bytecode_node *bytecode_node;
1351 uint32_t bytecode_len;
1352 int ret;
1353
1354 ret = get_user(bytecode_len, &bytecode->len);
1355 if (ret)
1356 return ret;
1357 bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
1358 GFP_KERNEL);
1359 if (!bytecode_node)
1360 return -ENOMEM;
1361 ret = copy_from_user(&bytecode_node->bc, bytecode,
1362 sizeof(*bytecode) + bytecode_len);
1363 if (ret)
1364 goto error_free;
1365 bytecode_node->enabler = enabler;
1366 /* Enforce length based on allocated size */
1367 bytecode_node->bc.len = bytecode_len;
1368 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
1369 lttng_session_lazy_sync_enablers(enabler->chan->session);
1370 return 0;
1371
1372 error_free:
1373 kfree(bytecode_node);
1374 return ret;
1375 }
1376
/*
 * Attaching a context to an enabler is not implemented yet:
 * unconditionally return -ENOSYS.
 */
int lttng_enabler_attach_context(struct lttng_enabler *enabler,
		struct lttng_kernel_context *context_param)
{
	return -ENOSYS;
}
1382
/*
 * Tear down an enabler: free its attached filter bytecode programs,
 * destroy its context, unlink it from the session enabler list and
 * free it.
 */
static
void lttng_enabler_destroy(struct lttng_enabler *enabler)
{
	struct lttng_filter_bytecode_node *filter_node, *tmp_filter_node;

	/* Destroy filter bytecode */
	list_for_each_entry_safe(filter_node, tmp_filter_node,
			&enabler->filter_bytecode_head, node) {
		kfree(filter_node);
	}

	/* Destroy contexts */
	lttng_destroy_context(enabler->ctx);

	list_del(&enabler->node);
	kfree(enabler);
}
1400
1401 /*
1402 * lttng_session_sync_enablers should be called just before starting a
1403 * session.
1404 * Should be called with sessions mutex held.
1405 */
1406 static
1407 void lttng_session_sync_enablers(struct lttng_session *session)
1408 {
1409 struct lttng_enabler *enabler;
1410 struct lttng_event *event;
1411
1412 list_for_each_entry(enabler, &session->enablers_head, node)
1413 lttng_enabler_ref_events(enabler);
1414 /*
1415 * For each event, if at least one of its enablers is enabled,
1416 * and its channel and session transient states are enabled, we
1417 * enable the event, else we disable it.
1418 */
1419 list_for_each_entry(event, &session->events, list) {
1420 struct lttng_enabler_ref *enabler_ref;
1421 struct lttng_bytecode_runtime *runtime;
1422 int enabled = 0, has_enablers_without_bytecode = 0;
1423
1424 switch (event->instrumentation) {
1425 case LTTNG_KERNEL_TRACEPOINT:
1426 case LTTNG_KERNEL_SYSCALL:
1427 /* Enable events */
1428 list_for_each_entry(enabler_ref,
1429 &event->enablers_ref_head, node) {
1430 if (enabler_ref->ref->enabled) {
1431 enabled = 1;
1432 break;
1433 }
1434 }
1435 break;
1436 default:
1437 /* Not handled with lazy sync. */
1438 continue;
1439 }
1440 /*
1441 * Enabled state is based on union of enablers, with
1442 * intesection of session and channel transient enable
1443 * states.
1444 */
1445 enabled = enabled && session->tstate && event->chan->tstate;
1446
1447 ACCESS_ONCE(event->enabled) = enabled;
1448 /*
1449 * Sync tracepoint registration with event enabled
1450 * state.
1451 */
1452 if (enabled) {
1453 register_event(event);
1454 } else {
1455 _lttng_event_unregister(event);
1456 }
1457
1458 /* Check if has enablers without bytecode enabled */
1459 list_for_each_entry(enabler_ref,
1460 &event->enablers_ref_head, node) {
1461 if (enabler_ref->ref->enabled
1462 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
1463 has_enablers_without_bytecode = 1;
1464 break;
1465 }
1466 }
1467 event->has_enablers_without_bytecode =
1468 has_enablers_without_bytecode;
1469
1470 /* Enable filters */
1471 list_for_each_entry(runtime,
1472 &event->bytecode_runtime_head, node)
1473 lttng_filter_sync_state(runtime);
1474 }
1475 }
1476
1477 /*
1478 * Apply enablers to session events, adding events to session if need
1479 * be. It is required after each modification applied to an active
1480 * session, and right before session "start".
1481 * "lazy" sync means we only sync if required.
1482 * Should be called with sessions mutex held.
1483 */
1484 static
1485 void lttng_session_lazy_sync_enablers(struct lttng_session *session)
1486 {
1487 /* We can skip if session is not active */
1488 if (!session->active)
1489 return;
1490 lttng_session_sync_enablers(session);
1491 }
1492
1493 /*
1494 * Serialize at most one packet worth of metadata into a metadata
1495 * channel.
1496 * We grab the metadata cache mutex to get exclusive access to our metadata
1497 * buffer and to the metadata cache. Exclusive access to the metadata buffer
1498 * allows us to do racy operations such as looking for remaining space left in
1499 * packet and write, since mutual exclusion protects us from concurrent writes.
1500 * Mutual exclusion on the metadata cache allow us to read the cache content
1501 * without racing against reallocation of the cache by updates.
1502 * Returns the number of bytes written in the channel, 0 if no data
1503 * was written and a negative value on error.
1504 */
1505 int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
1506 struct channel *chan)
1507 {
1508 struct lib_ring_buffer_ctx ctx;
1509 int ret = 0;
1510 size_t len, reserve_len;
1511
1512 /*
1513 * Ensure we support mutiple get_next / put sequences followed by
1514 * put_next. The metadata cache lock protects reading the metadata
1515 * cache. It can indeed be read concurrently by "get_next_subbuf" and
1516 * "flush" operations on the buffer invoked by different processes.
1517 * Moreover, since the metadata cache memory can be reallocated, we
1518 * need to have exclusive access against updates even though we only
1519 * read it.
1520 */
1521 mutex_lock(&stream->metadata_cache->lock);
1522 WARN_ON(stream->metadata_in < stream->metadata_out);
1523 if (stream->metadata_in != stream->metadata_out)
1524 goto end;
1525
1526 len = stream->metadata_cache->metadata_written -
1527 stream->metadata_in;
1528 if (!len)
1529 goto end;
1530 reserve_len = min_t(size_t,
1531 stream->transport->ops.packet_avail_size(chan),
1532 len);
1533 lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
1534 sizeof(char), -1);
1535 /*
1536 * If reservation failed, return an error to the caller.
1537 */
1538 ret = stream->transport->ops.event_reserve(&ctx, 0);
1539 if (ret != 0) {
1540 printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
1541 goto end;
1542 }
1543 stream->transport->ops.event_write(&ctx,
1544 stream->metadata_cache->data + stream->metadata_in,
1545 reserve_len);
1546 stream->transport->ops.event_commit(&ctx);
1547 stream->metadata_in += reserve_len;
1548 ret = reserve_len;
1549
1550 end:
1551 mutex_unlock(&stream->metadata_cache->lock);
1552 return ret;
1553 }
1554
1555 /*
1556 * Write the metadata to the metadata cache.
1557 * Must be called with sessions_mutex held.
1558 * The metadata cache lock protects us from concurrent read access from
1559 * thread outputting metadata content to ring buffer.
1560 */
1561 int lttng_metadata_printf(struct lttng_session *session,
1562 const char *fmt, ...)
1563 {
1564 char *str;
1565 size_t len;
1566 va_list ap;
1567 struct lttng_metadata_stream *stream;
1568
1569 WARN_ON_ONCE(!ACCESS_ONCE(session->active));
1570
1571 va_start(ap, fmt);
1572 str = kvasprintf(GFP_KERNEL, fmt, ap);
1573 va_end(ap);
1574 if (!str)
1575 return -ENOMEM;
1576
1577 len = strlen(str);
1578 mutex_lock(&session->metadata_cache->lock);
1579 if (session->metadata_cache->metadata_written + len >
1580 session->metadata_cache->cache_alloc) {
1581 char *tmp_cache_realloc;
1582 unsigned int tmp_cache_alloc_size;
1583
1584 tmp_cache_alloc_size = max_t(unsigned int,
1585 session->metadata_cache->cache_alloc + len,
1586 session->metadata_cache->cache_alloc << 1);
1587 tmp_cache_realloc = lttng_vzalloc(tmp_cache_alloc_size);
1588 if (!tmp_cache_realloc)
1589 goto err;
1590 if (session->metadata_cache->data) {
1591 memcpy(tmp_cache_realloc,
1592 session->metadata_cache->data,
1593 session->metadata_cache->cache_alloc);
1594 vfree(session->metadata_cache->data);
1595 }
1596
1597 session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
1598 session->metadata_cache->data = tmp_cache_realloc;
1599 }
1600 memcpy(session->metadata_cache->data +
1601 session->metadata_cache->metadata_written,
1602 str, len);
1603 session->metadata_cache->metadata_written += len;
1604 mutex_unlock(&session->metadata_cache->lock);
1605 kfree(str);
1606
1607 list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
1608 wake_up_interruptible(&stream->read_wait);
1609
1610 return 0;
1611
1612 err:
1613 mutex_unlock(&session->metadata_cache->lock);
1614 kfree(str);
1615 return -ENOMEM;
1616 }
1617
1618 /*
1619 * Must be called with sessions_mutex held.
1620 */
1621 static
1622 int _lttng_field_statedump(struct lttng_session *session,
1623 const struct lttng_event_field *field)
1624 {
1625 int ret = 0;
1626
1627 switch (field->type.atype) {
1628 case atype_integer:
1629 ret = lttng_metadata_printf(session,
1630 " integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s;\n",
1631 field->type.u.basic.integer.size,
1632 field->type.u.basic.integer.alignment,
1633 field->type.u.basic.integer.signedness,
1634 (field->type.u.basic.integer.encoding == lttng_encode_none)
1635 ? "none"
1636 : (field->type.u.basic.integer.encoding == lttng_encode_UTF8)
1637 ? "UTF8"
1638 : "ASCII",
1639 field->type.u.basic.integer.base,
1640 #if __BYTE_ORDER == __BIG_ENDIAN
1641 field->type.u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
1642 #else
1643 field->type.u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
1644 #endif
1645 field->name);
1646 break;
1647 case atype_enum:
1648 ret = lttng_metadata_printf(session,
1649 " %s _%s;\n",
1650 field->type.u.basic.enumeration.name,
1651 field->name);
1652 break;
1653 case atype_array:
1654 {
1655 const struct lttng_basic_type *elem_type;
1656
1657 elem_type = &field->type.u.array.elem_type;
1658 ret = lttng_metadata_printf(session,
1659 " integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[%u];\n",
1660 elem_type->u.basic.integer.size,
1661 elem_type->u.basic.integer.alignment,
1662 elem_type->u.basic.integer.signedness,
1663 (elem_type->u.basic.integer.encoding == lttng_encode_none)
1664 ? "none"
1665 : (elem_type->u.basic.integer.encoding == lttng_encode_UTF8)
1666 ? "UTF8"
1667 : "ASCII",
1668 elem_type->u.basic.integer.base,
1669 #if __BYTE_ORDER == __BIG_ENDIAN
1670 elem_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
1671 #else
1672 elem_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
1673 #endif
1674 field->name, field->type.u.array.length);
1675 break;
1676 }
1677 case atype_sequence:
1678 {
1679 const struct lttng_basic_type *elem_type;
1680 const struct lttng_basic_type *length_type;
1681
1682 elem_type = &field->type.u.sequence.elem_type;
1683 length_type = &field->type.u.sequence.length_type;
1684 ret = lttng_metadata_printf(session,
1685 " integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } __%s_length;\n",
1686 length_type->u.basic.integer.size,
1687 (unsigned int) length_type->u.basic.integer.alignment,
1688 length_type->u.basic.integer.signedness,
1689 (length_type->u.basic.integer.encoding == lttng_encode_none)
1690 ? "none"
1691 : ((length_type->u.basic.integer.encoding == lttng_encode_UTF8)
1692 ? "UTF8"
1693 : "ASCII"),
1694 length_type->u.basic.integer.base,
1695 #if __BYTE_ORDER == __BIG_ENDIAN
1696 length_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
1697 #else
1698 length_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
1699 #endif
1700 field->name);
1701 if (ret)
1702 return ret;
1703
1704 ret = lttng_metadata_printf(session,
1705 " integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[ __%s_length ];\n",
1706 elem_type->u.basic.integer.size,
1707 (unsigned int) elem_type->u.basic.integer.alignment,
1708 elem_type->u.basic.integer.signedness,
1709 (elem_type->u.basic.integer.encoding == lttng_encode_none)
1710 ? "none"
1711 : ((elem_type->u.basic.integer.encoding == lttng_encode_UTF8)
1712 ? "UTF8"
1713 : "ASCII"),
1714 elem_type->u.basic.integer.base,
1715 #if __BYTE_ORDER == __BIG_ENDIAN
1716 elem_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
1717 #else
1718 elem_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
1719 #endif
1720 field->name,
1721 field->name);
1722 break;
1723 }
1724
1725 case atype_string:
1726 /* Default encoding is UTF8 */
1727 ret = lttng_metadata_printf(session,
1728 " string%s _%s;\n",
1729 field->type.u.basic.string.encoding == lttng_encode_ASCII ?
1730 " { encoding = ASCII; }" : "",
1731 field->name);
1732 break;
1733 default:
1734 WARN_ON_ONCE(1);
1735 return -EINVAL;
1736 }
1737 return ret;
1738 }
1739
1740 static
1741 int _lttng_context_metadata_statedump(struct lttng_session *session,
1742 struct lttng_ctx *ctx)
1743 {
1744 int ret = 0;
1745 int i;
1746
1747 if (!ctx)
1748 return 0;
1749 for (i = 0; i < ctx->nr_fields; i++) {
1750 const struct lttng_ctx_field *field = &ctx->fields[i];
1751
1752 ret = _lttng_field_statedump(session, &field->event_field);
1753 if (ret)
1754 return ret;
1755 }
1756 return ret;
1757 }
1758
1759 static
1760 int _lttng_fields_metadata_statedump(struct lttng_session *session,
1761 struct lttng_event *event)
1762 {
1763 const struct lttng_event_desc *desc = event->desc;
1764 int ret = 0;
1765 int i;
1766
1767 for (i = 0; i < desc->nr_fields; i++) {
1768 const struct lttng_event_field *field = &desc->fields[i];
1769
1770 ret = _lttng_field_statedump(session, field);
1771 if (ret)
1772 return ret;
1773 }
1774 return ret;
1775 }
1776
1777 /*
1778 * Must be called with sessions_mutex held.
1779 */
1780 static
1781 int _lttng_event_metadata_statedump(struct lttng_session *session,
1782 struct lttng_channel *chan,
1783 struct lttng_event *event)
1784 {
1785 int ret = 0;
1786
1787 if (event->metadata_dumped || !ACCESS_ONCE(session->active))
1788 return 0;
1789 if (chan->channel_type == METADATA_CHANNEL)
1790 return 0;
1791
1792 ret = lttng_metadata_printf(session,
1793 "event {\n"
1794 " name = \"%s\";\n"
1795 " id = %u;\n"
1796 " stream_id = %u;\n",
1797 event->desc->name,
1798 event->id,
1799 event->chan->id);
1800 if (ret)
1801 goto end;
1802
1803 if (event->ctx) {
1804 ret = lttng_metadata_printf(session,
1805 " context := struct {\n");
1806 if (ret)
1807 goto end;
1808 }
1809 ret = _lttng_context_metadata_statedump(session, event->ctx);
1810 if (ret)
1811 goto end;
1812 if (event->ctx) {
1813 ret = lttng_metadata_printf(session,
1814 " };\n");
1815 if (ret)
1816 goto end;
1817 }
1818
1819 ret = lttng_metadata_printf(session,
1820 " fields := struct {\n"
1821 );
1822 if (ret)
1823 goto end;
1824
1825 ret = _lttng_fields_metadata_statedump(session, event);
1826 if (ret)
1827 goto end;
1828
1829 /*
1830 * LTTng space reservation can only reserve multiples of the
1831 * byte size.
1832 */
1833 ret = lttng_metadata_printf(session,
1834 " };\n"
1835 "};\n\n");
1836 if (ret)
1837 goto end;
1838
1839 event->metadata_dumped = 1;
1840 end:
1841 return ret;
1842
1843 }
1844
1845 /*
1846 * Must be called with sessions_mutex held.
1847 */
1848 static
1849 int _lttng_channel_metadata_statedump(struct lttng_session *session,
1850 struct lttng_channel *chan)
1851 {
1852 int ret = 0;
1853
1854 if (chan->metadata_dumped || !ACCESS_ONCE(session->active))
1855 return 0;
1856
1857 if (chan->channel_type == METADATA_CHANNEL)
1858 return 0;
1859
1860 WARN_ON_ONCE(!chan->header_type);
1861 ret = lttng_metadata_printf(session,
1862 "stream {\n"
1863 " id = %u;\n"
1864 " event.header := %s;\n"
1865 " packet.context := struct packet_context;\n",
1866 chan->id,
1867 chan->header_type == 1 ? "struct event_header_compact" :
1868 "struct event_header_large");
1869 if (ret)
1870 goto end;
1871
1872 if (chan->ctx) {
1873 ret = lttng_metadata_printf(session,
1874 " event.context := struct {\n");
1875 if (ret)
1876 goto end;
1877 }
1878 ret = _lttng_context_metadata_statedump(session, chan->ctx);
1879 if (ret)
1880 goto end;
1881 if (chan->ctx) {
1882 ret = lttng_metadata_printf(session,
1883 " };\n");
1884 if (ret)
1885 goto end;
1886 }
1887
1888 ret = lttng_metadata_printf(session,
1889 "};\n\n");
1890
1891 chan->metadata_dumped = 1;
1892 end:
1893 return ret;
1894 }
1895
1896 /*
1897 * Must be called with sessions_mutex held.
1898 */
1899 static
1900 int _lttng_stream_packet_context_declare(struct lttng_session *session)
1901 {
1902 return lttng_metadata_printf(session,
1903 "struct packet_context {\n"
1904 " uint64_clock_monotonic_t timestamp_begin;\n"
1905 " uint64_clock_monotonic_t timestamp_end;\n"
1906 " uint64_t content_size;\n"
1907 " uint64_t packet_size;\n"
1908 " unsigned long events_discarded;\n"
1909 " uint32_t cpu_id;\n"
1910 "};\n\n"
1911 );
1912 }
1913
1914 /*
1915 * Compact header:
1916 * id: range: 0 - 30.
1917 * id 31 is reserved to indicate an extended header.
1918 *
1919 * Large header:
1920 * id: range: 0 - 65534.
1921 * id 65535 is reserved to indicate an extended header.
1922 *
1923 * Must be called with sessions_mutex held.
1924 */
1925 static
1926 int _lttng_event_header_declare(struct lttng_session *session)
1927 {
1928 return lttng_metadata_printf(session,
1929 "struct event_header_compact {\n"
1930 " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
1931 " variant <id> {\n"
1932 " struct {\n"
1933 " uint27_clock_monotonic_t timestamp;\n"
1934 " } compact;\n"
1935 " struct {\n"
1936 " uint32_t id;\n"
1937 " uint64_clock_monotonic_t timestamp;\n"
1938 " } extended;\n"
1939 " } v;\n"
1940 "} align(%u);\n"
1941 "\n"
1942 "struct event_header_large {\n"
1943 " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
1944 " variant <id> {\n"
1945 " struct {\n"
1946 " uint32_clock_monotonic_t timestamp;\n"
1947 " } compact;\n"
1948 " struct {\n"
1949 " uint32_t id;\n"
1950 " uint64_clock_monotonic_t timestamp;\n"
1951 " } extended;\n"
1952 " } v;\n"
1953 "} align(%u);\n\n",
1954 lttng_alignof(uint32_t) * CHAR_BIT,
1955 lttng_alignof(uint16_t) * CHAR_BIT
1956 );
1957 }
1958
1959 /*
1960 * Approximation of NTP time of day to clock monotonic correlation,
1961 * taken at start of trace.
1962 * Yes, this is only an approximation. Yes, we can (and will) do better
1963 * in future versions.
1964 * Return 0 if offset is negative. It may happen if the system sets
1965 * the REALTIME clock to 0 after boot.
1966 */
1967 static
1968 uint64_t measure_clock_offset(void)
1969 {
1970 uint64_t monotonic_avg, monotonic[2], realtime;
1971 int64_t offset;
1972 struct timespec rts = { 0, 0 };
1973 unsigned long flags;
1974
1975 /* Disable interrupts to increase correlation precision. */
1976 local_irq_save(flags);
1977 monotonic[0] = trace_clock_read64();
1978 getnstimeofday(&rts);
1979 monotonic[1] = trace_clock_read64();
1980 local_irq_restore(flags);
1981
1982 monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
1983 realtime = (uint64_t) rts.tv_sec * NSEC_PER_SEC;
1984 realtime += rts.tv_nsec;
1985 offset = (int64_t) realtime - monotonic_avg;
1986 if (offset < 0)
1987 return 0;
1988 return offset;
1989 }
1990
1991 /*
1992 * Output metadata into this session's metadata buffers.
1993 * Must be called with sessions_mutex held.
1994 */
1995 static
1996 int _lttng_session_metadata_statedump(struct lttng_session *session)
1997 {
1998 unsigned char *uuid_c = session->uuid.b;
1999 unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
2000 struct lttng_channel *chan;
2001 struct lttng_event *event;
2002 int ret = 0;
2003
2004 if (!ACCESS_ONCE(session->active))
2005 return 0;
2006 if (session->metadata_dumped)
2007 goto skip_session;
2008
2009 snprintf(uuid_s, sizeof(uuid_s),
2010 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
2011 uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
2012 uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
2013 uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
2014 uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
2015
2016 ret = lttng_metadata_printf(session,
2017 "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
2018 "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
2019 "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
2020 "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
2021 "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
2022 "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
2023 "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
2024 "\n"
2025 "trace {\n"
2026 " major = %u;\n"
2027 " minor = %u;\n"
2028 " uuid = \"%s\";\n"
2029 " byte_order = %s;\n"
2030 " packet.header := struct {\n"
2031 " uint32_t magic;\n"
2032 " uint8_t uuid[16];\n"
2033 " uint32_t stream_id;\n"
2034 " };\n"
2035 "};\n\n",
2036 lttng_alignof(uint8_t) * CHAR_BIT,
2037 lttng_alignof(uint16_t) * CHAR_BIT,
2038 lttng_alignof(uint32_t) * CHAR_BIT,
2039 lttng_alignof(uint64_t) * CHAR_BIT,
2040 sizeof(unsigned long) * CHAR_BIT,
2041 lttng_alignof(unsigned long) * CHAR_BIT,
2042 CTF_SPEC_MAJOR,
2043 CTF_SPEC_MINOR,
2044 uuid_s,
2045 #if __BYTE_ORDER == __BIG_ENDIAN
2046 "be"
2047 #else
2048 "le"
2049 #endif
2050 );
2051 if (ret)
2052 goto end;
2053
2054 ret = lttng_metadata_printf(session,
2055 "env {\n"
2056 " hostname = \"%s\";\n"
2057 " domain = \"kernel\";\n"
2058 " sysname = \"%s\";\n"
2059 " kernel_release = \"%s\";\n"
2060 " kernel_version = \"%s\";\n"
2061 " tracer_name = \"lttng-modules\";\n"
2062 " tracer_major = %d;\n"
2063 " tracer_minor = %d;\n"
2064 " tracer_patchlevel = %d;\n"
2065 "};\n\n",
2066 current->nsproxy->uts_ns->name.nodename,
2067 utsname()->sysname,
2068 utsname()->release,
2069 utsname()->version,
2070 LTTNG_MODULES_MAJOR_VERSION,
2071 LTTNG_MODULES_MINOR_VERSION,
2072 LTTNG_MODULES_PATCHLEVEL_VERSION
2073 );
2074 if (ret)
2075 goto end;
2076
2077 ret = lttng_metadata_printf(session,
2078 "clock {\n"
2079 " name = %s;\n",
2080 "monotonic"
2081 );
2082 if (ret)
2083 goto end;
2084
2085 if (!trace_clock_uuid(clock_uuid_s)) {
2086 ret = lttng_metadata_printf(session,
2087 " uuid = \"%s\";\n",
2088 clock_uuid_s
2089 );
2090 if (ret)
2091 goto end;
2092 }
2093
2094 ret = lttng_metadata_printf(session,
2095 " description = \"Monotonic Clock\";\n"
2096 " freq = %llu; /* Frequency, in Hz */\n"
2097 " /* clock value offset from Epoch is: offset * (1/freq) */\n"
2098 " offset = %llu;\n"
2099 "};\n\n",
2100 (unsigned long long) trace_clock_freq(),
2101 (unsigned long long) measure_clock_offset()
2102 );
2103 if (ret)
2104 goto end;
2105
2106 ret = lttng_metadata_printf(session,
2107 "typealias integer {\n"
2108 " size = 27; align = 1; signed = false;\n"
2109 " map = clock.monotonic.value;\n"
2110 "} := uint27_clock_monotonic_t;\n"
2111 "\n"
2112 "typealias integer {\n"
2113 " size = 32; align = %u; signed = false;\n"
2114 " map = clock.monotonic.value;\n"
2115 "} := uint32_clock_monotonic_t;\n"
2116 "\n"
2117 "typealias integer {\n"
2118 " size = 64; align = %u; signed = false;\n"
2119 " map = clock.monotonic.value;\n"
2120 "} := uint64_clock_monotonic_t;\n\n",
2121 lttng_alignof(uint32_t) * CHAR_BIT,
2122 lttng_alignof(uint64_t) * CHAR_BIT
2123 );
2124 if (ret)
2125 goto end;
2126
2127 ret = _lttng_stream_packet_context_declare(session);
2128 if (ret)
2129 goto end;
2130
2131 ret = _lttng_event_header_declare(session);
2132 if (ret)
2133 goto end;
2134
2135 skip_session:
2136 list_for_each_entry(chan, &session->chan, list) {
2137 ret = _lttng_channel_metadata_statedump(session, chan);
2138 if (ret)
2139 goto end;
2140 }
2141
2142 list_for_each_entry(event, &session->events, list) {
2143 ret = _lttng_event_metadata_statedump(session, event->chan, event);
2144 if (ret)
2145 goto end;
2146 }
2147 session->metadata_dumped = 1;
2148 end:
2149 return ret;
2150 }
2151
2152 /**
2153 * lttng_transport_register - LTT transport registration
2154 * @transport: transport structure
2155 *
2156 * Registers a transport which can be used as output to extract the data out of
2157 * LTTng. The module calling this registration function must ensure that no
2158 * trap-inducing code will be executed by the transport functions. E.g.
2159 * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
2160 * is made visible to the transport function. This registration acts as a
2161 * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
2162 * after its registration must it synchronize the TLBs.
2163 */
2164 void lttng_transport_register(struct lttng_transport *transport)
2165 {
2166 /*
2167 * Make sure no page fault can be triggered by the module about to be
2168 * registered. We deal with this here so we don't have to call
2169 * vmalloc_sync_all() in each module's init.
2170 */
2171 wrapper_vmalloc_sync_all();
2172
2173 mutex_lock(&sessions_mutex);
2174 list_add_tail(&transport->node, &lttng_transport_list);
2175 mutex_unlock(&sessions_mutex);
2176 }
2177 EXPORT_SYMBOL_GPL(lttng_transport_register);
2178
2179 /**
2180 * lttng_transport_unregister - LTT transport unregistration
2181 * @transport: transport structure
2182 */
2183 void lttng_transport_unregister(struct lttng_transport *transport)
2184 {
2185 mutex_lock(&sessions_mutex);
2186 list_del(&transport->node);
2187 mutex_unlock(&sessions_mutex);
2188 }
2189 EXPORT_SYMBOL_GPL(lttng_transport_unregister);
2190
/*
 * Module init: set up wrappers, contexts, tracepoint glue, the event
 * slab cache, the ABI and the logger, in dependency order. On failure,
 * unwind in reverse order via the goto chain.
 */
static int __init lttng_events_init(void)
{
	int ret;

	ret = wrapper_lttng_fixup_sig(THIS_MODULE);
	if (ret)
		return ret;
	ret = wrapper_get_pfnblock_flags_mask_init();
	if (ret)
		return ret;
	ret = wrapper_get_pageblock_flags_mask_init();
	if (ret)
		return ret;
	ret = lttng_context_init();
	if (ret)
		return ret;
	ret = lttng_tracepoint_init();
	if (ret)
		goto error_tp;
	/* Slab cache for struct lttng_event allocations. */
	event_cache = KMEM_CACHE(lttng_event, 0);
	if (!event_cache) {
		ret = -ENOMEM;
		goto error_kmem;
	}
	ret = lttng_abi_init();
	if (ret)
		goto error_abi;
	ret = lttng_logger_init();
	if (ret)
		goto error_logger;
	return 0;

	/* Unwind in reverse initialization order. */
error_logger:
	lttng_abi_exit();
error_abi:
	kmem_cache_destroy(event_cache);
error_kmem:
	lttng_tracepoint_exit();
error_tp:
	lttng_context_exit();
	return ret;
}
2233
2234 module_init(lttng_events_init);
2235
/*
 * Module exit: tear down in reverse order of lttng_events_init, after
 * destroying any session still alive.
 */
static void __exit lttng_events_exit(void)
{
	struct lttng_session *session, *tmpsession;

	lttng_logger_exit();
	lttng_abi_exit();
	/* Destroy remaining sessions before freeing the event cache. */
	list_for_each_entry_safe(session, tmpsession, &sessions, list)
		lttng_session_destroy(session);
	kmem_cache_destroy(event_cache);
	lttng_tracepoint_exit();
	lttng_context_exit();
}
2248
2249 module_exit(lttng_events_exit);
2250
2251 MODULE_LICENSE("GPL and additional rights");
2252 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
2253 MODULE_DESCRIPTION("LTTng Events");
2254 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
2255 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
2256 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
2257 LTTNG_MODULES_EXTRAVERSION);
This page took 0.072201 seconds and 5 git commands to generate.