Fix: do not generate packet at destroy after stop
[lttng-modules.git] / lttng-events.c
1 /*
2 * lttng-events.c
3 *
4 * Holds LTTng per-session event registry.
5 *
6 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 /*
24 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
25 * overrides a function with a define.
26 */
27 #include "wrapper/page_alloc.h"
28
29 #include <linux/module.h>
30 #include <linux/mutex.h>
31 #include <linux/sched.h>
32 #include <linux/slab.h>
33 #include <linux/jiffies.h>
34 #include <linux/utsname.h>
35 #include <linux/err.h>
36 #include <linux/seq_file.h>
37 #include <linux/file.h>
38 #include <linux/anon_inodes.h>
39 #include "wrapper/file.h"
40 #include <linux/jhash.h>
41 #include <linux/uaccess.h>
42 #include <linux/vmalloc.h>
43
44 #include "wrapper/uuid.h"
45 #include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
46 #include "wrapper/random.h"
47 #include "wrapper/tracepoint.h"
48 #include "wrapper/list.h"
49 #include "lttng-kernel-version.h"
50 #include "lttng-events.h"
51 #include "lttng-tracer.h"
52 #include "lttng-abi-old.h"
53 #include "lttng-endian.h"
54 #include "wrapper/vzalloc.h"
55 #include "wrapper/ringbuffer/backend.h"
56 #include "wrapper/ringbuffer/frontend.h"
57
/* Initial size of a session's metadata cache; grown on demand elsewhere. */
#define METADATA_CACHE_DEFAULT_SIZE 4096

/* All existing sessions; protected by sessions_mutex. */
static LIST_HEAD(sessions);
/* Registered ring-buffer transports; protected by sessions_mutex. */
static LIST_HEAD(lttng_transport_list);
/*
 * Protect the sessions and metadata caches.
 */
static DEFINE_MUTEX(sessions_mutex);
/* Slab cache used for struct lttng_event allocations. */
static struct kmem_cache *event_cache;

/* Enabler synchronization helpers (defined later in this file). */
static void lttng_session_lazy_sync_enablers(struct lttng_session *session);
static void lttng_session_sync_enablers(struct lttng_session *session);
static void lttng_enabler_destroy(struct lttng_enabler *enabler);

/* Teardown and metadata statedump helpers (defined later in this file). */
static void _lttng_event_destroy(struct lttng_event *event);
static void _lttng_channel_destroy(struct lttng_channel *chan);
static int _lttng_event_unregister(struct lttng_event *event);
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
				  struct lttng_channel *chan,
				  struct lttng_event *event);
static
int _lttng_session_metadata_statedump(struct lttng_session *session);
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
83
/*
 * Wait for in-flight probe callbacks to complete.  Waits for a sched-RCU
 * grace period, and additionally for a regular RCU grace period on
 * PREEMPT_RT kernels (the RT config symbol changed name in 3.4:
 * CONFIG_PREEMPT_RT before, CONFIG_PREEMPT_RT_FULL after).
 */
void synchronize_trace(void)
{
	synchronize_sched();
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
#ifdef CONFIG_PREEMPT_RT_FULL
	synchronize_rcu();
#endif
#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
#ifdef CONFIG_PREEMPT_RT
	synchronize_rcu();
#endif
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
}
97
/* Acquire the global sessions mutex (exported for other LTTng modules). */
void lttng_lock_sessions(void)
{
	mutex_lock(&sessions_mutex);
}
102
/* Release the global sessions mutex. */
void lttng_unlock_sessions(void)
{
	mutex_unlock(&sessions_mutex);
}
107
108 /*
109 * Called with sessions lock held.
110 */
111 int lttng_session_active(void)
112 {
113 struct lttng_session *iter;
114
115 list_for_each_entry(iter, &sessions, list) {
116 if (iter->active)
117 return 1;
118 }
119 return 0;
120 }
121
/*
 * Allocate, initialize and register a new tracing session.
 *
 * Also allocates the session's metadata cache (initially
 * METADATA_CACHE_DEFAULT_SIZE bytes).  The cache is refcounted so that
 * metadata streams can keep it alive after session teardown.
 *
 * Returns the new session, or NULL on allocation failure.
 * Takes the sessions mutex.
 */
struct lttng_session *lttng_session_create(void)
{
	struct lttng_session *session;
	struct lttng_metadata_cache *metadata_cache;
	int i;

	mutex_lock(&sessions_mutex);
	session = kzalloc(sizeof(struct lttng_session), GFP_KERNEL);
	if (!session)
		goto err;
	INIT_LIST_HEAD(&session->chan);
	INIT_LIST_HEAD(&session->events);
	uuid_le_gen(&session->uuid);

	metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
			GFP_KERNEL);
	if (!metadata_cache)
		goto err_free_session;
	metadata_cache->data = lttng_vzalloc(METADATA_CACHE_DEFAULT_SIZE);
	if (!metadata_cache->data)
		goto err_free_cache;
	metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
	kref_init(&metadata_cache->refcount);
	mutex_init(&metadata_cache->lock);
	session->metadata_cache = metadata_cache;
	INIT_LIST_HEAD(&metadata_cache->metadata_stream);
	/* Copy the uuid so the cache stays self-contained after session free. */
	memcpy(&metadata_cache->uuid, &session->uuid,
		sizeof(metadata_cache->uuid));
	INIT_LIST_HEAD(&session->enablers_head);
	for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
		INIT_HLIST_HEAD(&session->events_ht.table[i]);
	list_add(&session->list, &sessions);
	mutex_unlock(&sessions_mutex);
	return session;

err_free_cache:
	kfree(metadata_cache);
err_free_session:
	kfree(session);
err:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
165
166 void metadata_cache_destroy(struct kref *kref)
167 {
168 struct lttng_metadata_cache *cache =
169 container_of(kref, struct lttng_metadata_cache, refcount);
170 vfree(cache->data);
171 kfree(cache);
172 }
173
/*
 * Tear down a session: mark it inactive, unregister all instrumentation,
 * wait for in-flight probes, then destroy enablers, events and per-cpu
 * channels.  Metadata streams are hung up so blocked readers wake up;
 * the metadata cache is refcounted and freed on last put.
 * Takes the sessions mutex.
 */
void lttng_session_destroy(struct lttng_session *session)
{
	struct lttng_channel *chan, *tmpchan;
	struct lttng_event *event, *tmpevent;
	struct lttng_metadata_stream *metadata_stream;
	struct lttng_enabler *enabler, *tmpenabler;
	int ret;

	mutex_lock(&sessions_mutex);
	ACCESS_ONCE(session->active) = 0;
	list_for_each_entry(chan, &session->chan, list) {
		ret = lttng_syscalls_unregister(chan);
		WARN_ON(ret);
	}
	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_unregister(event);
		WARN_ON(ret);
	}
	synchronize_trace();	/* Wait for in-flight events to complete */
	list_for_each_entry_safe(enabler, tmpenabler,
			&session->enablers_head, node)
		lttng_enabler_destroy(enabler);
	list_for_each_entry_safe(event, tmpevent, &session->events, list)
		_lttng_event_destroy(event);
	list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
		/* Metadata channels go through their own release path. */
		BUG_ON(chan->channel_type == METADATA_CHANNEL);
		_lttng_channel_destroy(chan);
	}
	list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
		_lttng_metadata_channel_hangup(metadata_stream);
	if (session->pid_tracker)
		lttng_pid_tracker_destroy(session->pid_tracker);
	kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
	list_del(&session->list);
	mutex_unlock(&sessions_mutex);
	kfree(session);
}
211
/*
 * Start tracing for a session.
 *
 * Chooses each channel's event header type (based on the number of
 * events at first start), syncs enablers, clears stream quiescence,
 * marks the session active, and dumps metadata + system state.
 *
 * Returns 0 on success, -EBUSY if already active, or the statedump
 * error (in which case the session is deactivated again).
 * Takes the sessions mutex.
 */
int lttng_session_enable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (session->active) {
		ret = -EBUSY;
		goto end;
	}

	/* Set transient enabler state to "enabled" */
	session->tstate = 1;

	/*
	 * Snapshot the number of events per channel to know the type of header
	 * we need to use.
	 */
	list_for_each_entry(chan, &session->chan, list) {
		if (chan->header_type)
			continue;		/* don't change it if session stop/restart */
		/* Compact headers hold at most 31 distinct event ids. */
		if (chan->free_event_id < 31)
			chan->header_type = 1;	/* compact */
		else
			chan->header_type = 2;	/* large */
	}

	/* We need to sync enablers with session before activation. */
	lttng_session_sync_enablers(session);

	/* Clear each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list)
		lib_ring_buffer_clear_quiescent_channel(chan->chan);

	ACCESS_ONCE(session->active) = 1;
	ACCESS_ONCE(session->been_active) = 1;
	ret = _lttng_session_metadata_statedump(session);
	if (ret) {
		ACCESS_ONCE(session->active) = 0;
		goto end;
	}
	ret = lttng_statedump_start(session);
	if (ret)
		ACCESS_ONCE(session->active) = 0;
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
260
/*
 * Stop tracing for a session: deactivate it, sync enablers to the
 * "disabled" transient state, and mark each stream quiescent so no
 * further packets are generated after stop (e.g. at destroy).
 *
 * Returns 0 on success, -EBUSY if the session is not active.
 * Takes the sessions mutex.
 */
int lttng_session_disable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;

	mutex_lock(&sessions_mutex);
	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}
	ACCESS_ONCE(session->active) = 0;

	/* Set transient enabler state to "disabled" */
	session->tstate = 0;
	lttng_session_sync_enablers(session);

	/* Set each stream's quiescent state. */
	list_for_each_entry(chan, &session->chan, list)
		lib_ring_buffer_set_quiescent_channel(chan->chan);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
284
/*
 * Enable a data channel.  Metadata channels cannot be toggled (-EPERM);
 * enabling an already-enabled channel returns -EEXIST.
 * Takes the sessions mutex.
 */
int lttng_channel_enable(struct lttng_channel *channel)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (channel->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (channel->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set transient enabler state to "enabled" */
	channel->tstate = 1;
	lttng_session_sync_enablers(channel->session);
	/* Set atomically the state to "enabled" */
	ACCESS_ONCE(channel->enabled) = 1;
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
307
/*
 * Disable a data channel.  Metadata channels cannot be toggled (-EPERM);
 * disabling an already-disabled channel returns -EEXIST.
 * Takes the sessions mutex.
 */
int lttng_channel_disable(struct lttng_channel *channel)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (channel->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (!channel->enabled) {
		ret = -EEXIST;
		goto end;
	}
	/* Set atomically the state to "disabled" */
	ACCESS_ONCE(channel->enabled) = 0;
	/* Set transient enabler state to "disabled" */
	channel->tstate = 0;
	lttng_session_sync_enablers(channel->session);
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
330
/*
 * Enable a single event.
 *
 * Tracepoint and syscall events are driven by enablers, not toggled
 * individually, hence -EINVAL.  Metadata-channel events return -EPERM,
 * already-enabled events -EEXIST.  Takes the sessions mutex.
 */
int lttng_event_enable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
		ACCESS_ONCE(event->enabled) = 1;
		break;
	case LTTNG_KERNEL_KRETPROBE:
		/* kretprobes pair entry/return events: toggle both. */
		ret = lttng_kretprobes_event_enable_state(event, 1);
		break;
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
365
/*
 * Disable a single event.  Mirror image of lttng_event_enable():
 * tracepoint/syscall events are enabler-driven (-EINVAL), metadata
 * events are forbidden (-EPERM), already-disabled events return
 * -EEXIST.  Takes the sessions mutex.
 */
int lttng_event_disable(struct lttng_event *event)
{
	int ret = 0;

	mutex_lock(&sessions_mutex);
	if (event->chan->channel_type == METADATA_CHANNEL) {
		ret = -EPERM;
		goto end;
	}
	if (!event->enabled) {
		ret = -EEXIST;
		goto end;
	}
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
	case LTTNG_KERNEL_SYSCALL:
		ret = -EINVAL;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
		ACCESS_ONCE(event->enabled) = 0;
		break;
	case LTTNG_KERNEL_KRETPROBE:
		/* kretprobes pair entry/return events: toggle both. */
		ret = lttng_kretprobes_event_enable_state(event, 0);
		break;
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
	}
end:
	mutex_unlock(&sessions_mutex);
	return ret;
}
400
401 static struct lttng_transport *lttng_transport_find(const char *name)
402 {
403 struct lttng_transport *transport;
404
405 list_for_each_entry(transport, &lttng_transport_list, node) {
406 if (!strcmp(transport->name, name))
407 return transport;
408 }
409 return NULL;
410 }
411
/*
 * Create a channel within a session, backed by the named transport.
 *
 * Refuses to add non-metadata channels to a session that has already
 * been active (header types are frozen at first start).  Pins the
 * transport module for the channel's lifetime.
 *
 * Returns the new channel, or NULL on error.  Takes the sessions mutex.
 */
struct lttng_channel *lttng_channel_create(struct lttng_session *session,
				       const char *transport_name,
				       void *buf_addr,
				       size_t subbuf_size, size_t num_subbuf,
				       unsigned int switch_timer_interval,
				       unsigned int read_timer_interval,
				       enum channel_type channel_type)
{
	struct lttng_channel *chan;
	struct lttng_transport *transport = NULL;

	mutex_lock(&sessions_mutex);
	if (session->been_active && channel_type != METADATA_CHANNEL)
		goto active;	/* Refuse to add channel to active session */
	transport = lttng_transport_find(transport_name);
	if (!transport) {
		printk(KERN_WARNING "LTTng transport %s not found\n",
		       transport_name);
		goto notransport;
	}
	if (!try_module_get(transport->owner)) {
		printk(KERN_WARNING "LTT : Can't lock transport module.\n");
		goto notransport;
	}
	chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
	if (!chan)
		goto nomem;
	chan->session = session;
	chan->id = session->free_chan_id++;
	chan->ops = &transport->ops;
	/*
	 * Note: the channel creation op already writes into the packet
	 * headers. Therefore the "chan" information used as input
	 * should be already accessible.
	 */
	chan->chan = transport->ops.channel_create(transport_name,
			chan, buf_addr, subbuf_size, num_subbuf,
			switch_timer_interval, read_timer_interval);
	if (!chan->chan)
		goto create_error;
	chan->tstate = 1;
	chan->enabled = 1;
	chan->transport = transport;
	chan->channel_type = channel_type;
	list_add(&chan->list, &session->chan);
	mutex_unlock(&sessions_mutex);
	return chan;

create_error:
	kfree(chan);
nomem:
	if (transport)
		module_put(transport->owner);
notransport:
active:
	mutex_unlock(&sessions_mutex);
	return NULL;
}
470
/*
 * Only used internally at session destruction for per-cpu channels, and
 * when metadata channel is released.
 * Needs to be called with sessions mutex held.
 * Releases the transport module reference taken at channel creation.
 */
static
void _lttng_channel_destroy(struct lttng_channel *chan)
{
	chan->ops->channel_destroy(chan->chan);
	module_put(chan->transport->owner);
	list_del(&chan->list);
	lttng_destroy_context(chan->ctx);
	kfree(chan);
}
485
/*
 * Release path for the metadata channel (called from its file release,
 * hence exported).  Only valid on METADATA_CHANNEL channels.
 */
void lttng_metadata_channel_destroy(struct lttng_channel *chan)
{
	BUG_ON(chan->channel_type != METADATA_CHANNEL);

	/* Protect the metadata cache with the sessions_mutex. */
	mutex_lock(&sessions_mutex);
	_lttng_channel_destroy(chan);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
496
/*
 * Mark a metadata stream finalized and wake any blocked reader so it
 * can observe end-of-stream.  finalized is set before the wakeup so
 * the woken reader sees it.
 */
static
void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
{
	stream->finalized = 1;
	wake_up_interruptible(&stream->read_wait);
}
503
/*
 * Supports event creation while tracing session is active.
 * Needs to be called with sessions mutex held.
 *
 * Creates an event of the requested instrumentation type in a channel,
 * registers the underlying probe where applicable, emits its metadata,
 * and links it into the session's event list and hash table.
 *
 * Returns the new event or ERR_PTR(-errno) — never NULL.
 */
struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
				struct lttng_kernel_event *event_param,
				void *filter,
				const struct lttng_event_desc *event_desc,
				enum lttng_kernel_instrumentation itype)
{
	struct lttng_session *session = chan->session;
	struct lttng_event *event;
	const char *event_name;
	struct hlist_head *head;
	size_t name_len;
	uint32_t hash;
	int ret;

	/* Channel event id space exhausted. */
	if (chan->free_event_id == -1U) {
		ret = -EMFILE;
		goto full;
	}

	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		event_name = event_desc->name;
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		event_name = event_param->name;
		break;
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto type_error;
	}
	/* Reject duplicate (name, channel) pairs via the session hash table. */
	name_len = strlen(event_name);
	hash = jhash(event_name, name_len, 0);
	head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
	lttng_hlist_for_each_entry(event, head, hlist) {
		WARN_ON_ONCE(!event->desc);
		if (!strncmp(event->desc->name, event_name,
					LTTNG_KERNEL_SYM_NAME_LEN - 1)
				&& chan == event->chan) {
			ret = -EEXIST;
			goto exist;
		}
	}

	event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
	if (!event) {
		ret = -ENOMEM;
		goto cache_error;
	}
	event->chan = chan;
	event->filter = filter;
	event->id = chan->free_event_id++;
	event->instrumentation = itype;
	event->evtype = LTTNG_TYPE_EVENT;
	INIT_LIST_HEAD(&event->bytecode_runtime_head);
	INIT_LIST_HEAD(&event->enablers_ref_head);

	switch (itype) {
	case LTTNG_KERNEL_TRACEPOINT:
		/* Event will be enabled by enabler sync. */
		event->enabled = 0;
		event->registered = 0;
		event->desc = lttng_event_get(event_name);
		if (!event->desc) {
			ret = -ENOENT;
			goto register_error;
		}
		/* Populate lttng_event structure before event registration. */
		smp_wmb();
		break;
	case LTTNG_KERNEL_KPROBE:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();
		ret = lttng_kprobes_register(event_name,
				event_param->u.kprobe.symbol_name,
				event_param->u.kprobe.offset,
				event_param->u.kprobe.addr,
				event);
		if (ret) {
			ret = -EINVAL;
			goto register_error;
		}
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_KRETPROBE:
	{
		struct lttng_event *event_return;

		/* kretprobe defines 2 events */
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		event_return =
			kmem_cache_zalloc(event_cache, GFP_KERNEL);
		if (!event_return) {
			ret = -ENOMEM;
			goto register_error;
		}
		event_return->chan = chan;
		event_return->filter = filter;
		event_return->id = chan->free_event_id++;
		event_return->enabled = 0;
		event_return->registered = 1;
		event_return->instrumentation = itype;
		/*
		 * Populate lttng_event structure before kretprobe registration.
		 */
		smp_wmb();
		ret = lttng_kretprobes_register(event_name,
				event_param->u.kretprobe.symbol_name,
				event_param->u.kretprobe.offset,
				event_param->u.kretprobe.addr,
				event, event_return);
		if (ret) {
			kmem_cache_free(event_cache, event_return);
			ret = -EINVAL;
			goto register_error;
		}
		/* Take 2 refs on the module: one per event. */
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		ret = _lttng_event_metadata_statedump(chan->session, chan,
						    event_return);
		WARN_ON_ONCE(ret > 0);
		if (ret) {
			kmem_cache_free(event_cache, event_return);
			module_put(event->desc->owner);
			module_put(event->desc->owner);
			goto statedump_error;
		}
		list_add(&event_return->list, &chan->session->events);
		break;
	}
	case LTTNG_KERNEL_FUNCTION:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 1;
		/*
		 * Populate lttng_event structure before event
		 * registration.
		 */
		smp_wmb();
		ret = lttng_ftrace_register(event_name,
				event_param->u.ftrace.symbol_name,
				event);
		if (ret) {
			goto register_error;
		}
		ret = try_module_get(event->desc->owner);
		WARN_ON_ONCE(!ret);
		break;
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		/*
		 * Needs to be explicitly enabled after creation, since
		 * we may want to apply filters.
		 */
		event->enabled = 0;
		event->registered = 0;
		event->desc = event_desc;
		if (!event->desc) {
			ret = -EINVAL;
			goto register_error;
		}
		break;
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto register_error;
	}
	ret = _lttng_event_metadata_statedump(chan->session, chan, event);
	WARN_ON_ONCE(ret > 0);
	if (ret) {
		goto statedump_error;
	}
	hlist_add_head(&event->hlist, head);
	list_add(&event->list, &chan->session->events);
	return event;

statedump_error:
	/* If a statedump error occurs, events will not be readable. */
register_error:
	kmem_cache_free(event_cache, event);
cache_error:
exist:
type_error:
full:
	return ERR_PTR(ret);
}
719
720 struct lttng_event *lttng_event_create(struct lttng_channel *chan,
721 struct lttng_kernel_event *event_param,
722 void *filter,
723 const struct lttng_event_desc *event_desc,
724 enum lttng_kernel_instrumentation itype)
725 {
726 struct lttng_event *event;
727
728 mutex_lock(&sessions_mutex);
729 event = _lttng_event_create(chan, event_param, filter, event_desc,
730 itype);
731 mutex_unlock(&sessions_mutex);
732 return event;
733 }
734
/*
 * Attach an event's probe to its instrumentation source if not already
 * registered.  Kprobe/kretprobe/function/noop events register at
 * creation time, so they are no-ops here.
 * Comment says "Only used for tracepoints for now", yet syscalls are
 * handled too — presumably via the enabler sync path.
 */
static
void register_event(struct lttng_event *event)
{
	const struct lttng_event_desc *desc;
	int ret = -EINVAL;

	if (event->registered)
		return;

	desc = event->desc;
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
						  desc->probe_callback,
						  event);
		break;
	case LTTNG_KERNEL_SYSCALL:
		ret = lttng_syscall_filter_enable(event->chan,
			desc->name);
		break;
	case LTTNG_KERNEL_KPROBE:
	case LTTNG_KERNEL_KRETPROBE:
	case LTTNG_KERNEL_FUNCTION:
	case LTTNG_KERNEL_NOOP:
		ret = 0;
		break;
	default:
		WARN_ON_ONCE(1);
	}
	/* Only mark registered on success so unregister stays balanced. */
	if (!ret)
		event->registered = 1;
}
768
769 /*
770 * Only used internally at session destruction.
771 */
772 int _lttng_event_unregister(struct lttng_event *event)
773 {
774 const struct lttng_event_desc *desc;
775 int ret = -EINVAL;
776
777 if (!event->registered)
778 return 0;
779
780 desc = event->desc;
781 switch (event->instrumentation) {
782 case LTTNG_KERNEL_TRACEPOINT:
783 ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
784 event->desc->probe_callback,
785 event);
786 break;
787 case LTTNG_KERNEL_KPROBE:
788 lttng_kprobes_unregister(event);
789 ret = 0;
790 break;
791 case LTTNG_KERNEL_KRETPROBE:
792 lttng_kretprobes_unregister(event);
793 ret = 0;
794 break;
795 case LTTNG_KERNEL_FUNCTION:
796 lttng_ftrace_unregister(event);
797 ret = 0;
798 break;
799 case LTTNG_KERNEL_SYSCALL:
800 ret = lttng_syscall_filter_disable(event->chan,
801 desc->name);
802 break;
803 case LTTNG_KERNEL_NOOP:
804 ret = 0;
805 break;
806 default:
807 WARN_ON_ONCE(1);
808 }
809 if (!ret)
810 event->registered = 0;
811 return ret;
812 }
813
/*
 * Only used internally at session destruction.
 * Frees an event and drops the probe-module references taken at
 * creation (one per kprobe/kretprobe/function event).  Caller must
 * have unregistered the event first.
 */
static
void _lttng_event_destroy(struct lttng_event *event)
{
	switch (event->instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		lttng_event_put(event->desc);
		break;
	case LTTNG_KERNEL_KPROBE:
		module_put(event->desc->owner);
		lttng_kprobes_destroy_private(event);
		break;
	case LTTNG_KERNEL_KRETPROBE:
		module_put(event->desc->owner);
		lttng_kretprobes_destroy_private(event);
		break;
	case LTTNG_KERNEL_FUNCTION:
		module_put(event->desc->owner);
		lttng_ftrace_destroy_private(event);
		break;
	case LTTNG_KERNEL_NOOP:
	case LTTNG_KERNEL_SYSCALL:
		break;
	default:
		WARN_ON_ONCE(1);
	}
	list_del(&event->list);
	lttng_destroy_context(event->ctx);
	kmem_cache_free(event_cache, event);
}
846
/*
 * Track a PID in the session's PID tracker.  pid == -1 means "track
 * all PIDs", implemented by destroying the tracker (a NULL tracker
 * means no filtering).  Takes the sessions mutex.
 *
 * NOTE(review): when the tracker is first created and
 * lttng_pid_tracker_add() fails, the (empty) tracker is still
 * published — presumably intentional; confirm against callers.
 */
int lttng_session_track_pid(struct lttng_session *session, int pid)
{
	int ret;

	if (pid < -1)
		return -EINVAL;
	mutex_lock(&sessions_mutex);
	if (pid == -1) {
		/* track all pids: destroy tracker. */
		if (session->pid_tracker) {
			struct lttng_pid_tracker *lpf;

			lpf = session->pid_tracker;
			rcu_assign_pointer(session->pid_tracker, NULL);
			/* Wait for probe-side RCU readers before freeing. */
			synchronize_trace();
			lttng_pid_tracker_destroy(lpf);
		}
		ret = 0;
	} else {
		if (!session->pid_tracker) {
			struct lttng_pid_tracker *lpf;

			lpf = lttng_pid_tracker_create();
			if (!lpf) {
				ret = -ENOMEM;
				goto unlock;
			}
			ret = lttng_pid_tracker_add(lpf, pid);
			rcu_assign_pointer(session->pid_tracker, lpf);
		} else {
			ret = lttng_pid_tracker_add(session->pid_tracker, pid);
		}
	}
unlock:
	mutex_unlock(&sessions_mutex);
	return ret;
}
884
/*
 * Stop tracking a PID.  pid == -1 means "untrack all PIDs": the
 * tracker is replaced by a freshly-created empty one (an empty tracker
 * matches nothing, unlike a NULL tracker which matches everything).
 * Takes the sessions mutex.
 */
int lttng_session_untrack_pid(struct lttng_session *session, int pid)
{
	int ret;

	if (pid < -1)
		return -EINVAL;
	mutex_lock(&sessions_mutex);
	if (pid == -1) {
		/* untrack all pids: replace by empty tracker. */
		struct lttng_pid_tracker *old_lpf = session->pid_tracker;
		struct lttng_pid_tracker *lpf;

		lpf = lttng_pid_tracker_create();
		if (!lpf) {
			ret = -ENOMEM;
			goto unlock;
		}
		rcu_assign_pointer(session->pid_tracker, lpf);
		/* Wait for probe-side RCU readers before freeing. */
		synchronize_trace();
		if (old_lpf)
			lttng_pid_tracker_destroy(old_lpf);
		ret = 0;
	} else {
		if (!session->pid_tracker) {
			ret = -ENOENT;
			goto unlock;
		}
		ret = lttng_pid_tracker_del(session->pid_tracker, pid);
	}
unlock:
	mutex_unlock(&sessions_mutex);
	return ret;
}
918
/*
 * seq_file start callback for the tracked-PIDs listing.
 * Takes the sessions mutex (released in pid_list_stop) and walks the
 * tracker hash to the *pos-th entry.  When no tracker exists, returns
 * the session pointer itself as a sentinel meaning "tracker disabled"
 * (rendered as pid = -1 by pid_list_show).
 */
static
void *pid_list_start(struct seq_file *m, loff_t *pos)
{
	struct lttng_session *session = m->private;
	struct lttng_pid_tracker *lpf;
	struct lttng_pid_hash_node *e;
	int iter = 0, i;

	mutex_lock(&sessions_mutex);
	lpf = session->pid_tracker;
	if (lpf) {
		for (i = 0; i < LTTNG_PID_TABLE_SIZE; i++) {
			struct hlist_head *head = &lpf->pid_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *pos)
					return e;
			}
		}
	} else {
		/* PID tracker disabled. */
		if (iter >= *pos && iter == 0) {
			return session;	/* empty tracker */
		}
		iter++;
	}
	/* End of list */
	return NULL;
}
948
/* Called with sessions_mutex held. */
/*
 * seq_file next callback: advance *ppos and rescan the hash from the
 * beginning to the new position.  The rescan makes a full listing
 * O(n^2), which is acceptable for a PID table of bounded size.
 */
static
void *pid_list_next(struct seq_file *m, void *p, loff_t *ppos)
{
	struct lttng_session *session = m->private;
	struct lttng_pid_tracker *lpf;
	struct lttng_pid_hash_node *e;
	int iter = 0, i;

	(*ppos)++;
	lpf = session->pid_tracker;
	if (lpf) {
		for (i = 0; i < LTTNG_PID_TABLE_SIZE; i++) {
			struct hlist_head *head = &lpf->pid_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *ppos)
					return e;
			}
		}
	} else {
		/* PID tracker disabled. */
		if (iter >= *ppos && iter == 0)
			return session;	/* empty tracker */
		iter++;
	}

	/* End of list */
	return NULL;
}
979
/* seq_file stop callback: release the mutex taken in pid_list_start. */
static
void pid_list_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&sessions_mutex);
}
985
986 static
987 int pid_list_show(struct seq_file *m, void *p)
988 {
989 int pid;
990
991 if (p == m->private) {
992 /* Tracker disabled. */
993 pid = -1;
994 } else {
995 const struct lttng_pid_hash_node *e = p;
996
997 pid = lttng_pid_tracker_get_node_pid(e);
998 }
999 seq_printf(m, "process { pid = %d; };\n", pid);
1000 return 0;
1001 }
1002
/* seq_file iterator for the tracked-PIDs pseudo-file. */
static
const struct seq_operations lttng_tracker_pids_list_seq_ops = {
	.start = pid_list_start,
	.next = pid_list_next,
	.stop = pid_list_stop,
	.show = pid_list_show,
};
1010
/* open() for the tracked-PIDs file: bind the seq_file iterator. */
static
int lttng_tracker_pids_list_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &lttng_tracker_pids_list_seq_ops);
}
1016
/*
 * release() for the tracked-PIDs file: tear down the seq_file and drop
 * the session file reference taken in lttng_session_list_tracker_pids().
 */
static
int lttng_tracker_pids_list_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct lttng_session *session = m->private;
	int ret;

	WARN_ON_ONCE(!session);
	ret = seq_release(inode, file);
	if (!ret && session)
		fput(session->file);
	return ret;
}
1030
/* File operations for the anonymous tracked-PIDs listing file. */
const struct file_operations lttng_tracker_pids_list_fops = {
	.owner = THIS_MODULE,
	.open = lttng_tracker_pids_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = lttng_tracker_pids_list_release,
};
1038
1039 int lttng_session_list_tracker_pids(struct lttng_session *session)
1040 {
1041 struct file *tracker_pids_list_file;
1042 struct seq_file *m;
1043 int file_fd, ret;
1044
1045 file_fd = lttng_get_unused_fd();
1046 if (file_fd < 0) {
1047 ret = file_fd;
1048 goto fd_error;
1049 }
1050
1051 tracker_pids_list_file = anon_inode_getfile("[lttng_tracker_pids_list]",
1052 &lttng_tracker_pids_list_fops,
1053 NULL, O_RDWR);
1054 if (IS_ERR(tracker_pids_list_file)) {
1055 ret = PTR_ERR(tracker_pids_list_file);
1056 goto file_error;
1057 }
1058 if (atomic_long_add_unless(&session->file->f_count,
1059 1, INT_MAX) == INT_MAX) {
1060 goto refcount_error;
1061 }
1062 ret = lttng_tracker_pids_list_fops.open(NULL, tracker_pids_list_file);
1063 if (ret < 0)
1064 goto open_error;
1065 m = tracker_pids_list_file->private_data;
1066 m->private = session;
1067 fd_install(file_fd, tracker_pids_list_file);
1068
1069 return file_fd;
1070
1071 open_error:
1072 atomic_long_dec(&session->file->f_count);
1073 refcount_error:
1074 fput(tracker_pids_list_file);
1075 file_error:
1076 put_unused_fd(file_fd);
1077 fd_error:
1078 return ret;
1079 }
1080
1081 /*
1082 * Enabler management.
1083 */
/*
 * Wildcard match: 'name' is an enabler pattern known to end with '*';
 * succeed when desc_name starts with the pattern minus that final '*'.
 * Returns 1 on match, 0 otherwise.
 */
static
int lttng_match_enabler_wildcard(const char *desc_name,
		const char *name)
{
	size_t prefix_len = strlen(name) - 1;	/* exclude final '*' */

	return strncmp(desc_name, name, prefix_len) == 0;
}
1093
/*
 * Exact-name match.  Returns 1 when desc_name equals name, 0 otherwise.
 */
static
int lttng_match_enabler_name(const char *desc_name,
		const char *name)
{
	return strcmp(desc_name, name) == 0;
}
1102
/*
 * Check whether an event descriptor matches an enabler.
 *
 * For syscalls, strip the "compat_" prefix and the
 * "syscall_entry_"/"syscall_exit_" prefix from the descriptor name
 * before comparing, so enabler names are phase-agnostic.
 *
 * Returns 1 on match, 0 on mismatch, -EINVAL on malformed input
 * (note: callers that treat the result as a boolean will see -EINVAL
 * as truthy).
 */
static
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	const char *desc_name, *enabler_name;

	enabler_name = enabler->event_param.name;
	switch (enabler->event_param.instrumentation) {
	case LTTNG_KERNEL_TRACEPOINT:
		desc_name = desc->name;
		break;
	case LTTNG_KERNEL_SYSCALL:
		desc_name = desc->name;
		if (!strncmp(desc_name, "compat_", strlen("compat_")))
			desc_name += strlen("compat_");
		if (!strncmp(desc_name, "syscall_exit_",
				strlen("syscall_exit_"))) {
			desc_name += strlen("syscall_exit_");
		} else if (!strncmp(desc_name, "syscall_entry_",
				strlen("syscall_entry_"))) {
			desc_name += strlen("syscall_entry_");
		} else {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	switch (enabler->type) {
	case LTTNG_ENABLER_WILDCARD:
		return lttng_match_enabler_wildcard(desc_name, enabler_name);
	case LTTNG_ENABLER_NAME:
		return lttng_match_enabler_name(desc_name, enabler_name);
	default:
		return -EINVAL;
	}
}
1142
1143 static
1144 int lttng_event_match_enabler(struct lttng_event *event,
1145 struct lttng_enabler *enabler)
1146 {
1147 if (enabler->event_param.instrumentation != event->instrumentation)
1148 return 0;
1149 if (lttng_desc_match_enabler(event->desc, enabler)
1150 && event->chan == enabler->chan)
1151 return 1;
1152 else
1153 return 0;
1154 }
1155
1156 static
1157 struct lttng_enabler_ref *lttng_event_enabler_ref(struct lttng_event *event,
1158 struct lttng_enabler *enabler)
1159 {
1160 struct lttng_enabler_ref *enabler_ref;
1161
1162 list_for_each_entry(enabler_ref,
1163 &event->enablers_ref_head, node) {
1164 if (enabler_ref->ref == enabler)
1165 return enabler_ref;
1166 }
1167 return NULL;
1168 }
1169
/*
 * Walk every registered probe descriptor and, for each event descriptor
 * matching this enabler, create the corresponding lttng_event on the
 * enabler's channel if it does not exist there yet.
 * Must be called with sessions mutex held (accesses session state).
 */
static
void lttng_create_tracepoint_if_missing(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_probe_desc *probe_desc;
	const struct lttng_event_desc *desc;
	int i;
	struct list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	/*
	 * For each probe event, if we find that a probe event matches
	 * our enabler, create an associated lttng_event if not
	 * already present.
	 */
	list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			int found = 0;
			struct hlist_head *head;
			const char *event_name;
			size_t name_len;
			uint32_t hash;
			struct lttng_event *event;

			desc = probe_desc->event_desc[i];
			if (!lttng_desc_match_enabler(desc, enabler))
				continue;
			event_name = desc->name;
			name_len = strlen(event_name);

			/*
			 * Check if already created: look up by name hash,
			 * then compare both descriptor and channel, since
			 * the same event may exist on other channels.
			 */
			hash = jhash(event_name, name_len, 0);
			head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
			lttng_hlist_for_each_entry(event, head, hlist) {
				if (event->desc == desc
						&& event->chan == enabler->chan)
					found = 1;
			}
			if (found)
				continue;

			/*
			 * We need to create an event for this
			 * event probe.
			 */
			event = _lttng_event_create(enabler->chan,
					NULL, NULL, desc,
					LTTNG_KERNEL_TRACEPOINT);
			if (!event) {
				/* Best effort: log and keep scanning. */
				printk(KERN_INFO "Unable to create event %s\n",
					probe_desc->event_desc[i]->name);
			}
		}
	}
}
1227
1228 static
1229 void lttng_create_syscall_if_missing(struct lttng_enabler *enabler)
1230 {
1231 int ret;
1232
1233 ret = lttng_syscalls_register(enabler->chan, NULL);
1234 WARN_ON_ONCE(ret);
1235 }
1236
1237 /*
1238 * Create struct lttng_event if it is missing and present in the list of
1239 * tracepoint probes.
1240 * Should be called with sessions mutex held.
1241 */
1242 static
1243 void lttng_create_event_if_missing(struct lttng_enabler *enabler)
1244 {
1245 switch (enabler->event_param.instrumentation) {
1246 case LTTNG_KERNEL_TRACEPOINT:
1247 lttng_create_tracepoint_if_missing(enabler);
1248 break;
1249 case LTTNG_KERNEL_SYSCALL:
1250 lttng_create_syscall_if_missing(enabler);
1251 break;
1252 default:
1253 WARN_ON_ONCE(1);
1254 break;
1255 }
1256 }
1257
1258 /*
1259 * Create events associated with an enabler (if not already present),
1260 * and add backward reference from the event to the enabler.
1261 * Should be called with sessions mutex held.
1262 */
/*
 * Create events associated with an enabler (if not already present),
 * and add backward reference from the event to the enabler.
 * Should be called with sessions mutex held.
 * Returns 0 on success, -ENOMEM if a backward reference allocation
 * fails (references added so far are kept).
 */
static
int lttng_enabler_ref_events(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_event *event;

	/* First ensure that probe events are created for this enabler. */
	lttng_create_event_if_missing(enabler);

	/* For each event matching enabler in session event list. */
	list_for_each_entry(event, &session->events, list) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_match_enabler(event, enabler))
			continue;
		enabler_ref = lttng_event_enabler_ref(event, enabler);
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event to enabler.
			 */
			enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
			if (!enabler_ref)
				return -ENOMEM;
			enabler_ref->ref = enabler;
			list_add(&enabler_ref->node,
				&event->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_event_link_bytecode(event, enabler);

		/* TODO: merge event context. */
	}
	return 0;
}
1301
1302 /*
1303 * Called at module load: connect the probe on all enablers matching
1304 * this event.
1305 * Called with sessions lock held.
1306 */
1307 int lttng_fix_pending_events(void)
1308 {
1309 struct lttng_session *session;
1310
1311 list_for_each_entry(session, &sessions, list)
1312 lttng_session_lazy_sync_enablers(session);
1313 return 0;
1314 }
1315
1316 struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
1317 struct lttng_kernel_event *event_param,
1318 struct lttng_channel *chan)
1319 {
1320 struct lttng_enabler *enabler;
1321
1322 enabler = kzalloc(sizeof(*enabler), GFP_KERNEL);
1323 if (!enabler)
1324 return NULL;
1325 enabler->type = type;
1326 INIT_LIST_HEAD(&enabler->filter_bytecode_head);
1327 memcpy(&enabler->event_param, event_param,
1328 sizeof(enabler->event_param));
1329 enabler->chan = chan;
1330 /* ctx left NULL */
1331 enabler->enabled = 0;
1332 enabler->evtype = LTTNG_TYPE_ENABLER;
1333 mutex_lock(&sessions_mutex);
1334 list_add(&enabler->node, &enabler->chan->session->enablers_head);
1335 lttng_session_lazy_sync_enablers(enabler->chan->session);
1336 mutex_unlock(&sessions_mutex);
1337 return enabler;
1338 }
1339
1340 int lttng_enabler_enable(struct lttng_enabler *enabler)
1341 {
1342 mutex_lock(&sessions_mutex);
1343 enabler->enabled = 1;
1344 lttng_session_lazy_sync_enablers(enabler->chan->session);
1345 mutex_unlock(&sessions_mutex);
1346 return 0;
1347 }
1348
1349 int lttng_enabler_disable(struct lttng_enabler *enabler)
1350 {
1351 mutex_lock(&sessions_mutex);
1352 enabler->enabled = 0;
1353 lttng_session_lazy_sync_enablers(enabler->chan->session);
1354 mutex_unlock(&sessions_mutex);
1355 return 0;
1356 }
1357
1358 int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
1359 struct lttng_kernel_filter_bytecode __user *bytecode)
1360 {
1361 struct lttng_filter_bytecode_node *bytecode_node;
1362 uint32_t bytecode_len;
1363 int ret;
1364
1365 ret = get_user(bytecode_len, &bytecode->len);
1366 if (ret)
1367 return ret;
1368 bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
1369 GFP_KERNEL);
1370 if (!bytecode_node)
1371 return -ENOMEM;
1372 ret = copy_from_user(&bytecode_node->bc, bytecode,
1373 sizeof(*bytecode) + bytecode_len);
1374 if (ret)
1375 goto error_free;
1376 bytecode_node->enabler = enabler;
1377 /* Enforce length based on allocated size */
1378 bytecode_node->bc.len = bytecode_len;
1379 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
1380 lttng_session_lazy_sync_enablers(enabler->chan->session);
1381 return 0;
1382
1383 error_free:
1384 kfree(bytecode_node);
1385 return ret;
1386 }
1387
/*
 * Per-enabler context attachment is not implemented in this version:
 * always returns -ENOSYS. Parameters are accepted for ABI symmetry.
 */
int lttng_enabler_attach_context(struct lttng_enabler *enabler,
		struct lttng_kernel_context *context_param)
{
	return -ENOSYS;
}
1393
1394 static
1395 void lttng_enabler_destroy(struct lttng_enabler *enabler)
1396 {
1397 struct lttng_filter_bytecode_node *filter_node, *tmp_filter_node;
1398
1399 /* Destroy filter bytecode */
1400 list_for_each_entry_safe(filter_node, tmp_filter_node,
1401 &enabler->filter_bytecode_head, node) {
1402 kfree(filter_node);
1403 }
1404
1405 /* Destroy contexts */
1406 lttng_destroy_context(enabler->ctx);
1407
1408 list_del(&enabler->node);
1409 kfree(enabler);
1410 }
1411
1412 /*
1413 * lttng_session_sync_enablers should be called just before starting a
1414 * session.
1415 * Should be called with sessions mutex held.
1416 */
/*
 * lttng_session_sync_enablers should be called just before starting a
 * session.
 * Should be called with sessions mutex held.
 */
static
void lttng_session_sync_enablers(struct lttng_session *session)
{
	struct lttng_enabler *enabler;
	struct lttng_event *event;

	/* First, instantiate missing events for every enabler. */
	list_for_each_entry(enabler, &session->enablers_head, node)
		lttng_enabler_ref_events(enabler);
	/*
	 * For each event, if at least one of its enablers is enabled,
	 * and its channel and session transient states are enabled, we
	 * enable the event, else we disable it.
	 */
	list_for_each_entry(event, &session->events, list) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		switch (event->instrumentation) {
		case LTTNG_KERNEL_TRACEPOINT:
		case LTTNG_KERNEL_SYSCALL:
			/* Enable events */
			list_for_each_entry(enabler_ref,
					&event->enablers_ref_head, node) {
				if (enabler_ref->ref->enabled) {
					enabled = 1;
					break;
				}
			}
			break;
		default:
			/* Not handled with lazy sync. */
			continue;
		}
		/*
		 * Enabled state is based on union of enablers, with
		 * intersection of session and channel transient enable
		 * states.
		 */
		enabled = enabled && session->tstate && event->chan->tstate;

		ACCESS_ONCE(event->enabled) = enabled;
		/*
		 * Sync tracepoint registration with event enabled
		 * state.
		 */
		if (enabled) {
			register_event(event);
		} else {
			_lttng_event_unregister(event);
		}

		/* Check if has enablers without bytecode enabled */
		list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters */
		list_for_each_entry(runtime,
				&event->bytecode_runtime_head, node)
			lttng_filter_sync_state(runtime);
	}
}
1487
1488 /*
1489 * Apply enablers to session events, adding events to session if need
1490 * be. It is required after each modification applied to an active
1491 * session, and right before session "start".
1492 * "lazy" sync means we only sync if required.
1493 * Should be called with sessions mutex held.
1494 */
1495 static
1496 void lttng_session_lazy_sync_enablers(struct lttng_session *session)
1497 {
1498 /* We can skip if session is not active */
1499 if (!session->active)
1500 return;
1501 lttng_session_sync_enablers(session);
1502 }
1503
1504 /*
1505 * Serialize at most one packet worth of metadata into a metadata
1506 * channel.
1507 * We grab the metadata cache mutex to get exclusive access to our metadata
1508 * buffer and to the metadata cache. Exclusive access to the metadata buffer
1509 * allows us to do racy operations such as looking for remaining space left in
1510 * packet and write, since mutual exclusion protects us from concurrent writes.
1511 * Mutual exclusion on the metadata cache allow us to read the cache content
1512 * without racing against reallocation of the cache by updates.
1513 * Returns the number of bytes written in the channel, 0 if no data
1514 * was written and a negative value on error.
1515 */
/*
 * Serialize at most one packet worth of metadata from the cache into
 * the metadata ring-buffer channel. See the header comment above for
 * the locking rationale. Returns the number of bytes written, 0 if no
 * data was written, and a negative value on reservation error.
 */
int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
		struct channel *chan)
{
	struct lib_ring_buffer_ctx ctx;
	int ret = 0;
	size_t len, reserve_len;

	/*
	 * Ensure we support multiple get_next / put sequences followed by
	 * put_next. The metadata cache lock protects reading the metadata
	 * cache. It can indeed be read concurrently by "get_next_subbuf" and
	 * "flush" operations on the buffer invoked by different processes.
	 * Moreover, since the metadata cache memory can be reallocated, we
	 * need to have exclusive access against updates even though we only
	 * read it.
	 */
	mutex_lock(&stream->metadata_cache->lock);
	WARN_ON(stream->metadata_in < stream->metadata_out);
	/* Data previously pushed to the buffer not consumed yet: wait. */
	if (stream->metadata_in != stream->metadata_out)
		goto end;

	/* Bytes of cached metadata not yet pushed to this stream. */
	len = stream->metadata_cache->metadata_written -
		stream->metadata_in;
	if (!len)
		goto end;
	reserve_len = min_t(size_t,
			stream->transport->ops.packet_avail_size(chan),
			len);
	lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
			sizeof(char), -1);
	/*
	 * If reservation failed, return an error to the caller.
	 */
	ret = stream->transport->ops.event_reserve(&ctx, 0);
	if (ret != 0) {
		printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
		goto end;
	}
	stream->transport->ops.event_write(&ctx,
			stream->metadata_cache->data + stream->metadata_in,
			reserve_len);
	stream->transport->ops.event_commit(&ctx);
	stream->metadata_in += reserve_len;
	ret = reserve_len;

end:
	mutex_unlock(&stream->metadata_cache->lock);
	return ret;
}
1565
1566 /*
1567 * Write the metadata to the metadata cache.
1568 * Must be called with sessions_mutex held.
1569 * The metadata cache lock protects us from concurrent read access from
1570 * thread outputting metadata content to ring buffer.
1571 */
/*
 * Format and append metadata text to the session's metadata cache,
 * growing the cache (at least doubling) when needed, then wake up any
 * metadata stream readers. Must be called with sessions_mutex held and
 * only on active sessions. Returns 0 on success, -ENOMEM on failure.
 */
int lttng_metadata_printf(struct lttng_session *session,
		const char *fmt, ...)
{
	char *str;
	size_t len;
	va_list ap;
	struct lttng_metadata_stream *stream;

	WARN_ON_ONCE(!ACCESS_ONCE(session->active));

	va_start(ap, fmt);
	str = kvasprintf(GFP_KERNEL, fmt, ap);
	va_end(ap);
	if (!str)
		return -ENOMEM;

	len = strlen(str);
	mutex_lock(&session->metadata_cache->lock);
	if (session->metadata_cache->metadata_written + len >
			session->metadata_cache->cache_alloc) {
		char *tmp_cache_realloc;
		unsigned int tmp_cache_alloc_size;

		/* Grow geometrically, but at least enough to fit "len". */
		tmp_cache_alloc_size = max_t(unsigned int,
				session->metadata_cache->cache_alloc + len,
				session->metadata_cache->cache_alloc << 1);
		tmp_cache_realloc = lttng_vzalloc(tmp_cache_alloc_size);
		if (!tmp_cache_realloc)
			goto err;
		if (session->metadata_cache->data) {
			memcpy(tmp_cache_realloc,
				session->metadata_cache->data,
				session->metadata_cache->cache_alloc);
			vfree(session->metadata_cache->data);
		}

		session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
		session->metadata_cache->data = tmp_cache_realloc;
	}
	memcpy(session->metadata_cache->data +
			session->metadata_cache->metadata_written,
		str, len);
	session->metadata_cache->metadata_written += len;
	mutex_unlock(&session->metadata_cache->lock);
	kfree(str);

	/* Notify readers blocked on the metadata streams. */
	list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
		wake_up_interruptible(&stream->read_wait);

	return 0;

err:
	mutex_unlock(&session->metadata_cache->lock);
	kfree(str);
	return -ENOMEM;
}
1628
1629 /*
1630 * Must be called with sessions_mutex held.
1631 */
/*
 * Emit the CTF (TSDL) type declaration for a single event field into
 * the session's metadata cache. Handles integer, enum, array, sequence
 * (length field followed by element array) and string field types.
 * Must be called with sessions_mutex held.
 * Returns 0 on success, a negative value on error or unknown type.
 */
static
int _lttng_field_statedump(struct lttng_session *session,
		const struct lttng_event_field *field)
{
	int ret = 0;

	switch (field->type.atype) {
	case atype_integer:
		ret = lttng_metadata_printf(session,
			"		integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s;\n",
			field->type.u.basic.integer.size,
			field->type.u.basic.integer.alignment,
			field->type.u.basic.integer.signedness,
			(field->type.u.basic.integer.encoding == lttng_encode_none)
				? "none"
				: (field->type.u.basic.integer.encoding == lttng_encode_UTF8)
					? "UTF8"
					: "ASCII",
			field->type.u.basic.integer.base,
#if __BYTE_ORDER == __BIG_ENDIAN
			field->type.u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
#else
			field->type.u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
#endif
			field->name);
		break;
	case atype_enum:
		/* Enumeration types are declared elsewhere; reference by name. */
		ret = lttng_metadata_printf(session,
			"		%s _%s;\n",
			field->type.u.basic.enumeration.name,
			field->name);
		break;
	case atype_array:
	{
		const struct lttng_basic_type *elem_type;

		elem_type = &field->type.u.array.elem_type;
		ret = lttng_metadata_printf(session,
			"		integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[%u];\n",
			elem_type->u.basic.integer.size,
			elem_type->u.basic.integer.alignment,
			elem_type->u.basic.integer.signedness,
			(elem_type->u.basic.integer.encoding == lttng_encode_none)
				? "none"
				: (elem_type->u.basic.integer.encoding == lttng_encode_UTF8)
					? "UTF8"
					: "ASCII",
			elem_type->u.basic.integer.base,
#if __BYTE_ORDER == __BIG_ENDIAN
			elem_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
#else
			elem_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
#endif
			field->name, field->type.u.array.length);
		break;
	}
	case atype_sequence:
	{
		const struct lttng_basic_type *elem_type;
		const struct lttng_basic_type *length_type;

		elem_type = &field->type.u.sequence.elem_type;
		length_type = &field->type.u.sequence.length_type;
		/* A sequence is emitted as a hidden length field... */
		ret = lttng_metadata_printf(session,
			"		integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } __%s_length;\n",
			length_type->u.basic.integer.size,
			(unsigned int) length_type->u.basic.integer.alignment,
			length_type->u.basic.integer.signedness,
			(length_type->u.basic.integer.encoding == lttng_encode_none)
				? "none"
				: ((length_type->u.basic.integer.encoding == lttng_encode_UTF8)
					? "UTF8"
					: "ASCII"),
			length_type->u.basic.integer.base,
#if __BYTE_ORDER == __BIG_ENDIAN
			length_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
#else
			length_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
#endif
			field->name);
		if (ret)
			return ret;

		/* ...followed by the element array sized by that field. */
		ret = lttng_metadata_printf(session,
			"		integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[ __%s_length ];\n",
			elem_type->u.basic.integer.size,
			(unsigned int) elem_type->u.basic.integer.alignment,
			elem_type->u.basic.integer.signedness,
			(elem_type->u.basic.integer.encoding == lttng_encode_none)
				? "none"
				: ((elem_type->u.basic.integer.encoding == lttng_encode_UTF8)
					? "UTF8"
					: "ASCII"),
			elem_type->u.basic.integer.base,
#if __BYTE_ORDER == __BIG_ENDIAN
			elem_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
#else
			elem_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
#endif
			field->name,
			field->name);
		break;
	}

	case atype_string:
		/* Default encoding is UTF8 */
		ret = lttng_metadata_printf(session,
			"		string%s _%s;\n",
			field->type.u.basic.string.encoding == lttng_encode_ASCII ?
				" { encoding = ASCII; }" : "",
			field->name);
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	return ret;
}
1750
1751 static
1752 int _lttng_context_metadata_statedump(struct lttng_session *session,
1753 struct lttng_ctx *ctx)
1754 {
1755 int ret = 0;
1756 int i;
1757
1758 if (!ctx)
1759 return 0;
1760 for (i = 0; i < ctx->nr_fields; i++) {
1761 const struct lttng_ctx_field *field = &ctx->fields[i];
1762
1763 ret = _lttng_field_statedump(session, &field->event_field);
1764 if (ret)
1765 return ret;
1766 }
1767 return ret;
1768 }
1769
1770 static
1771 int _lttng_fields_metadata_statedump(struct lttng_session *session,
1772 struct lttng_event *event)
1773 {
1774 const struct lttng_event_desc *desc = event->desc;
1775 int ret = 0;
1776 int i;
1777
1778 for (i = 0; i < desc->nr_fields; i++) {
1779 const struct lttng_event_field *field = &desc->fields[i];
1780
1781 ret = _lttng_field_statedump(session, field);
1782 if (ret)
1783 return ret;
1784 }
1785 return ret;
1786 }
1787
1788 /*
1789 * Must be called with sessions_mutex held.
1790 */
/*
 * Output one event description (name, id, stream id, optional context
 * struct, payload fields struct) into the metadata cache.
 * Idempotent: dumps once per event and only while the session is
 * active; events of the metadata channel itself are never described.
 * Must be called with sessions_mutex held.
 */
static
int _lttng_event_metadata_statedump(struct lttng_session *session,
		struct lttng_channel *chan,
		struct lttng_event *event)
{
	int ret = 0;

	if (event->metadata_dumped || !ACCESS_ONCE(session->active))
		return 0;
	if (chan->channel_type == METADATA_CHANNEL)
		return 0;

	ret = lttng_metadata_printf(session,
		"event {\n"
		"	name = \"%s\";\n"
		"	id = %u;\n"
		"	stream_id = %u;\n",
		event->desc->name,
		event->id,
		event->chan->id);
	if (ret)
		goto end;

	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			"	context := struct {\n");
		if (ret)
			goto end;
	}
	ret = _lttng_context_metadata_statedump(session, event->ctx);
	if (ret)
		goto end;
	if (event->ctx) {
		ret = lttng_metadata_printf(session,
			"	};\n");
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		"	fields := struct {\n"
		);
	if (ret)
		goto end;

	ret = _lttng_fields_metadata_statedump(session, event);
	if (ret)
		goto end;

	/*
	 * LTTng space reservation can only reserve multiples of the
	 * byte size.
	 */
	ret = lttng_metadata_printf(session,
		"	};\n"
		"};\n\n");
	if (ret)
		goto end;

	/* Only marked dumped once the full description was written. */
	event->metadata_dumped = 1;
end:
	return ret;

}
1855
1856 /*
1857 * Must be called with sessions_mutex held.
1858 */
1859 static
1860 int _lttng_channel_metadata_statedump(struct lttng_session *session,
1861 struct lttng_channel *chan)
1862 {
1863 int ret = 0;
1864
1865 if (chan->metadata_dumped || !ACCESS_ONCE(session->active))
1866 return 0;
1867
1868 if (chan->channel_type == METADATA_CHANNEL)
1869 return 0;
1870
1871 WARN_ON_ONCE(!chan->header_type);
1872 ret = lttng_metadata_printf(session,
1873 "stream {\n"
1874 " id = %u;\n"
1875 " event.header := %s;\n"
1876 " packet.context := struct packet_context;\n",
1877 chan->id,
1878 chan->header_type == 1 ? "struct event_header_compact" :
1879 "struct event_header_large");
1880 if (ret)
1881 goto end;
1882
1883 if (chan->ctx) {
1884 ret = lttng_metadata_printf(session,
1885 " event.context := struct {\n");
1886 if (ret)
1887 goto end;
1888 }
1889 ret = _lttng_context_metadata_statedump(session, chan->ctx);
1890 if (ret)
1891 goto end;
1892 if (chan->ctx) {
1893 ret = lttng_metadata_printf(session,
1894 " };\n");
1895 if (ret)
1896 goto end;
1897 }
1898
1899 ret = lttng_metadata_printf(session,
1900 "};\n\n");
1901
1902 chan->metadata_dumped = 1;
1903 end:
1904 return ret;
1905 }
1906
1907 /*
1908 * Must be called with sessions_mutex held.
1909 */
/*
 * Declare the CTF "struct packet_context" type referenced by every
 * stream declaration (timestamps, sizes, discarded-event count, cpu).
 * Must be called with sessions_mutex held.
 */
static
int _lttng_stream_packet_context_declare(struct lttng_session *session)
{
	return lttng_metadata_printf(session,
		"struct packet_context {\n"
		"	uint64_clock_monotonic_t timestamp_begin;\n"
		"	uint64_clock_monotonic_t timestamp_end;\n"
		"	uint64_t content_size;\n"
		"	uint64_t packet_size;\n"
		"	unsigned long events_discarded;\n"
		"	uint32_t cpu_id;\n"
		"};\n\n"
		);
}
1924
1925 /*
1926 * Compact header:
1927 * id: range: 0 - 30.
1928 * id 31 is reserved to indicate an extended header.
1929 *
1930 * Large header:
1931 * id: range: 0 - 65534.
1932 * id 65535 is reserved to indicate an extended header.
1933 *
1934 * Must be called with sessions_mutex held.
1935 */
/*
 * Declare the two CTF event header variants (compact and large); each
 * uses its extended form when the id reaches the reserved maximum.
 * See the range comment above. Must be called with sessions_mutex held.
 */
static
int _lttng_event_header_declare(struct lttng_session *session)
{
	return lttng_metadata_printf(session,
	"struct event_header_compact {\n"
	"	enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
	"	variant <id> {\n"
	"		struct {\n"
	"			uint27_clock_monotonic_t timestamp;\n"
	"		} compact;\n"
	"		struct {\n"
	"			uint32_t id;\n"
	"			uint64_clock_monotonic_t timestamp;\n"
	"		} extended;\n"
	"	} v;\n"
	"} align(%u);\n"
	"\n"
	"struct event_header_large {\n"
	"	enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
	"	variant <id> {\n"
	"		struct {\n"
	"			uint32_clock_monotonic_t timestamp;\n"
	"		} compact;\n"
	"		struct {\n"
	"			uint32_t id;\n"
	"			uint64_clock_monotonic_t timestamp;\n"
	"		} extended;\n"
	"	} v;\n"
	"} align(%u);\n\n",
	lttng_alignof(uint32_t) * CHAR_BIT,
	lttng_alignof(uint16_t) * CHAR_BIT
	);
}
1969
1970 /*
1971 * Approximation of NTP time of day to clock monotonic correlation,
1972 * taken at start of trace.
1973 * Yes, this is only an approximation. Yes, we can (and will) do better
1974 * in future versions.
1975 * Return 0 if offset is negative. It may happen if the system sets
1976 * the REALTIME clock to 0 after boot.
1977 */
1978 static
1979 uint64_t measure_clock_offset(void)
1980 {
1981 uint64_t monotonic_avg, monotonic[2], realtime;
1982 int64_t offset;
1983 struct timespec rts = { 0, 0 };
1984 unsigned long flags;
1985
1986 /* Disable interrupts to increase correlation precision. */
1987 local_irq_save(flags);
1988 monotonic[0] = trace_clock_read64();
1989 getnstimeofday(&rts);
1990 monotonic[1] = trace_clock_read64();
1991 local_irq_restore(flags);
1992
1993 monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
1994 realtime = (uint64_t) rts.tv_sec * NSEC_PER_SEC;
1995 realtime += rts.tv_nsec;
1996 offset = (int64_t) realtime - monotonic_avg;
1997 if (offset < 0)
1998 return 0;
1999 return offset;
2000 }
2001
2002 /*
2003 * Output metadata into this session's metadata buffers.
2004 * Must be called with sessions_mutex held.
2005 */
/*
 * Output metadata into this session's metadata buffers.
 * Dumps the session-level preamble (type aliases, trace, env, clock,
 * clock-mapped integer aliases, packet context, event headers) once,
 * then the per-channel and per-event descriptions; each of those is
 * itself idempotent, so this function can be re-run to pick up newly
 * added channels/events.
 * Must be called with sessions_mutex held.
 */
static
int _lttng_session_metadata_statedump(struct lttng_session *session)
{
	unsigned char *uuid_c = session->uuid.b;
	unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
	struct lttng_channel *chan;
	struct lttng_event *event;
	int ret = 0;

	if (!ACCESS_ONCE(session->active))
		return 0;
	if (session->metadata_dumped)
		goto skip_session;

	/* Render the session UUID in canonical textual form. */
	snprintf(uuid_s, sizeof(uuid_s),
		"%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
		uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
		uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
		uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
		uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);

	ret = lttng_metadata_printf(session,
		"typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
		"typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
		"typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
		"typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
		"typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
		"typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
		"typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
		"\n"
		"trace {\n"
		"	major = %u;\n"
		"	minor = %u;\n"
		"	uuid = \"%s\";\n"
		"	byte_order = %s;\n"
		"	packet.header := struct {\n"
		"		uint32_t magic;\n"
		"		uint8_t  uuid[16];\n"
		"		uint32_t stream_id;\n"
		"	};\n"
		"};\n\n",
		lttng_alignof(uint8_t) * CHAR_BIT,
		lttng_alignof(uint16_t) * CHAR_BIT,
		lttng_alignof(uint32_t) * CHAR_BIT,
		lttng_alignof(uint64_t) * CHAR_BIT,
		sizeof(unsigned long) * CHAR_BIT,
		lttng_alignof(unsigned long) * CHAR_BIT,
		CTF_SPEC_MAJOR,
		CTF_SPEC_MINOR,
		uuid_s,
#if __BYTE_ORDER == __BIG_ENDIAN
		"be"
#else
		"le"
#endif
		);
	if (ret)
		goto end;

	ret = lttng_metadata_printf(session,
		"env {\n"
		"	hostname = \"%s\";\n"
		"	domain = \"kernel\";\n"
		"	sysname = \"%s\";\n"
		"	kernel_release = \"%s\";\n"
		"	kernel_version = \"%s\";\n"
		"	tracer_name = \"lttng-modules\";\n"
		"	tracer_major = %d;\n"
		"	tracer_minor = %d;\n"
		"	tracer_patchlevel = %d;\n"
		"};\n\n",
		current->nsproxy->uts_ns->name.nodename,
		utsname()->sysname,
		utsname()->release,
		utsname()->version,
		LTTNG_MODULES_MAJOR_VERSION,
		LTTNG_MODULES_MINOR_VERSION,
		LTTNG_MODULES_PATCHLEVEL_VERSION
		);
	if (ret)
		goto end;

	ret = lttng_metadata_printf(session,
		"clock {\n"
		"	name = %s;\n",
		"monotonic"
		);
	if (ret)
		goto end;

	/* Clock UUID is optional: only emitted when available. */
	if (!trace_clock_uuid(clock_uuid_s)) {
		ret = lttng_metadata_printf(session,
			"	uuid = \"%s\";\n",
			clock_uuid_s
			);
		if (ret)
			goto end;
	}

	ret = lttng_metadata_printf(session,
		"	description = \"Monotonic Clock\";\n"
		"	freq = %llu; /* Frequency, in Hz */\n"
		"	/* clock value offset from Epoch is: offset * (1/freq) */\n"
		"	offset = %llu;\n"
		"};\n\n",
		(unsigned long long) trace_clock_freq(),
		(unsigned long long) measure_clock_offset()
		);
	if (ret)
		goto end;

	ret = lttng_metadata_printf(session,
		"typealias integer {\n"
		"	size = 27; align = 1; signed = false;\n"
		"	map = clock.monotonic.value;\n"
		"} := uint27_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		"	size = 32; align = %u; signed = false;\n"
		"	map = clock.monotonic.value;\n"
		"} := uint32_clock_monotonic_t;\n"
		"\n"
		"typealias integer {\n"
		"	size = 64; align = %u; signed = false;\n"
		"	map = clock.monotonic.value;\n"
		"} := uint64_clock_monotonic_t;\n\n",
		lttng_alignof(uint32_t) * CHAR_BIT,
		lttng_alignof(uint64_t) * CHAR_BIT
		);
	if (ret)
		goto end;

	ret = _lttng_stream_packet_context_declare(session);
	if (ret)
		goto end;

	ret = _lttng_event_header_declare(session);
	if (ret)
		goto end;

skip_session:
	/* Channels and events dump themselves at most once each. */
	list_for_each_entry(chan, &session->chan, list) {
		ret = _lttng_channel_metadata_statedump(session, chan);
		if (ret)
			goto end;
	}

	list_for_each_entry(event, &session->events, list) {
		ret = _lttng_event_metadata_statedump(session, event->chan, event);
		if (ret)
			goto end;
	}
	session->metadata_dumped = 1;
end:
	return ret;
}
2162
2163 /**
2164 * lttng_transport_register - LTT transport registration
2165 * @transport: transport structure
2166 *
2167 * Registers a transport which can be used as output to extract the data out of
2168 * LTTng. The module calling this registration function must ensure that no
2169 * trap-inducing code will be executed by the transport functions. E.g.
2170 * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
2171 * is made visible to the transport function. This registration acts as a
2172 * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
2173 * after its registration must it synchronize the TLBs.
2174 */
void lttng_transport_register(struct lttng_transport *transport)
{
	/*
	 * Make sure no page fault can be triggered by the module about to be
	 * registered. We deal with this here so we don't have to call
	 * vmalloc_sync_all() in each module's init.
	 */
	wrapper_vmalloc_sync_all();

	/* Publish the transport under the sessions mutex. */
	mutex_lock(&sessions_mutex);
	list_add_tail(&transport->node, &lttng_transport_list);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_register);
2189
2190 /**
2191 * lttng_transport_unregister - LTT transport unregistration
2192 * @transport: transport structure
2193 */
void lttng_transport_unregister(struct lttng_transport *transport)
{
	/* Remove the transport under the same lock used to publish it. */
	mutex_lock(&sessions_mutex);
	list_del(&transport->node);
	mutex_unlock(&sessions_mutex);
}
EXPORT_SYMBOL_GPL(lttng_transport_unregister);
2201
/*
 * Module init: set up wrappers, context, tracepoint glue, the event
 * slab cache, the ABI and the logger. On failure, the goto chain
 * unwinds the already-initialized subsystems in reverse order.
 */
static int __init lttng_events_init(void)
{
	int ret;

	ret = wrapper_lttng_fixup_sig(THIS_MODULE);
	if (ret)
		return ret;
	ret = wrapper_get_pfnblock_flags_mask_init();
	if (ret)
		return ret;
	ret = wrapper_get_pageblock_flags_mask_init();
	if (ret)
		return ret;
	ret = lttng_context_init();
	if (ret)
		return ret;
	ret = lttng_tracepoint_init();
	if (ret)
		goto error_tp;
	event_cache = KMEM_CACHE(lttng_event, 0);
	if (!event_cache) {
		ret = -ENOMEM;
		goto error_kmem;
	}
	ret = lttng_abi_init();
	if (ret)
		goto error_abi;
	ret = lttng_logger_init();
	if (ret)
		goto error_logger;
	return 0;

	/* Unwind in reverse initialization order. */
error_logger:
	lttng_abi_exit();
error_abi:
	kmem_cache_destroy(event_cache);
error_kmem:
	lttng_tracepoint_exit();
error_tp:
	lttng_context_exit();
	return ret;
}

module_init(lttng_events_init);
2246
/*
 * Module exit: tear down in reverse init order, destroying every
 * remaining session before freeing the event cache. The _safe list
 * iterator is required because lttng_session_destroy() unlinks the
 * session from the list.
 */
static void __exit lttng_events_exit(void)
{
	struct lttng_session *session, *tmpsession;

	lttng_logger_exit();
	lttng_abi_exit();
	list_for_each_entry_safe(session, tmpsession, &sessions, list)
		lttng_session_destroy(session);
	kmem_cache_destroy(event_cache);
	lttng_tracepoint_exit();
	lttng_context_exit();
}

module_exit(lttng_events_exit);
2261
2262 MODULE_LICENSE("GPL and additional rights");
2263 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
2264 MODULE_DESCRIPTION("LTTng Events");
2265 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
2266 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
2267 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
2268 LTTNG_MODULES_EXTRAVERSION);