Fix: rcu: Fix data-race due to atomic_t copy-by-value (5.5.6, 5.4.22)
1 /*
2 * lttng-events.c
3 *
4 * Holds LTTng per-session event registry.
5 *
6 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 /*
24 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
25 * overrides a function with a define.
26 */
27 #include "wrapper/page_alloc.h"
28
29 #include <linux/module.h>
30 #include <linux/mutex.h>
31 #include <linux/sched.h>
32 #include <linux/slab.h>
33 #include <linux/jiffies.h>
34 #include <linux/utsname.h>
35 #include <linux/err.h>
36 #include <linux/seq_file.h>
37 #include <linux/file.h>
38 #include <linux/anon_inodes.h>
39 #include <wrapper/file.h>
40 #include <linux/jhash.h>
41 #include <linux/uaccess.h>
42 #include <linux/vmalloc.h>
43
44 #include <wrapper/uuid.h>
45 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
46 #include <wrapper/random.h>
47 #include <wrapper/tracepoint.h>
48 #include <wrapper/list.h>
49 #include <wrapper/types.h>
50 #include <lttng-kernel-version.h>
51 #include <lttng-events.h>
52 #include <lttng-tracer.h>
53 #include <lttng-abi-old.h>
54 #include <lttng-endian.h>
55 #include <lttng-string-utils.h>
56 #include <wrapper/vzalloc.h>
57 #include <wrapper/ringbuffer/backend.h>
58 #include <wrapper/ringbuffer/frontend.h>
59 #include <wrapper/time.h>
60
61 #define METADATA_CACHE_DEFAULT_SIZE 4096
62
63 static LIST_HEAD(sessions);
64 static LIST_HEAD(lttng_transport_list);
65 /*
66 * Protect the sessions and metadata caches.
67 */
68 static DEFINE_MUTEX(sessions_mutex);
69 static struct kmem_cache *event_cache;
70
71 static void lttng_session_lazy_sync_enablers(struct lttng_session *session);
72 static void lttng_session_sync_enablers(struct lttng_session *session);
73 static void lttng_enabler_destroy(struct lttng_enabler *enabler);
74
75 static void _lttng_event_destroy(struct lttng_event *event);
76 static void _lttng_channel_destroy(struct lttng_channel *chan);
77 static int _lttng_event_unregister(struct lttng_event *event);
78 static
79 int _lttng_event_metadata_statedump(struct lttng_session *session,
80 struct lttng_channel *chan,
81 struct lttng_event *event);
82 static
83 int _lttng_session_metadata_statedump(struct lttng_session *session);
84 static
85 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
86 static
87 int _lttng_field_statedump(struct lttng_session *session,
88 const struct lttng_event_field *field,
89 size_t nesting);
90
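/*
 * Wait for a tracer grace period: every probe that was executing before the
 * call is guaranteed to have completed afterwards. Uses synchronize_rcu() on
 * kernels >= 5.1 (and additionally under PREEMPT_RT), synchronize_sched()
 * otherwise.
 */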
91 void synchronize_trace(void)
92 {
93 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
94 synchronize_rcu();
95 #else
96 synchronize_sched();
97 #endif
98
99 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
100 #ifdef CONFIG_PREEMPT_RT_FULL
101 synchronize_rcu();
102 #endif
103 #else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
104 #ifdef CONFIG_PREEMPT_RT
105 synchronize_rcu();
106 #endif
107 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)) */
108 }
109
110 void lttng_lock_sessions(void)
111 {
112 mutex_lock(&sessions_mutex);
113 }
114
115 void lttng_unlock_sessions(void)
116 {
117 mutex_unlock(&sessions_mutex);
118 }
119
120 /*
121 * Called with sessions lock held.
122 */
123 int lttng_session_active(void)
124 {
125 struct lttng_session *iter;
126
127 list_for_each_entry(iter, &sessions, list) {
128 if (iter->active)
129 return 1;
130 }
131 return 0;
132 }
133
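/*
 * Create a tracing session and its metadata cache, and add it to the global
 * session list. The metadata cache starts at METADATA_CACHE_DEFAULT_SIZE
 * bytes and shares the session UUID. Returns NULL on allocation failure.
 * Takes the sessions mutex.
 */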
134 struct lttng_session *lttng_session_create(void)
135 {
136 struct lttng_session *session;
137 struct lttng_metadata_cache *metadata_cache;
138 int i;
139
140 mutex_lock(&sessions_mutex);
141 session = lttng_kvzalloc(sizeof(struct lttng_session), GFP_KERNEL);
142 if (!session)
143 goto err;
144 INIT_LIST_HEAD(&session->chan);
145 INIT_LIST_HEAD(&session->events);
146 uuid_le_gen(&session->uuid);
147
148 metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
149 GFP_KERNEL);
150 if (!metadata_cache)
151 goto err_free_session;
152 metadata_cache->data = lttng_vzalloc(METADATA_CACHE_DEFAULT_SIZE);
153 if (!metadata_cache->data)
154 goto err_free_cache;
155 metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
156 kref_init(&metadata_cache->refcount);
157 mutex_init(&metadata_cache->lock);
158 session->metadata_cache = metadata_cache;
159 INIT_LIST_HEAD(&metadata_cache->metadata_stream);
160 memcpy(&metadata_cache->uuid, &session->uuid,
161 sizeof(metadata_cache->uuid));
162 INIT_LIST_HEAD(&session->enablers_head);
163 for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
164 INIT_HLIST_HEAD(&session->events_ht.table[i]);
165 list_add(&session->list, &sessions);
166 mutex_unlock(&sessions_mutex);
167 return session;
168
169 err_free_cache:
170 kfree(metadata_cache);
171 err_free_session:
172 lttng_kvfree(session);
173 err:
174 mutex_unlock(&sessions_mutex);
175 return NULL;
176 }
177
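/*
 * kref release function for the metadata cache: frees the cache buffer and
 * the cache structure itself.
 */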
178 void metadata_cache_destroy(struct kref *kref)
179 {
180 struct lttng_metadata_cache *cache =
181 container_of(kref, struct lttng_metadata_cache, refcount);
182 vfree(cache->data);
183 kfree(cache);
184 }
185
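/*
 * Tear down a session: unregister syscall tracing and events, wait for a
 * tracer grace period, destroy enablers, events and channels, hang up the
 * metadata streams, drop the metadata cache reference and free the session.
 * Takes the sessions mutex.
 */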
186 void lttng_session_destroy(struct lttng_session *session)
187 {
188 struct lttng_channel *chan, *tmpchan;
189 struct lttng_event *event, *tmpevent;
190 struct lttng_metadata_stream *metadata_stream;
191 struct lttng_enabler *enabler, *tmpenabler;
192 int ret;
193
194 mutex_lock(&sessions_mutex);
195 WRITE_ONCE(session->active, 0);
196 list_for_each_entry(chan, &session->chan, list) {
197 ret = lttng_syscalls_unregister(chan);
198 WARN_ON(ret);
199 }
200 list_for_each_entry(event, &session->events, list) {
201 ret = _lttng_event_unregister(event);
202 WARN_ON(ret);
203 }
204 synchronize_trace(); /* Wait for in-flight events to complete */
205 list_for_each_entry_safe(enabler, tmpenabler,
206 &session->enablers_head, node)
207 lttng_enabler_destroy(enabler);
208 list_for_each_entry_safe(event, tmpevent, &session->events, list)
209 _lttng_event_destroy(event);
210 list_for_each_entry_safe(chan, tmpchan, &session->chan, list) {
211 BUG_ON(chan->channel_type == METADATA_CHANNEL);
212 _lttng_channel_destroy(chan);
213 }
214 list_for_each_entry(metadata_stream, &session->metadata_cache->metadata_stream, list)
215 _lttng_metadata_channel_hangup(metadata_stream);
216 if (session->pid_tracker)
217 lttng_pid_tracker_destroy(session->pid_tracker);
218 kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
219 list_del(&session->list);
220 mutex_unlock(&sessions_mutex);
221 lttng_kvfree(session);
222 }
223
224 int lttng_session_statedump(struct lttng_session *session)
225 {
226 int ret;
227
228 mutex_lock(&sessions_mutex);
229 ret = lttng_statedump_start(session);
230 mutex_unlock(&sessions_mutex);
231 return ret;
232 }
233
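/*
 * Activate a session: pick the event header type of each channel based on
 * the number of allocated event IDs, sync enablers, clear the channels'
 * quiescent state, then emit the metadata and kernel state dumps.
 * Returns -EBUSY if the session is already active.
 */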
234 int lttng_session_enable(struct lttng_session *session)
235 {
236 int ret = 0;
237 struct lttng_channel *chan;
238
239 mutex_lock(&sessions_mutex);
240 if (session->active) {
241 ret = -EBUSY;
242 goto end;
243 }
244
245 /* Set transient enabler state to "enabled" */
246 session->tstate = 1;
247
248 /*
249 * Snapshot the number of events per channel to know the type of header
250 * we need to use.
251 */
252 list_for_each_entry(chan, &session->chan, list) {
253 if (chan->header_type)
254 continue; /* don't change it if session stop/restart */
255 if (chan->free_event_id < 31)
256 chan->header_type = 1; /* compact */
257 else
258 chan->header_type = 2; /* large */
259 }
260
261 /* We need to sync enablers with session before activation. */
262 lttng_session_sync_enablers(session);
263
264 /* Clear each stream's quiescent state. */
265 list_for_each_entry(chan, &session->chan, list) {
266 if (chan->channel_type != METADATA_CHANNEL)
267 lib_ring_buffer_clear_quiescent_channel(chan->chan);
268 }
269
270 WRITE_ONCE(session->active, 1);
271 WRITE_ONCE(session->been_active, 1);
272 ret = _lttng_session_metadata_statedump(session);
273 if (ret) {
274 WRITE_ONCE(session->active, 0);
275 goto end;
276 }
277 ret = lttng_statedump_start(session);
278 if (ret)
279 WRITE_ONCE(session->active, 0);
280 end:
281 mutex_unlock(&sessions_mutex);
282 return ret;
283 }
284
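/*
 * Deactivate a session: clear the active flag, sync enablers with the
 * "disabled" transient state, and set each data channel quiescent.
 * Returns -EBUSY if the session is not active.
 */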
285 int lttng_session_disable(struct lttng_session *session)
286 {
287 int ret = 0;
288 struct lttng_channel *chan;
289
290 mutex_lock(&sessions_mutex);
291 if (!session->active) {
292 ret = -EBUSY;
293 goto end;
294 }
295 WRITE_ONCE(session->active, 0);
296
297 /* Set transient enabler state to "disabled" */
298 session->tstate = 0;
299 lttng_session_sync_enablers(session);
300
301 /* Set each stream's quiescent state. */
302 list_for_each_entry(chan, &session->chan, list) {
303 if (chan->channel_type != METADATA_CHANNEL)
304 lib_ring_buffer_set_quiescent_channel(chan->chan);
305 }
306 end:
307 mutex_unlock(&sessions_mutex);
308 return ret;
309 }
310
311 int lttng_session_metadata_regenerate(struct lttng_session *session)
312 {
313 int ret = 0;
314 struct lttng_channel *chan;
315 struct lttng_event *event;
316 struct lttng_metadata_cache *cache = session->metadata_cache;
317 struct lttng_metadata_stream *stream;
318
319 mutex_lock(&sessions_mutex);
320 if (!session->active) {
321 ret = -EBUSY;
322 goto end;
323 }
324
325 mutex_lock(&cache->lock);
326 memset(cache->data, 0, cache->cache_alloc);
327 cache->metadata_written = 0;
328 cache->version++;
329 list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list) {
330 stream->metadata_out = 0;
331 stream->metadata_in = 0;
332 }
333 mutex_unlock(&cache->lock);
334
335 session->metadata_dumped = 0;
336 list_for_each_entry(chan, &session->chan, list) {
337 chan->metadata_dumped = 0;
338 }
339
340 list_for_each_entry(event, &session->events, list) {
341 event->metadata_dumped = 0;
342 }
343
344 ret = _lttng_session_metadata_statedump(session);
345
346 end:
347 mutex_unlock(&sessions_mutex);
348 return ret;
349 }
350
351
352
353 int lttng_channel_enable(struct lttng_channel *channel)
354 {
355 int ret = 0;
356
357 mutex_lock(&sessions_mutex);
358 if (channel->channel_type == METADATA_CHANNEL) {
359 ret = -EPERM;
360 goto end;
361 }
362 if (channel->enabled) {
363 ret = -EEXIST;
364 goto end;
365 }
366 /* Set transient enabler state to "enabled" */
367 channel->tstate = 1;
368 lttng_session_sync_enablers(channel->session);
369 /* Set atomically the state to "enabled" */
370 WRITE_ONCE(channel->enabled, 1);
371 end:
372 mutex_unlock(&sessions_mutex);
373 return ret;
374 }
375
376 int lttng_channel_disable(struct lttng_channel *channel)
377 {
378 int ret = 0;
379
380 mutex_lock(&sessions_mutex);
381 if (channel->channel_type == METADATA_CHANNEL) {
382 ret = -EPERM;
383 goto end;
384 }
385 if (!channel->enabled) {
386 ret = -EEXIST;
387 goto end;
388 }
389 /* Set atomically the state to "disabled" */
390 WRITE_ONCE(channel->enabled, 0);
391 /* Set transient enabler state to "enabled" */
392 channel->tstate = 0;
393 lttng_session_sync_enablers(channel->session);
394 end:
395 mutex_unlock(&sessions_mutex);
396 return ret;
397 }
398
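/*
 * Enable a single event. Tracepoint and syscall events are driven by
 * enablers and cannot be toggled individually (-EINVAL); kprobe, function
 * and noop events are flipped directly; kretprobe events go through
 * lttng_kretprobes_event_enable_state().
 */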
399 int lttng_event_enable(struct lttng_event *event)
400 {
401 int ret = 0;
402
403 mutex_lock(&sessions_mutex);
404 if (event->chan->channel_type == METADATA_CHANNEL) {
405 ret = -EPERM;
406 goto end;
407 }
408 if (event->enabled) {
409 ret = -EEXIST;
410 goto end;
411 }
412 switch (event->instrumentation) {
413 case LTTNG_KERNEL_TRACEPOINT:
414 case LTTNG_KERNEL_SYSCALL:
415 ret = -EINVAL;
416 break;
417 case LTTNG_KERNEL_KPROBE:
418 case LTTNG_KERNEL_FUNCTION:
419 case LTTNG_KERNEL_NOOP:
420 WRITE_ONCE(event->enabled, 1);
421 break;
422 case LTTNG_KERNEL_KRETPROBE:
423 ret = lttng_kretprobes_event_enable_state(event, 1);
424 break;
425 default:
426 WARN_ON_ONCE(1);
427 ret = -EINVAL;
428 }
429 end:
430 mutex_unlock(&sessions_mutex);
431 return ret;
432 }
433
434 int lttng_event_disable(struct lttng_event *event)
435 {
436 int ret = 0;
437
438 mutex_lock(&sessions_mutex);
439 if (event->chan->channel_type == METADATA_CHANNEL) {
440 ret = -EPERM;
441 goto end;
442 }
443 if (!event->enabled) {
444 ret = -EEXIST;
445 goto end;
446 }
447 switch (event->instrumentation) {
448 case LTTNG_KERNEL_TRACEPOINT:
449 case LTTNG_KERNEL_SYSCALL:
450 ret = -EINVAL;
451 break;
452 case LTTNG_KERNEL_KPROBE:
453 case LTTNG_KERNEL_FUNCTION:
454 case LTTNG_KERNEL_NOOP:
455 WRITE_ONCE(event->enabled, 0);
456 break;
457 case LTTNG_KERNEL_KRETPROBE:
458 ret = lttng_kretprobes_event_enable_state(event, 0);
459 break;
460 default:
461 WARN_ON_ONCE(1);
462 ret = -EINVAL;
463 }
464 end:
465 mutex_unlock(&sessions_mutex);
466 return ret;
467 }
468
469 static struct lttng_transport *lttng_transport_find(const char *name)
470 {
471 struct lttng_transport *transport;
472
473 list_for_each_entry(transport, &lttng_transport_list, node) {
474 if (!strcmp(transport->name, name))
475 return transport;
476 }
477 return NULL;
478 }
479
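/*
 * Create a channel backed by the requested transport. Returns NULL if the
 * session has already been active (except for the metadata channel), if the
 * transport is unknown, or if the ring buffer channel cannot be created.
 * Holds a reference on the transport module. Takes the sessions mutex.
 */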
480 struct lttng_channel *lttng_channel_create(struct lttng_session *session,
481 const char *transport_name,
482 void *buf_addr,
483 size_t subbuf_size, size_t num_subbuf,
484 unsigned int switch_timer_interval,
485 unsigned int read_timer_interval,
486 enum channel_type channel_type)
487 {
488 struct lttng_channel *chan;
489 struct lttng_transport *transport = NULL;
490
491 mutex_lock(&sessions_mutex);
492 if (session->been_active && channel_type != METADATA_CHANNEL)
493 goto active; /* Refuse to add channel to active session */
494 transport = lttng_transport_find(transport_name);
495 if (!transport) {
496 printk(KERN_WARNING "LTTng transport %s not found\n",
497 transport_name);
498 goto notransport;
499 }
500 if (!try_module_get(transport->owner)) {
501 printk(KERN_WARNING "LTT : Can't lock transport module.\n");
502 goto notransport;
503 }
504 chan = kzalloc(sizeof(struct lttng_channel), GFP_KERNEL);
505 if (!chan)
506 goto nomem;
507 chan->session = session;
508 chan->id = session->free_chan_id++;
509 chan->ops = &transport->ops;
510 /*
511 * Note: the channel creation op already writes into the packet
512 * headers. Therefore the "chan" information used as input
513 * should be already accessible.
514 */
515 chan->chan = transport->ops.channel_create(transport_name,
516 chan, buf_addr, subbuf_size, num_subbuf,
517 switch_timer_interval, read_timer_interval);
518 if (!chan->chan)
519 goto create_error;
520 chan->tstate = 1;
521 chan->enabled = 1;
522 chan->transport = transport;
523 chan->channel_type = channel_type;
524 list_add(&chan->list, &session->chan);
525 mutex_unlock(&sessions_mutex);
526 return chan;
527
528 create_error:
529 kfree(chan);
530 nomem:
531 if (transport)
532 module_put(transport->owner);
533 notransport:
534 active:
535 mutex_unlock(&sessions_mutex);
536 return NULL;
537 }
538
539 /*
540 * Only used internally at session destruction for per-cpu channels, and
541 * when metadata channel is released.
542 * Needs to be called with sessions mutex held.
543 */
544 static
545 void _lttng_channel_destroy(struct lttng_channel *chan)
546 {
547 chan->ops->channel_destroy(chan->chan);
548 module_put(chan->transport->owner);
549 list_del(&chan->list);
550 lttng_destroy_context(chan->ctx);
551 kfree(chan);
552 }
553
554 void lttng_metadata_channel_destroy(struct lttng_channel *chan)
555 {
556 BUG_ON(chan->channel_type != METADATA_CHANNEL);
557
558 /* Protect the metadata cache with the sessions_mutex. */
559 mutex_lock(&sessions_mutex);
560 _lttng_channel_destroy(chan);
561 mutex_unlock(&sessions_mutex);
562 }
563 EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
564
565 static
566 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
567 {
568 stream->finalized = 1;
569 wake_up_interruptible(&stream->read_wait);
570 }
571
572 /*
573 * Supports event creation while tracing session is active.
574 * Needs to be called with sessions mutex held.
575 */
576 struct lttng_event *_lttng_event_create(struct lttng_channel *chan,
577 struct lttng_kernel_event *event_param,
578 void *filter,
579 const struct lttng_event_desc *event_desc,
580 enum lttng_kernel_instrumentation itype)
581 {
582 struct lttng_session *session = chan->session;
583 struct lttng_event *event;
584 const char *event_name;
585 struct hlist_head *head;
586 size_t name_len;
587 uint32_t hash;
588 int ret;
589
590 if (chan->free_event_id == -1U) {
591 ret = -EMFILE;
592 goto full;
593 }
594
595 switch (itype) {
596 case LTTNG_KERNEL_TRACEPOINT:
597 event_name = event_desc->name;
598 break;
599 case LTTNG_KERNEL_KPROBE:
600 case LTTNG_KERNEL_KRETPROBE:
601 case LTTNG_KERNEL_FUNCTION:
602 case LTTNG_KERNEL_NOOP:
603 case LTTNG_KERNEL_SYSCALL:
604 event_name = event_param->name;
605 break;
606 default:
607 WARN_ON_ONCE(1);
608 ret = -EINVAL;
609 goto type_error;
610 }
611 name_len = strlen(event_name);
612 hash = jhash(event_name, name_len, 0);
613 head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
614 lttng_hlist_for_each_entry(event, head, hlist) {
615 WARN_ON_ONCE(!event->desc);
616 if (!strncmp(event->desc->name, event_name,
617 LTTNG_KERNEL_SYM_NAME_LEN - 1)
618 && chan == event->chan) {
619 ret = -EEXIST;
620 goto exist;
621 }
622 }
623
624 event = kmem_cache_zalloc(event_cache, GFP_KERNEL);
625 if (!event) {
626 ret = -ENOMEM;
627 goto cache_error;
628 }
629 event->chan = chan;
630 event->filter = filter;
631 event->id = chan->free_event_id++;
632 event->instrumentation = itype;
633 event->evtype = LTTNG_TYPE_EVENT;
634 INIT_LIST_HEAD(&event->bytecode_runtime_head);
635 INIT_LIST_HEAD(&event->enablers_ref_head);
636
637 switch (itype) {
638 case LTTNG_KERNEL_TRACEPOINT:
639 /* Event will be enabled by enabler sync. */
640 event->enabled = 0;
641 event->registered = 0;
642 event->desc = lttng_event_get(event_name);
643 if (!event->desc) {
644 ret = -ENOENT;
645 goto register_error;
646 }
647 /* Populate lttng_event structure before event registration. */
648 smp_wmb();
649 break;
650 case LTTNG_KERNEL_KPROBE:
651 /*
652 * Needs to be explicitly enabled after creation, since
653 * we may want to apply filters.
654 */
655 event->enabled = 0;
656 event->registered = 1;
657 /*
658 * Populate lttng_event structure before event
659 * registration.
660 */
661 smp_wmb();
662 ret = lttng_kprobes_register(event_name,
663 event_param->u.kprobe.symbol_name,
664 event_param->u.kprobe.offset,
665 event_param->u.kprobe.addr,
666 event);
667 if (ret) {
668 ret = -EINVAL;
669 goto register_error;
670 }
671 ret = try_module_get(event->desc->owner);
672 WARN_ON_ONCE(!ret);
673 break;
674 case LTTNG_KERNEL_KRETPROBE:
675 {
676 struct lttng_event *event_return;
677
678 /* kretprobe defines 2 events */
679 /*
680 * Needs to be explicitly enabled after creation, since
681 * we may want to apply filters.
682 */
683 event->enabled = 0;
684 event->registered = 1;
685 event_return =
686 kmem_cache_zalloc(event_cache, GFP_KERNEL);
687 if (!event_return) {
688 ret = -ENOMEM;
689 goto register_error;
690 }
691 event_return->chan = chan;
692 event_return->filter = filter;
693 event_return->id = chan->free_event_id++;
694 event_return->enabled = 0;
695 event_return->registered = 1;
696 event_return->instrumentation = itype;
697 /*
698 * Populate lttng_event structure before kretprobe registration.
699 */
700 smp_wmb();
701 ret = lttng_kretprobes_register(event_name,
702 event_param->u.kretprobe.symbol_name,
703 event_param->u.kretprobe.offset,
704 event_param->u.kretprobe.addr,
705 event, event_return);
706 if (ret) {
707 kmem_cache_free(event_cache, event_return);
708 ret = -EINVAL;
709 goto register_error;
710 }
711 /* Take 2 refs on the module: one per event. */
712 ret = try_module_get(event->desc->owner);
713 WARN_ON_ONCE(!ret);
714 ret = try_module_get(event->desc->owner);
715 WARN_ON_ONCE(!ret);
716 ret = _lttng_event_metadata_statedump(chan->session, chan,
717 event_return);
718 WARN_ON_ONCE(ret > 0);
719 if (ret) {
720 kmem_cache_free(event_cache, event_return);
721 module_put(event->desc->owner);
722 module_put(event->desc->owner);
723 goto statedump_error;
724 }
725 list_add(&event_return->list, &chan->session->events);
726 break;
727 }
728 case LTTNG_KERNEL_FUNCTION:
729 /*
730 * Needs to be explicitly enabled after creation, since
731 * we may want to apply filters.
732 */
733 event->enabled = 0;
734 event->registered = 1;
735 /*
736 * Populate lttng_event structure before event
737 * registration.
738 */
739 smp_wmb();
740 ret = lttng_ftrace_register(event_name,
741 event_param->u.ftrace.symbol_name,
742 event);
743 if (ret) {
744 goto register_error;
745 }
746 ret = try_module_get(event->desc->owner);
747 WARN_ON_ONCE(!ret);
748 break;
749 case LTTNG_KERNEL_NOOP:
750 case LTTNG_KERNEL_SYSCALL:
751 /*
752 * Needs to be explicitly enabled after creation, since
753 * we may want to apply filters.
754 */
755 event->enabled = 0;
756 event->registered = 0;
757 event->desc = event_desc;
758 if (!event->desc) {
759 ret = -EINVAL;
760 goto register_error;
761 }
762 break;
763 default:
764 WARN_ON_ONCE(1);
765 ret = -EINVAL;
766 goto register_error;
767 }
768 ret = _lttng_event_metadata_statedump(chan->session, chan, event);
769 WARN_ON_ONCE(ret > 0);
770 if (ret) {
771 goto statedump_error;
772 }
773 hlist_add_head(&event->hlist, head);
774 list_add(&event->list, &chan->session->events);
775 return event;
776
777 statedump_error:
778 /* If a statedump error occurs, events will not be readable. */
779 register_error:
780 kmem_cache_free(event_cache, event);
781 cache_error:
782 exist:
783 type_error:
784 full:
785 return ERR_PTR(ret);
786 }
787
788 struct lttng_event *lttng_event_create(struct lttng_channel *chan,
789 struct lttng_kernel_event *event_param,
790 void *filter,
791 const struct lttng_event_desc *event_desc,
792 enum lttng_kernel_instrumentation itype)
793 {
794 struct lttng_event *event;
795
796 mutex_lock(&sessions_mutex);
797 event = _lttng_event_create(chan, event_param, filter, event_desc,
798 itype);
799 mutex_unlock(&sessions_mutex);
800 return event;
801 }
802
803 /* Only used for tracepoints and system calls for now. */
804 static
805 void register_event(struct lttng_event *event)
806 {
807 const struct lttng_event_desc *desc;
808 int ret = -EINVAL;
809
810 if (event->registered)
811 return;
812
813 desc = event->desc;
814 switch (event->instrumentation) {
815 case LTTNG_KERNEL_TRACEPOINT:
816 ret = lttng_wrapper_tracepoint_probe_register(desc->kname,
817 desc->probe_callback,
818 event);
819 break;
820 case LTTNG_KERNEL_SYSCALL:
821 ret = lttng_syscall_filter_enable(event->chan,
822 desc->name);
823 break;
824 case LTTNG_KERNEL_KPROBE:
825 case LTTNG_KERNEL_KRETPROBE:
826 case LTTNG_KERNEL_FUNCTION:
827 case LTTNG_KERNEL_NOOP:
828 ret = 0;
829 break;
830 default:
831 WARN_ON_ONCE(1);
832 }
833 if (!ret)
834 event->registered = 1;
835 }
836
837 /*
838 * Only used internally at session destruction.
839 */
840 int _lttng_event_unregister(struct lttng_event *event)
841 {
842 const struct lttng_event_desc *desc;
843 int ret = -EINVAL;
844
845 if (!event->registered)
846 return 0;
847
848 desc = event->desc;
849 switch (event->instrumentation) {
850 case LTTNG_KERNEL_TRACEPOINT:
851 ret = lttng_wrapper_tracepoint_probe_unregister(event->desc->kname,
852 event->desc->probe_callback,
853 event);
854 break;
855 case LTTNG_KERNEL_KPROBE:
856 lttng_kprobes_unregister(event);
857 ret = 0;
858 break;
859 case LTTNG_KERNEL_KRETPROBE:
860 lttng_kretprobes_unregister(event);
861 ret = 0;
862 break;
863 case LTTNG_KERNEL_FUNCTION:
864 lttng_ftrace_unregister(event);
865 ret = 0;
866 break;
867 case LTTNG_KERNEL_SYSCALL:
868 ret = lttng_syscall_filter_disable(event->chan,
869 desc->name);
870 break;
871 case LTTNG_KERNEL_NOOP:
872 ret = 0;
873 break;
874 default:
875 WARN_ON_ONCE(1);
876 }
877 if (!ret)
878 event->registered = 0;
879 return ret;
880 }
881
882 /*
883 * Only used internally at session destruction.
884 */
885 static
886 void _lttng_event_destroy(struct lttng_event *event)
887 {
888 switch (event->instrumentation) {
889 case LTTNG_KERNEL_TRACEPOINT:
890 lttng_event_put(event->desc);
891 break;
892 case LTTNG_KERNEL_KPROBE:
893 module_put(event->desc->owner);
894 lttng_kprobes_destroy_private(event);
895 break;
896 case LTTNG_KERNEL_KRETPROBE:
897 module_put(event->desc->owner);
898 lttng_kretprobes_destroy_private(event);
899 break;
900 case LTTNG_KERNEL_FUNCTION:
901 module_put(event->desc->owner);
902 lttng_ftrace_destroy_private(event);
903 break;
904 case LTTNG_KERNEL_NOOP:
905 case LTTNG_KERNEL_SYSCALL:
906 break;
907 default:
908 WARN_ON_ONCE(1);
909 }
910 list_del(&event->list);
911 lttng_destroy_context(event->ctx);
912 kmem_cache_free(event_cache, event);
913 }
914
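/*
 * Add a PID to the session PID tracker, creating the tracker on first use.
 * pid == -1 means "track all PIDs": the tracker is destroyed after a tracer
 * grace period. Takes the sessions mutex.
 */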
915 int lttng_session_track_pid(struct lttng_session *session, int pid)
916 {
917 int ret;
918
919 if (pid < -1)
920 return -EINVAL;
921 mutex_lock(&sessions_mutex);
922 if (pid == -1) {
923 /* track all pids: destroy tracker. */
924 if (session->pid_tracker) {
925 struct lttng_pid_tracker *lpf;
926
927 lpf = session->pid_tracker;
928 rcu_assign_pointer(session->pid_tracker, NULL);
929 synchronize_trace();
930 lttng_pid_tracker_destroy(lpf);
931 }
932 ret = 0;
933 } else {
934 if (!session->pid_tracker) {
935 struct lttng_pid_tracker *lpf;
936
937 lpf = lttng_pid_tracker_create();
938 if (!lpf) {
939 ret = -ENOMEM;
940 goto unlock;
941 }
942 ret = lttng_pid_tracker_add(lpf, pid);
943 rcu_assign_pointer(session->pid_tracker, lpf);
944 } else {
945 ret = lttng_pid_tracker_add(session->pid_tracker, pid);
946 }
947 }
948 unlock:
949 mutex_unlock(&sessions_mutex);
950 return ret;
951 }
952
953 int lttng_session_untrack_pid(struct lttng_session *session, int pid)
954 {
955 int ret;
956
957 if (pid < -1)
958 return -EINVAL;
959 mutex_lock(&sessions_mutex);
960 if (pid == -1) {
961 /* untrack all pids: replace by empty tracker. */
962 struct lttng_pid_tracker *old_lpf = session->pid_tracker;
963 struct lttng_pid_tracker *lpf;
964
965 lpf = lttng_pid_tracker_create();
966 if (!lpf) {
967 ret = -ENOMEM;
968 goto unlock;
969 }
970 rcu_assign_pointer(session->pid_tracker, lpf);
971 synchronize_trace();
972 if (old_lpf)
973 lttng_pid_tracker_destroy(old_lpf);
974 ret = 0;
975 } else {
976 if (!session->pid_tracker) {
977 ret = -ENOENT;
978 goto unlock;
979 }
980 ret = lttng_pid_tracker_del(session->pid_tracker, pid);
981 }
982 unlock:
983 mutex_unlock(&sessions_mutex);
984 return ret;
985 }
986
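/*
 * seq_file iterator over the PID tracker hash table. A disabled tracker
 * (track all PIDs) is reported as a single entry with pid = -1.
 */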
987 static
988 void *pid_list_start(struct seq_file *m, loff_t *pos)
989 {
990 struct lttng_session *session = m->private;
991 struct lttng_pid_tracker *lpf;
992 struct lttng_pid_hash_node *e;
993 int iter = 0, i;
994
995 mutex_lock(&sessions_mutex);
996 lpf = session->pid_tracker;
997 if (lpf) {
998 for (i = 0; i < LTTNG_PID_TABLE_SIZE; i++) {
999 struct hlist_head *head = &lpf->pid_hash[i];
1000
1001 lttng_hlist_for_each_entry(e, head, hlist) {
1002 if (iter++ >= *pos)
1003 return e;
1004 }
1005 }
1006 } else {
1007 /* PID tracker disabled. */
1008 if (iter >= *pos && iter == 0) {
1009 return session; /* empty tracker */
1010 }
1011 iter++;
1012 }
1013 /* End of list */
1014 return NULL;
1015 }
1016
1017 /* Called with sessions_mutex held. */
1018 static
1019 void *pid_list_next(struct seq_file *m, void *p, loff_t *ppos)
1020 {
1021 struct lttng_session *session = m->private;
1022 struct lttng_pid_tracker *lpf;
1023 struct lttng_pid_hash_node *e;
1024 int iter = 0, i;
1025
1026 (*ppos)++;
1027 lpf = session->pid_tracker;
1028 if (lpf) {
1029 for (i = 0; i < LTTNG_PID_TABLE_SIZE; i++) {
1030 struct hlist_head *head = &lpf->pid_hash[i];
1031
1032 lttng_hlist_for_each_entry(e, head, hlist) {
1033 if (iter++ >= *ppos)
1034 return e;
1035 }
1036 }
1037 } else {
1038 /* PID tracker disabled. */
1039 if (iter >= *ppos && iter == 0)
1040 return session; /* empty tracker */
1041 iter++;
1042 }
1043
1044 /* End of list */
1045 return NULL;
1046 }
1047
1048 static
1049 void pid_list_stop(struct seq_file *m, void *p)
1050 {
1051 mutex_unlock(&sessions_mutex);
1052 }
1053
1054 static
1055 int pid_list_show(struct seq_file *m, void *p)
1056 {
1057 int pid;
1058
1059 if (p == m->private) {
1060 /* Tracker disabled. */
1061 pid = -1;
1062 } else {
1063 const struct lttng_pid_hash_node *e = p;
1064
1065 pid = lttng_pid_tracker_get_node_pid(e);
1066 }
1067 seq_printf(m, "process { pid = %d; };\n", pid);
1068 return 0;
1069 }
1070
1071 static
1072 const struct seq_operations lttng_tracker_pids_list_seq_ops = {
1073 .start = pid_list_start,
1074 .next = pid_list_next,
1075 .stop = pid_list_stop,
1076 .show = pid_list_show,
1077 };
1078
1079 static
1080 int lttng_tracker_pids_list_open(struct inode *inode, struct file *file)
1081 {
1082 return seq_open(file, &lttng_tracker_pids_list_seq_ops);
1083 }
1084
1085 static
1086 int lttng_tracker_pids_list_release(struct inode *inode, struct file *file)
1087 {
1088 struct seq_file *m = file->private_data;
1089 struct lttng_session *session = m->private;
1090 int ret;
1091
1092 WARN_ON_ONCE(!session);
1093 ret = seq_release(inode, file);
1094 if (!ret && session)
1095 fput(session->file);
1096 return ret;
1097 }
1098
1099 const struct file_operations lttng_tracker_pids_list_fops = {
1100 .owner = THIS_MODULE,
1101 .open = lttng_tracker_pids_list_open,
1102 .read = seq_read,
1103 .llseek = seq_lseek,
1104 .release = lttng_tracker_pids_list_release,
1105 };
1106
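/*
 * Create an anonymous inode file descriptor listing the tracked PIDs as a
 * seq_file. Takes an extra reference on the session file, released when the
 * listing file is released.
 */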
1107 int lttng_session_list_tracker_pids(struct lttng_session *session)
1108 {
1109 struct file *tracker_pids_list_file;
1110 struct seq_file *m;
1111 int file_fd, ret;
1112
1113 file_fd = lttng_get_unused_fd();
1114 if (file_fd < 0) {
1115 ret = file_fd;
1116 goto fd_error;
1117 }
1118
1119 tracker_pids_list_file = anon_inode_getfile("[lttng_tracker_pids_list]",
1120 &lttng_tracker_pids_list_fops,
1121 NULL, O_RDWR);
1122 if (IS_ERR(tracker_pids_list_file)) {
1123 ret = PTR_ERR(tracker_pids_list_file);
1124 goto file_error;
1125 }
1126 if (!atomic_long_add_unless(&session->file->f_count, 1, LONG_MAX)) {
1127 ret = -EOVERFLOW;
1128 goto refcount_error;
1129 }
1130 ret = lttng_tracker_pids_list_fops.open(NULL, tracker_pids_list_file);
1131 if (ret < 0)
1132 goto open_error;
1133 m = tracker_pids_list_file->private_data;
1134 m->private = session;
1135 fd_install(file_fd, tracker_pids_list_file);
1136
1137 return file_fd;
1138
1139 open_error:
1140 atomic_long_dec(&session->file->f_count);
1141 refcount_error:
1142 fput(tracker_pids_list_file);
1143 file_error:
1144 put_unused_fd(file_fd);
1145 fd_error:
1146 return ret;
1147 }
1148
1149 /*
1150 * Enabler management.
1151 */
1152 static
1153 int lttng_match_enabler_star_glob(const char *desc_name,
1154 const char *pattern)
1155 {
1156 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1157 desc_name, LTTNG_SIZE_MAX))
1158 return 0;
1159 return 1;
1160 }
1161
1162 static
1163 int lttng_match_enabler_name(const char *desc_name,
1164 const char *name)
1165 {
1166 if (strcmp(desc_name, name))
1167 return 0;
1168 return 1;
1169 }
1170
1171 static
1172 int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
1173 struct lttng_enabler *enabler)
1174 {
1175 const char *desc_name, *enabler_name;
1176
1177 enabler_name = enabler->event_param.name;
1178 switch (enabler->event_param.instrumentation) {
1179 case LTTNG_KERNEL_TRACEPOINT:
1180 desc_name = desc->name;
1181 break;
1182 case LTTNG_KERNEL_SYSCALL:
1183 desc_name = desc->name;
1184 if (!strncmp(desc_name, "compat_", strlen("compat_")))
1185 desc_name += strlen("compat_");
1186 if (!strncmp(desc_name, "syscall_exit_",
1187 strlen("syscall_exit_"))) {
1188 desc_name += strlen("syscall_exit_");
1189 } else if (!strncmp(desc_name, "syscall_entry_",
1190 strlen("syscall_entry_"))) {
1191 desc_name += strlen("syscall_entry_");
1192 } else {
1193 WARN_ON_ONCE(1);
1194 return -EINVAL;
1195 }
1196 break;
1197 default:
1198 WARN_ON_ONCE(1);
1199 return -EINVAL;
1200 }
1201 switch (enabler->type) {
1202 case LTTNG_ENABLER_STAR_GLOB:
1203 return lttng_match_enabler_star_glob(desc_name, enabler_name);
1204 case LTTNG_ENABLER_NAME:
1205 return lttng_match_enabler_name(desc_name, enabler_name);
1206 default:
1207 return -EINVAL;
1208 }
1209 }
1210
1211 static
1212 int lttng_event_match_enabler(struct lttng_event *event,
1213 struct lttng_enabler *enabler)
1214 {
1215 if (enabler->event_param.instrumentation != event->instrumentation)
1216 return 0;
1217 if (lttng_desc_match_enabler(event->desc, enabler)
1218 && event->chan == enabler->chan)
1219 return 1;
1220 else
1221 return 0;
1222 }
1223
1224 static
1225 struct lttng_enabler_ref *lttng_event_enabler_ref(struct lttng_event *event,
1226 struct lttng_enabler *enabler)
1227 {
1228 struct lttng_enabler_ref *enabler_ref;
1229
1230 list_for_each_entry(enabler_ref,
1231 &event->enablers_ref_head, node) {
1232 if (enabler_ref->ref == enabler)
1233 return enabler_ref;
1234 }
1235 return NULL;
1236 }
1237
1238 static
1239 void lttng_create_tracepoint_if_missing(struct lttng_enabler *enabler)
1240 {
1241 struct lttng_session *session = enabler->chan->session;
1242 struct lttng_probe_desc *probe_desc;
1243 const struct lttng_event_desc *desc;
1244 int i;
1245 struct list_head *probe_list;
1246
1247 probe_list = lttng_get_probe_list_head();
1248 /*
1249 * For each probe event, if we find that a probe event matches
1250 * our enabler, create an associated lttng_event if not
1251 * already present.
1252 */
1253 list_for_each_entry(probe_desc, probe_list, head) {
1254 for (i = 0; i < probe_desc->nr_events; i++) {
1255 int found = 0;
1256 struct hlist_head *head;
1257 const char *event_name;
1258 size_t name_len;
1259 uint32_t hash;
1260 struct lttng_event *event;
1261
1262 desc = probe_desc->event_desc[i];
1263 if (!lttng_desc_match_enabler(desc, enabler))
1264 continue;
1265 event_name = desc->name;
1266 name_len = strlen(event_name);
1267
1268 /*
1269 * Check if already created.
1270 */
1271 hash = jhash(event_name, name_len, 0);
1272 head = &session->events_ht.table[hash & (LTTNG_EVENT_HT_SIZE - 1)];
1273 lttng_hlist_for_each_entry(event, head, hlist) {
1274 if (event->desc == desc
1275 && event->chan == enabler->chan)
1276 found = 1;
1277 }
1278 if (found)
1279 continue;
1280
1281 /*
1282 * We need to create an event for this
1283 * event probe.
1284 */
1285 event = _lttng_event_create(enabler->chan,
1286 NULL, NULL, desc,
1287 LTTNG_KERNEL_TRACEPOINT);
1288 if (!event) {
1289 printk(KERN_INFO "Unable to create event %s\n",
1290 probe_desc->event_desc[i]->name);
1291 }
1292 }
1293 }
1294 }
1295
1296 static
1297 void lttng_create_syscall_if_missing(struct lttng_enabler *enabler)
1298 {
1299 int ret;
1300
1301 ret = lttng_syscalls_register(enabler->chan, NULL);
1302 WARN_ON_ONCE(ret);
1303 }
1304
1305 /*
1306 * Create struct lttng_event if it is missing and its descriptor is present
1307 * in the list of tracepoint probes.
1308 * Should be called with sessions mutex held.
1309 */
1310 static
1311 void lttng_create_event_if_missing(struct lttng_enabler *enabler)
1312 {
1313 switch (enabler->event_param.instrumentation) {
1314 case LTTNG_KERNEL_TRACEPOINT:
1315 lttng_create_tracepoint_if_missing(enabler);
1316 break;
1317 case LTTNG_KERNEL_SYSCALL:
1318 lttng_create_syscall_if_missing(enabler);
1319 break;
1320 default:
1321 WARN_ON_ONCE(1);
1322 break;
1323 }
1324 }
1325
1326 /*
1327 * Create events associated with an enabler (if not already present),
1328 * and add backward reference from the event to the enabler.
1329 * Should be called with sessions mutex held.
1330 */
1331 static
1332 int lttng_enabler_ref_events(struct lttng_enabler *enabler)
1333 {
1334 struct lttng_session *session = enabler->chan->session;
1335 struct lttng_event *event;
1336
1337 /* First ensure that probe events are created for this enabler. */
1338 lttng_create_event_if_missing(enabler);
1339
1340 /* For each event matching enabler in session event list. */
1341 list_for_each_entry(event, &session->events, list) {
1342 struct lttng_enabler_ref *enabler_ref;
1343
1344 if (!lttng_event_match_enabler(event, enabler))
1345 continue;
1346 enabler_ref = lttng_event_enabler_ref(event, enabler);
1347 if (!enabler_ref) {
1348 /*
1349 * If no backward ref, create it.
1350 * Add backward ref from event to enabler.
1351 */
1352 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
1353 if (!enabler_ref)
1354 return -ENOMEM;
1355 enabler_ref->ref = enabler;
1356 list_add(&enabler_ref->node,
1357 &event->enablers_ref_head);
1358 }
1359
1360 /*
1361 * Link filter bytecodes if not linked yet.
1362 */
1363 lttng_enabler_event_link_bytecode(event, enabler);
1364
1365 /* TODO: merge event context. */
1366 }
1367 return 0;
1368 }
1369
1370 /*
1371 * Called at module load: connect the probe on all enablers matching
1372 * this event.
1373 * Called with sessions lock held.
1374 */
1375 int lttng_fix_pending_events(void)
1376 {
1377 struct lttng_session *session;
1378
1379 list_for_each_entry(session, &sessions, list)
1380 lttng_session_lazy_sync_enablers(session);
1381 return 0;
1382 }
1383
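/*
 * Allocate an enabler for the given channel, add it to the session enabler
 * list and lazily sync it with existing events. The enabler starts disabled.
 */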
1384 struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
1385 struct lttng_kernel_event *event_param,
1386 struct lttng_channel *chan)
1387 {
1388 struct lttng_enabler *enabler;
1389
1390 enabler = kzalloc(sizeof(*enabler), GFP_KERNEL);
1391 if (!enabler)
1392 return NULL;
1393 enabler->type = type;
1394 INIT_LIST_HEAD(&enabler->filter_bytecode_head);
1395 memcpy(&enabler->event_param, event_param,
1396 sizeof(enabler->event_param));
1397 enabler->chan = chan;
1398 /* ctx left NULL */
1399 enabler->enabled = 0;
1400 enabler->evtype = LTTNG_TYPE_ENABLER;
1401 mutex_lock(&sessions_mutex);
1402 list_add(&enabler->node, &enabler->chan->session->enablers_head);
1403 lttng_session_lazy_sync_enablers(enabler->chan->session);
1404 mutex_unlock(&sessions_mutex);
1405 return enabler;
1406 }
1407
1408 int lttng_enabler_enable(struct lttng_enabler *enabler)
1409 {
1410 mutex_lock(&sessions_mutex);
1411 enabler->enabled = 1;
1412 lttng_session_lazy_sync_enablers(enabler->chan->session);
1413 mutex_unlock(&sessions_mutex);
1414 return 0;
1415 }
1416
1417 int lttng_enabler_disable(struct lttng_enabler *enabler)
1418 {
1419 mutex_lock(&sessions_mutex);
1420 enabler->enabled = 0;
1421 lttng_session_lazy_sync_enablers(enabler->chan->session);
1422 mutex_unlock(&sessions_mutex);
1423 return 0;
1424 }
1425
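/*
 * Copy a filter bytecode program from user space and attach it to the
 * enabler; it is linked to matching events on the next enabler sync. The
 * bytecode length is enforced from the allocated size.
 */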
1426 int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
1427 struct lttng_kernel_filter_bytecode __user *bytecode)
1428 {
1429 struct lttng_filter_bytecode_node *bytecode_node;
1430 uint32_t bytecode_len;
1431 int ret;
1432
1433 ret = get_user(bytecode_len, &bytecode->len);
1434 if (ret)
1435 return ret;
1436 bytecode_node = kzalloc(sizeof(*bytecode_node) + bytecode_len,
1437 GFP_KERNEL);
1438 if (!bytecode_node)
1439 return -ENOMEM;
1440 ret = copy_from_user(&bytecode_node->bc, bytecode,
1441 sizeof(*bytecode) + bytecode_len);
1442 if (ret)
1443 goto error_free;
1444 bytecode_node->enabler = enabler;
1445 /* Enforce length based on allocated size */
1446 bytecode_node->bc.len = bytecode_len;
1447 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
1448 lttng_session_lazy_sync_enablers(enabler->chan->session);
1449 return 0;
1450
1451 error_free:
1452 kfree(bytecode_node);
1453 return ret;
1454 }
1455
1456 int lttng_enabler_attach_context(struct lttng_enabler *enabler,
1457 struct lttng_kernel_context *context_param)
1458 {
1459 return -ENOSYS;
1460 }
1461
1462 static
1463 void lttng_enabler_destroy(struct lttng_enabler *enabler)
1464 {
1465 struct lttng_filter_bytecode_node *filter_node, *tmp_filter_node;
1466
1467 /* Destroy filter bytecode */
1468 list_for_each_entry_safe(filter_node, tmp_filter_node,
1469 &enabler->filter_bytecode_head, node) {
1470 kfree(filter_node);
1471 }
1472
1473 /* Destroy contexts */
1474 lttng_destroy_context(enabler->ctx);
1475
1476 list_del(&enabler->node);
1477 kfree(enabler);
1478 }
1479
1480 /*
1481 * lttng_session_sync_enablers should be called just before starting a
1482 * session.
1483 * Should be called with sessions mutex held.
1484 */
1485 static
1486 void lttng_session_sync_enablers(struct lttng_session *session)
1487 {
1488 struct lttng_enabler *enabler;
1489 struct lttng_event *event;
1490
1491 list_for_each_entry(enabler, &session->enablers_head, node)
1492 lttng_enabler_ref_events(enabler);
1493 /*
1494 * For each event, if at least one of its enablers is enabled,
1495 * and its channel and session transient states are enabled, we
1496 * enable the event, else we disable it.
1497 */
1498 list_for_each_entry(event, &session->events, list) {
1499 struct lttng_enabler_ref *enabler_ref;
1500 struct lttng_bytecode_runtime *runtime;
1501 int enabled = 0, has_enablers_without_bytecode = 0;
1502
1503 switch (event->instrumentation) {
1504 case LTTNG_KERNEL_TRACEPOINT:
1505 case LTTNG_KERNEL_SYSCALL:
1506 /* Enable events */
1507 list_for_each_entry(enabler_ref,
1508 &event->enablers_ref_head, node) {
1509 if (enabler_ref->ref->enabled) {
1510 enabled = 1;
1511 break;
1512 }
1513 }
1514 break;
1515 default:
1516 /* Not handled with lazy sync. */
1517 continue;
1518 }
1519 /*
1520 * Enabled state is based on union of enablers, with
1521 * intersection of session and channel transient enable
1522 * states.
1523 */
1524 enabled = enabled && session->tstate && event->chan->tstate;
1525
1526 WRITE_ONCE(event->enabled, enabled);
1527 /*
1528 * Sync tracepoint registration with event enabled
1529 * state.
1530 */
1531 if (enabled) {
1532 register_event(event);
1533 } else {
1534 _lttng_event_unregister(event);
1535 }
1536
1537 /* Check if has enablers without bytecode enabled */
1538 list_for_each_entry(enabler_ref,
1539 &event->enablers_ref_head, node) {
1540 if (enabler_ref->ref->enabled
1541 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
1542 has_enablers_without_bytecode = 1;
1543 break;
1544 }
1545 }
1546 event->has_enablers_without_bytecode =
1547 has_enablers_without_bytecode;
1548
1549 /* Enable filters */
1550 list_for_each_entry(runtime,
1551 &event->bytecode_runtime_head, node)
1552 lttng_filter_sync_state(runtime);
1553 }
1554 }
1555
1556 /*
1557 * Apply enablers to session events, adding events to session if need
1558 * be. It is required after each modification applied to an active
1559 * session, and right before session "start".
1560 * "lazy" sync means we only sync if required.
1561 * Should be called with sessions mutex held.
1562 */
1563 static
1564 void lttng_session_lazy_sync_enablers(struct lttng_session *session)
1565 {
1566 /* We can skip if session is not active */
1567 if (!session->active)
1568 return;
1569 lttng_session_sync_enablers(session);
1570 }
1571
1572 /*
1573 * Serialize at most one packet worth of metadata into a metadata
1574 * channel.
1575 * We grab the metadata cache mutex to get exclusive access to our metadata
1576 * buffer and to the metadata cache. Exclusive access to the metadata buffer
1577 * allows us to do racy operations such as looking for remaining space left in
1578 * packet and write, since mutual exclusion protects us from concurrent writes.
1579 * Mutual exclusion on the metadata cache allows us to read the cache content
1580 * without racing against reallocation of the cache by updates.
1581 * Returns the number of bytes written in the channel, 0 if no data
1582 * was written and a negative value on error.
1583 */
1584 int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
1585 struct channel *chan)
1586 {
1587 struct lib_ring_buffer_ctx ctx;
1588 int ret = 0;
1589 size_t len, reserve_len;
1590
1591 /*
1592 * Ensure we support multiple get_next / put sequences followed by
1593 * put_next. The metadata cache lock protects reading the metadata
1594 * cache. It can indeed be read concurrently by "get_next_subbuf" and
1595 * "flush" operations on the buffer invoked by different processes.
1596 * Moreover, since the metadata cache memory can be reallocated, we
1597 * need to have exclusive access against updates even though we only
1598 * read it.
1599 */
1600 mutex_lock(&stream->metadata_cache->lock);
1601 WARN_ON(stream->metadata_in < stream->metadata_out);
1602 if (stream->metadata_in != stream->metadata_out)
1603 goto end;
1604
1605 /* Metadata regenerated, change the version. */
1606 if (stream->metadata_cache->version != stream->version)
1607 stream->version = stream->metadata_cache->version;
1608
1609 len = stream->metadata_cache->metadata_written -
1610 stream->metadata_in;
1611 if (!len)
1612 goto end;
1613 reserve_len = min_t(size_t,
1614 stream->transport->ops.packet_avail_size(chan),
1615 len);
1616 lib_ring_buffer_ctx_init(&ctx, chan, NULL, reserve_len,
1617 sizeof(char), -1);
1618 /*
1619 * If reservation failed, return an error to the caller.
1620 */
1621 ret = stream->transport->ops.event_reserve(&ctx, 0);
1622 if (ret != 0) {
1623 printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
1624 goto end;
1625 }
1626 stream->transport->ops.event_write(&ctx,
1627 stream->metadata_cache->data + stream->metadata_in,
1628 reserve_len);
1629 stream->transport->ops.event_commit(&ctx);
1630 stream->metadata_in += reserve_len;
1631 ret = reserve_len;
1632
1633 end:
1634 mutex_unlock(&stream->metadata_cache->lock);
1635 return ret;
1636 }
1637
1638 /*
1639 * Write the metadata to the metadata cache.
1640 * Must be called with sessions_mutex held.
1641 * The metadata cache lock protects us from concurrent read access from
1642 * thread outputting metadata content to ring buffer.
1643 */
1644 int lttng_metadata_printf(struct lttng_session *session,
1645 const char *fmt, ...)
1646 {
1647 char *str;
1648 size_t len;
1649 va_list ap;
1650 struct lttng_metadata_stream *stream;
1651
1652 WARN_ON_ONCE(!READ_ONCE(session->active));
1653
1654 va_start(ap, fmt);
1655 str = kvasprintf(GFP_KERNEL, fmt, ap);
1656 va_end(ap);
1657 if (!str)
1658 return -ENOMEM;
1659
1660 len = strlen(str);
1661 mutex_lock(&session->metadata_cache->lock);
1662 if (session->metadata_cache->metadata_written + len >
1663 session->metadata_cache->cache_alloc) {
1664 char *tmp_cache_realloc;
1665 unsigned int tmp_cache_alloc_size;
1666
1667 tmp_cache_alloc_size = max_t(unsigned int,
1668 session->metadata_cache->cache_alloc + len,
1669 session->metadata_cache->cache_alloc << 1);
1670 tmp_cache_realloc = lttng_vzalloc(tmp_cache_alloc_size);
1671 if (!tmp_cache_realloc)
1672 goto err;
1673 if (session->metadata_cache->data) {
1674 memcpy(tmp_cache_realloc,
1675 session->metadata_cache->data,
1676 session->metadata_cache->cache_alloc);
1677 vfree(session->metadata_cache->data);
1678 }
1679
1680 session->metadata_cache->cache_alloc = tmp_cache_alloc_size;
1681 session->metadata_cache->data = tmp_cache_realloc;
1682 }
1683 memcpy(session->metadata_cache->data +
1684 session->metadata_cache->metadata_written,
1685 str, len);
1686 session->metadata_cache->metadata_written += len;
1687 mutex_unlock(&session->metadata_cache->lock);
1688 kfree(str);
1689
1690 list_for_each_entry(stream, &session->metadata_cache->metadata_stream, list)
1691 wake_up_interruptible(&stream->read_wait);
1692
1693 return 0;
1694
1695 err:
1696 mutex_unlock(&session->metadata_cache->lock);
1697 kfree(str);
1698 return -ENOMEM;
1699 }
1700
1701 static
1702 int print_tabs(struct lttng_session *session, size_t nesting)
1703 {
1704 size_t i;
1705
1706 for (i = 0; i < nesting; i++) {
1707 int ret;
1708
1709 ret = lttng_metadata_printf(session, " ");
1710 if (ret) {
1711 return ret;
1712 }
1713 }
1714 return 0;
1715 }
1716
1717 /*
1718 * Must be called with sessions_mutex held.
1719 */
1720 static
1721 int _lttng_struct_type_statedump(struct lttng_session *session,
1722 const struct lttng_type *type,
1723 size_t nesting)
1724 {
1725 int ret;
1726 uint32_t i, nr_fields;
1727
1728 ret = print_tabs(session, nesting);
1729 if (ret)
1730 return ret;
1731 ret = lttng_metadata_printf(session,
1732 "struct {\n");
1733 if (ret)
1734 return ret;
1735 nr_fields = type->u._struct.nr_fields;
1736 for (i = 0; i < nr_fields; i++) {
1737 const struct lttng_event_field *iter_field;
1738
1739 iter_field = &type->u._struct.fields[i];
1740 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
1741 if (ret)
1742 return ret;
1743 }
1744 ret = print_tabs(session, nesting);
1745 if (ret)
1746 return ret;
1747 ret = lttng_metadata_printf(session,
1748 "}");
1749 return ret;
1750 }
1751
1752 /*
1753 * Must be called with sessions_mutex held.
1754 */
1755 static
1756 int _lttng_struct_statedump(struct lttng_session *session,
1757 const struct lttng_event_field *field,
1758 size_t nesting)
1759 {
1760 int ret;
1761
1762 ret = _lttng_struct_type_statedump(session,
1763 &field->type, nesting);
1764 if (ret)
1765 return ret;
1766 ret = lttng_metadata_printf(session,
1767 "_%s;\n",
1768 field->name);
1769 return ret;
1770 }
1771
1772 /*
1773 * Must be called with sessions_mutex held.
1774 */
1775 static
1776 int _lttng_variant_type_statedump(struct lttng_session *session,
1777 const struct lttng_type *type,
1778 size_t nesting)
1779 {
1780 int ret;
1781 uint32_t i, nr_choices;
1782
1783 ret = print_tabs(session, nesting);
1784 if (ret)
1785 return ret;
1786 ret = lttng_metadata_printf(session,
1787 "variant <_%s> {\n",
1788 type->u.variant.tag_name);
1789 if (ret)
1790 return ret;
1791 nr_choices = type->u.variant.nr_choices;
1792 for (i = 0; i < nr_choices; i++) {
1793 const struct lttng_event_field *iter_field;
1794
1795 iter_field = &type->u.variant.choices[i];
1796 ret = _lttng_field_statedump(session, iter_field, nesting + 1);
1797 if (ret)
1798 return ret;
1799 }
1800 ret = print_tabs(session, nesting);
1801 if (ret)
1802 return ret;
1803 ret = lttng_metadata_printf(session,
1804 "}");
1805 return ret;
1806 }
1807
1808 /*
1809 * Must be called with sessions_mutex held.
1810 */
1811 static
1812 int _lttng_variant_statedump(struct lttng_session *session,
1813 const struct lttng_event_field *field,
1814 size_t nesting)
1815 {
1816 int ret;
1817
1818 ret = _lttng_variant_type_statedump(session,
1819 &field->type, nesting);
1820 if (ret)
1821 return ret;
1822 ret = lttng_metadata_printf(session,
1823 "_%s;\n",
1824 field->name);
1825 return ret;
1826 }
1827
1828 /*
1829 * Must be called with sessions_mutex held.
1830 */
1831 static
1832 int _lttng_array_compound_statedump(struct lttng_session *session,
1833 const struct lttng_event_field *field,
1834 size_t nesting)
1835 {
1836 int ret;
1837 const struct lttng_type *elem_type;
1838
1839 /* Only arrays of structures and variants are currently supported. */
1840 elem_type = field->type.u.array_compound.elem_type;
1841 switch (elem_type->atype) {
1842 case atype_struct:
1843 ret = _lttng_struct_type_statedump(session, elem_type, nesting);
1844 if (ret)
1845 return ret;
1846 break;
1847 case atype_variant:
1848 ret = _lttng_variant_type_statedump(session, elem_type, nesting);
1849 if (ret)
1850 return ret;
1851 break;
1852 default:
1853 return -EINVAL;
1854 }
1855 ret = lttng_metadata_printf(session,
1856 " _%s[%u];\n",
1857 field->name,
1858 field->type.u.array_compound.length);
1859 return ret;
1860 }
1861
1862 /*
1863 * Must be called with sessions_mutex held.
1864 */
1865 static
1866 int _lttng_sequence_compound_statedump(struct lttng_session *session,
1867 const struct lttng_event_field *field,
1868 size_t nesting)
1869 {
1870 int ret;
1871 const char *length_name;
1872 const struct lttng_type *elem_type;
1873
1874 length_name = field->type.u.sequence_compound.length_name;
1875
1876 /* Only arrays of structures and variants are currently supported. */
1877 elem_type = field->type.u.sequence_compound.elem_type;
1878 switch (elem_type->atype) {
1879 case atype_struct:
1880 ret = _lttng_struct_type_statedump(session, elem_type, nesting);
1881 if (ret)
1882 return ret;
1883 break;
1884 case atype_variant:
1885 ret = _lttng_variant_type_statedump(session, elem_type, nesting);
1886 if (ret)
1887 return ret;
1888 break;
1889 default:
1890 return -EINVAL;
1891 }
1892 ret = lttng_metadata_printf(session,
1893 " _%s[ _%s ];\n",
1894 field->name,
1895 length_name);
1896 return ret;
1897 }
1898
1899 /*
1900 * Must be called with sessions_mutex held.
1901 */
1902 static
1903 int _lttng_enum_statedump(struct lttng_session *session,
1904 const struct lttng_event_field *field,
1905 size_t nesting)
1906 {
1907 const struct lttng_enum_desc *enum_desc;
1908 const struct lttng_integer_type *container_type;
1909 int ret;
1910 unsigned int i, nr_entries;
1911
1912 enum_desc = field->type.u.basic.enumeration.desc;
1913 container_type = &field->type.u.basic.enumeration.container_type;
1914 nr_entries = enum_desc->nr_entries;
1915
1916 ret = print_tabs(session, nesting);
1917 if (ret)
1918 goto end;
1919 ret = lttng_metadata_printf(session,
1920 "enum : integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } {\n",
1921 container_type->size,
1922 container_type->alignment,
1923 container_type->signedness,
1924 (container_type->encoding == lttng_encode_none)
1925 ? "none"
1926 : (container_type->encoding == lttng_encode_UTF8)
1927 ? "UTF8"
1928 : "ASCII",
1929 container_type->base,
1930 #if __BYTE_ORDER == __BIG_ENDIAN
1931 container_type->reverse_byte_order ? " byte_order = le;" : ""
1932 #else
1933 container_type->reverse_byte_order ? " byte_order = be;" : ""
1934 #endif
1935 );
1936 if (ret)
1937 goto end;
1938 /* Dump all entries */
1939 for (i = 0; i < nr_entries; i++) {
1940 const struct lttng_enum_entry *entry = &enum_desc->entries[i];
1941 int j, len;
1942
1943 ret = print_tabs(session, nesting + 1);
1944 if (ret)
1945 goto end;
1946 ret = lttng_metadata_printf(session,
1947 "\"");
1948 if (ret)
1949 goto end;
1950 len = strlen(entry->string);
1951 /* Escape the character '"' */
1952 for (j = 0; j < len; j++) {
1953 char c = entry->string[j];
1954
1955 switch (c) {
1956 case '"':
1957 ret = lttng_metadata_printf(session,
1958 "\\\"");
1959 break;
1960 case '\\':
1961 ret = lttng_metadata_printf(session,
1962 "\\\\");
1963 break;
1964 default:
1965 ret = lttng_metadata_printf(session,
1966 "%c", c);
1967 break;
1968 }
1969 if (ret)
1970 goto end;
1971 }
1972 ret = lttng_metadata_printf(session, "\"");
1973 if (ret)
1974 goto end;
1975
1976 if (entry->options.is_auto) {
1977 ret = lttng_metadata_printf(session, ",\n");
1978 if (ret)
1979 goto end;
1980 } else {
1981 ret = lttng_metadata_printf(session,
1982 " = ");
1983 if (ret)
1984 goto end;
1985 if (entry->start.signedness)
1986 ret = lttng_metadata_printf(session,
1987 "%lld", (long long) entry->start.value);
1988 else
1989 ret = lttng_metadata_printf(session,
1990 "%llu", entry->start.value);
1991 if (ret)
1992 goto end;
1993 if (entry->start.signedness == entry->end.signedness &&
1994 entry->start.value
1995 == entry->end.value) {
1996 ret = lttng_metadata_printf(session,
1997 ",\n");
1998 } else {
1999 if (entry->end.signedness) {
2000 ret = lttng_metadata_printf(session,
2001 " ... %lld,\n",
2002 (long long) entry->end.value);
2003 } else {
2004 ret = lttng_metadata_printf(session,
2005 " ... %llu,\n",
2006 entry->end.value);
2007 }
2008 }
2009 if (ret)
2010 goto end;
2011 }
2012 }
2013 ret = print_tabs(session, nesting);
2014 if (ret)
2015 goto end;
2016 ret = lttng_metadata_printf(session, "} _%s;\n",
2017 field->name);
2018 end:
2019 return ret;
2020 }
2021
2022 /*
2023 * Must be called with sessions_mutex held.
2024 */
2025 static
2026 int _lttng_field_statedump(struct lttng_session *session,
2027 const struct lttng_event_field *field,
2028 size_t nesting)
2029 {
2030 int ret = 0;
2031
2032 switch (field->type.atype) {
2033 case atype_integer:
2034 ret = print_tabs(session, nesting);
2035 if (ret)
2036 return ret;
2037 ret = lttng_metadata_printf(session,
2038 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s;\n",
2039 field->type.u.basic.integer.size,
2040 field->type.u.basic.integer.alignment,
2041 field->type.u.basic.integer.signedness,
2042 (field->type.u.basic.integer.encoding == lttng_encode_none)
2043 ? "none"
2044 : (field->type.u.basic.integer.encoding == lttng_encode_UTF8)
2045 ? "UTF8"
2046 : "ASCII",
2047 field->type.u.basic.integer.base,
2048 #if __BYTE_ORDER == __BIG_ENDIAN
2049 field->type.u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
2050 #else
2051 field->type.u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
2052 #endif
2053 field->name);
2054 break;
2055 case atype_enum:
2056 ret = _lttng_enum_statedump(session, field, nesting);
2057 break;
2058 case atype_array:
2059 {
2060 const struct lttng_basic_type *elem_type;
2061
2062 elem_type = &field->type.u.array.elem_type;
2063 if (field->type.u.array.elem_alignment) {
2064 ret = print_tabs(session, nesting);
2065 if (ret)
2066 return ret;
2067 ret = lttng_metadata_printf(session,
2068 "struct { } align(%u) _%s_padding;\n",
2069 field->type.u.array.elem_alignment * CHAR_BIT,
2070 field->name);
2071 if (ret)
2072 return ret;
2073 }
2074 ret = print_tabs(session, nesting);
2075 if (ret)
2076 return ret;
2077 ret = lttng_metadata_printf(session,
2078 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[%u];\n",
2079 elem_type->u.basic.integer.size,
2080 elem_type->u.basic.integer.alignment,
2081 elem_type->u.basic.integer.signedness,
2082 (elem_type->u.basic.integer.encoding == lttng_encode_none)
2083 ? "none"
2084 : (elem_type->u.basic.integer.encoding == lttng_encode_UTF8)
2085 ? "UTF8"
2086 : "ASCII",
2087 elem_type->u.basic.integer.base,
2088 #if __BYTE_ORDER == __BIG_ENDIAN
2089 elem_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
2090 #else
2091 elem_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
2092 #endif
2093 field->name, field->type.u.array.length);
2094 break;
2095 }
2096 case atype_sequence:
2097 {
2098 const struct lttng_basic_type *elem_type;
2099 const struct lttng_basic_type *length_type;
2100
2101 elem_type = &field->type.u.sequence.elem_type;
2102 length_type = &field->type.u.sequence.length_type;
2103 ret = print_tabs(session, nesting);
2104 if (ret)
2105 return ret;
2106 ret = lttng_metadata_printf(session,
2107 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } __%s_length;\n",
2108 length_type->u.basic.integer.size,
2109 (unsigned int) length_type->u.basic.integer.alignment,
2110 length_type->u.basic.integer.signedness,
2111 (length_type->u.basic.integer.encoding == lttng_encode_none)
2112 ? "none"
2113 : ((length_type->u.basic.integer.encoding == lttng_encode_UTF8)
2114 ? "UTF8"
2115 : "ASCII"),
2116 length_type->u.basic.integer.base,
2117 #if __BYTE_ORDER == __BIG_ENDIAN
2118 length_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
2119 #else
2120 length_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
2121 #endif
2122 field->name);
2123 if (ret)
2124 return ret;
2125
2126 if (field->type.u.sequence.elem_alignment) {
2127 ret = print_tabs(session, nesting);
2128 if (ret)
2129 return ret;
2130 ret = lttng_metadata_printf(session,
2131 "struct { } align(%u) _%s_padding;\n",
2132 field->type.u.sequence.elem_alignment * CHAR_BIT,
2133 field->name);
2134 if (ret)
2135 return ret;
2136 }
2137 ret = print_tabs(session, nesting);
2138 if (ret)
2139 return ret;
2140 ret = lttng_metadata_printf(session,
2141 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s } _%s[ __%s_length ];\n",
2142 elem_type->u.basic.integer.size,
2143 (unsigned int) elem_type->u.basic.integer.alignment,
2144 elem_type->u.basic.integer.signedness,
2145 (elem_type->u.basic.integer.encoding == lttng_encode_none)
2146 ? "none"
2147 : ((elem_type->u.basic.integer.encoding == lttng_encode_UTF8)
2148 ? "UTF8"
2149 : "ASCII"),
2150 elem_type->u.basic.integer.base,
2151 #if __BYTE_ORDER == __BIG_ENDIAN
2152 elem_type->u.basic.integer.reverse_byte_order ? " byte_order = le;" : "",
2153 #else
2154 elem_type->u.basic.integer.reverse_byte_order ? " byte_order = be;" : "",
2155 #endif
2156 field->name,
2157 field->name);
2158 break;
2159 }
2160
2161 case atype_string:
2162 /* Default encoding is UTF8 */
2163 ret = print_tabs(session, nesting);
2164 if (ret)
2165 return ret;
2166 ret = lttng_metadata_printf(session,
2167 "string%s _%s;\n",
2168 field->type.u.basic.string.encoding == lttng_encode_ASCII ?
2169 " { encoding = ASCII; }" : "",
2170 field->name);
2171 break;
2172 case atype_struct:
2173 ret = _lttng_struct_statedump(session, field, nesting);
2174 break;
2175 case atype_array_compound:
2176 ret = _lttng_array_compound_statedump(session, field, nesting);
2177 break;
2178 case atype_sequence_compound:
2179 ret = _lttng_sequence_compound_statedump(session, field, nesting);
2180 break;
2181 case atype_variant:
2182 ret = _lttng_variant_statedump(session, field, nesting);
2183 break;
2184
2185 default:
2186 WARN_ON_ONCE(1);
2187 return -EINVAL;
2188 }
2189 return ret;
2190 }
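/*
 * Illustrative example (not part of the upstream file; the field name and
 * sizes are hypothetical): for a 32-bit unsigned integer field "pid", the
 * atype_integer case above emits something like:
 *
 *	integer { size = 32; align = 32; signed = 0; encoding = none; base = 10; } _pid;
 *
 * The extra " byte_order = le;" (or "be") attribute is only appended when
 * the field's byte order is reversed relative to the native byte order.
 */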
2191
2192 static
2193 int _lttng_context_metadata_statedump(struct lttng_session *session,
2194 struct lttng_ctx *ctx)
2195 {
2196 int ret = 0;
2197 int i;
2198
2199 if (!ctx)
2200 return 0;
2201 for (i = 0; i < ctx->nr_fields; i++) {
2202 const struct lttng_ctx_field *field = &ctx->fields[i];
2203
2204 ret = _lttng_field_statedump(session, &field->event_field, 2);
2205 if (ret)
2206 return ret;
2207 }
2208 return ret;
2209 }
2210
2211 static
2212 int _lttng_fields_metadata_statedump(struct lttng_session *session,
2213 struct lttng_event *event)
2214 {
2215 const struct lttng_event_desc *desc = event->desc;
2216 int ret = 0;
2217 int i;
2218
2219 for (i = 0; i < desc->nr_fields; i++) {
2220 const struct lttng_event_field *field = &desc->fields[i];
2221
2222 ret = _lttng_field_statedump(session, field, 2);
2223 if (ret)
2224 return ret;
2225 }
2226 return ret;
2227 }
2228
2229 /*
2230 * Must be called with sessions_mutex held.
2231 */
2232 static
2233 int _lttng_event_metadata_statedump(struct lttng_session *session,
2234 struct lttng_channel *chan,
2235 struct lttng_event *event)
2236 {
2237 int ret = 0;
2238
2239 if (event->metadata_dumped || !READ_ONCE(session->active))
2240 return 0;
2241 if (chan->channel_type == METADATA_CHANNEL)
2242 return 0;
2243
2244 ret = lttng_metadata_printf(session,
2245 "event {\n"
2246 " name = \"%s\";\n"
2247 " id = %u;\n"
2248 " stream_id = %u;\n",
2249 event->desc->name,
2250 event->id,
2251 event->chan->id);
2252 if (ret)
2253 goto end;
2254
2255 if (event->ctx) {
2256 ret = lttng_metadata_printf(session,
2257 " context := struct {\n");
2258 if (ret)
2259 goto end;
2260 }
2261 ret = _lttng_context_metadata_statedump(session, event->ctx);
2262 if (ret)
2263 goto end;
2264 if (event->ctx) {
2265 ret = lttng_metadata_printf(session,
2266 " };\n");
2267 if (ret)
2268 goto end;
2269 }
2270
2271 ret = lttng_metadata_printf(session,
2272 " fields := struct {\n"
2273 );
2274 if (ret)
2275 goto end;
2276
2277 ret = _lttng_fields_metadata_statedump(session, event);
2278 if (ret)
2279 goto end;
2280
2281 /*
2282 * LTTng space reservation can only reserve multiples of the
2283 * byte size.
2284 */
2285 ret = lttng_metadata_printf(session,
2286 " };\n"
2287 "};\n\n");
2288 if (ret)
2289 goto end;
2290
2291 event->metadata_dumped = 1;
2292 end:
2293 return ret;
2294
2295 }
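/*
 * Illustrative example (not part of the upstream file; names and ids are
 * hypothetical): for an event "sched_switch" with id 0 on stream 0 and no
 * event context, the statedump above produces metadata shaped like:
 *
 *	event {
 *		name = "sched_switch";
 *		id = 0;
 *		stream_id = 0;
 *		fields := struct {
 *			...
 *		};
 *	};
 */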
2296
2297 /*
2298 * Must be called with sessions_mutex held.
2299 */
2300 static
2301 int _lttng_channel_metadata_statedump(struct lttng_session *session,
2302 struct lttng_channel *chan)
2303 {
2304 int ret = 0;
2305
2306 if (chan->metadata_dumped || !READ_ONCE(session->active))
2307 return 0;
2308
2309 if (chan->channel_type == METADATA_CHANNEL)
2310 return 0;
2311
2312 WARN_ON_ONCE(!chan->header_type);
2313 ret = lttng_metadata_printf(session,
2314 "stream {\n"
2315 " id = %u;\n"
2316 " event.header := %s;\n"
2317 " packet.context := struct packet_context;\n",
2318 chan->id,
2319 chan->header_type == 1 ? "struct event_header_compact" :
2320 "struct event_header_large");
2321 if (ret)
2322 goto end;
2323
2324 if (chan->ctx) {
2325 ret = lttng_metadata_printf(session,
2326 " event.context := struct {\n");
2327 if (ret)
2328 goto end;
2329 }
2330 ret = _lttng_context_metadata_statedump(session, chan->ctx);
2331 if (ret)
2332 goto end;
2333 if (chan->ctx) {
2334 ret = lttng_metadata_printf(session,
2335 " };\n");
2336 if (ret)
2337 goto end;
2338 }
2339
2340 ret = lttng_metadata_printf(session,
2341 "};\n\n");
2342
2343 chan->metadata_dumped = 1;
2344 end:
2345 return ret;
2346 }
2347
2348 /*
2349 * Must be called with sessions_mutex held.
2350 */
2351 static
2352 int _lttng_stream_packet_context_declare(struct lttng_session *session)
2353 {
2354 return lttng_metadata_printf(session,
2355 "struct packet_context {\n"
2356 " uint64_clock_monotonic_t timestamp_begin;\n"
2357 " uint64_clock_monotonic_t timestamp_end;\n"
2358 " uint64_t content_size;\n"
2359 " uint64_t packet_size;\n"
2360 " uint64_t packet_seq_num;\n"
2361 " unsigned long events_discarded;\n"
2362 " uint32_t cpu_id;\n"
2363 "};\n\n"
2364 );
2365 }
2366
2367 /*
2368 * Compact header:
2369 * id: range: 0 - 30.
2370 * id 31 is reserved to indicate an extended header.
2371 *
2372 * Large header:
2373 * id: range: 0 - 65534.
2374 * id 65535 is reserved to indicate an extended header.
2375 *
2376 * Must be called with sessions_mutex held.
2377 */
2378 static
2379 int _lttng_event_header_declare(struct lttng_session *session)
2380 {
2381 return lttng_metadata_printf(session,
2382 "struct event_header_compact {\n"
2383 " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
2384 " variant <id> {\n"
2385 " struct {\n"
2386 " uint27_clock_monotonic_t timestamp;\n"
2387 " } compact;\n"
2388 " struct {\n"
2389 " uint32_t id;\n"
2390 " uint64_clock_monotonic_t timestamp;\n"
2391 " } extended;\n"
2392 " } v;\n"
2393 "} align(%u);\n"
2394 "\n"
2395 "struct event_header_large {\n"
2396 " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
2397 " variant <id> {\n"
2398 " struct {\n"
2399 " uint32_clock_monotonic_t timestamp;\n"
2400 " } compact;\n"
2401 " struct {\n"
2402 " uint32_t id;\n"
2403 " uint64_clock_monotonic_t timestamp;\n"
2404 " } extended;\n"
2405 " } v;\n"
2406 "} align(%u);\n\n",
2407 lttng_alignof(uint32_t) * CHAR_BIT,
2408 lttng_alignof(uint16_t) * CHAR_BIT
2409 );
2410 }
2411
2412 /*
2413 * Approximation of NTP time of day to clock monotonic correlation,
2414 * taken at start of trace.
2415 * Yes, this is only an approximation. Yes, we can (and will) do better
2416 * in future versions.
2417 * This function may return a negative offset. It may happen if the
2418 * system sets the REALTIME clock to 0 after boot.
2419 *
2420  * Use a 64-bit timespec on kernels that have it; this makes 32-bit
2421  * architectures y2038 compliant.
2422 */
2423 static
2424 int64_t measure_clock_offset(void)
2425 {
2426 uint64_t monotonic_avg, monotonic[2], realtime;
2427 uint64_t tcf = trace_clock_freq();
2428 int64_t offset;
2429 unsigned long flags;
2430 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
2431 struct timespec64 rts = { 0, 0 };
2432 #else
2433 struct timespec rts = { 0, 0 };
2434 #endif
2435
2436 /* Disable interrupts to increase correlation precision. */
2437 local_irq_save(flags);
2438 monotonic[0] = trace_clock_read64();
2439 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
2440 ktime_get_real_ts64(&rts);
2441 #else
2442 getnstimeofday(&rts);
2443 #endif
2444 monotonic[1] = trace_clock_read64();
2445 local_irq_restore(flags);
2446
2447 monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
2448 realtime = (uint64_t) rts.tv_sec * tcf;
2449 if (tcf == NSEC_PER_SEC) {
2450 realtime += rts.tv_nsec;
2451 } else {
2452 uint64_t n = rts.tv_nsec * tcf;
2453
2454 do_div(n, NSEC_PER_SEC);
2455 realtime += n;
2456 }
2457 offset = (int64_t) realtime - monotonic_avg;
2458 return offset;
2459 }
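/*
 * Worked example (illustrative only, assuming tcf == NSEC_PER_SEC, i.e. a
 * nanosecond trace clock): if the two monotonic reads return 1000000000 and
 * 1000000200 ns while CLOCK_REALTIME reads 1580000000 s and 100 ns, then
 *
 *	monotonic_avg = 1000000100
 *	realtime      = 1580000000 * 1000000000 + 100
 *	offset        = realtime - monotonic_avg
 *
 * The metadata consumer later recovers wall-clock time as
 * offset + clock_value, in units of 1/freq as advertised in the clock
 * declaration emitted below.
 */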
2460
2461 /*
2462 * Output metadata into this session's metadata buffers.
2463 * Must be called with sessions_mutex held.
2464 */
2465 static
2466 int _lttng_session_metadata_statedump(struct lttng_session *session)
2467 {
2468 unsigned char *uuid_c = session->uuid.b;
2469 unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
2470 struct lttng_channel *chan;
2471 struct lttng_event *event;
2472 int ret = 0;
2473
2474 if (!READ_ONCE(session->active))
2475 return 0;
2476 if (session->metadata_dumped)
2477 goto skip_session;
2478
2479 snprintf(uuid_s, sizeof(uuid_s),
2480 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
2481 uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
2482 uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
2483 uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
2484 uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
2485
2486 ret = lttng_metadata_printf(session,
2487 "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
2488 "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
2489 "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
2490 "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
2491 "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
2492 "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
2493 "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
2494 "\n"
2495 "trace {\n"
2496 " major = %u;\n"
2497 " minor = %u;\n"
2498 " uuid = \"%s\";\n"
2499 " byte_order = %s;\n"
2500 " packet.header := struct {\n"
2501 " uint32_t magic;\n"
2502 " uint8_t uuid[16];\n"
2503 " uint32_t stream_id;\n"
2504 " uint64_t stream_instance_id;\n"
2505 " };\n"
2506 "};\n\n",
2507 lttng_alignof(uint8_t) * CHAR_BIT,
2508 lttng_alignof(uint16_t) * CHAR_BIT,
2509 lttng_alignof(uint32_t) * CHAR_BIT,
2510 lttng_alignof(uint64_t) * CHAR_BIT,
2511 sizeof(unsigned long) * CHAR_BIT,
2512 lttng_alignof(unsigned long) * CHAR_BIT,
2513 CTF_SPEC_MAJOR,
2514 CTF_SPEC_MINOR,
2515 uuid_s,
2516 #if __BYTE_ORDER == __BIG_ENDIAN
2517 "be"
2518 #else
2519 "le"
2520 #endif
2521 );
2522 if (ret)
2523 goto end;
2524
2525 ret = lttng_metadata_printf(session,
2526 "env {\n"
2527 " hostname = \"%s\";\n"
2528 " domain = \"kernel\";\n"
2529 " sysname = \"%s\";\n"
2530 " kernel_release = \"%s\";\n"
2531 " kernel_version = \"%s\";\n"
2532 " tracer_name = \"lttng-modules\";\n"
2533 " tracer_major = %d;\n"
2534 " tracer_minor = %d;\n"
2535 " tracer_patchlevel = %d;\n"
2536 "};\n\n",
2537 current->nsproxy->uts_ns->name.nodename,
2538 utsname()->sysname,
2539 utsname()->release,
2540 utsname()->version,
2541 LTTNG_MODULES_MAJOR_VERSION,
2542 LTTNG_MODULES_MINOR_VERSION,
2543 LTTNG_MODULES_PATCHLEVEL_VERSION
2544 );
2545 if (ret)
2546 goto end;
2547
2548 ret = lttng_metadata_printf(session,
2549 "clock {\n"
2550 " name = \"%s\";\n",
2551 trace_clock_name()
2552 );
2553 if (ret)
2554 goto end;
2555
2556 if (!trace_clock_uuid(clock_uuid_s)) {
2557 ret = lttng_metadata_printf(session,
2558 " uuid = \"%s\";\n",
2559 clock_uuid_s
2560 );
2561 if (ret)
2562 goto end;
2563 }
2564
2565 ret = lttng_metadata_printf(session,
2566 " description = \"%s\";\n"
2567 " freq = %llu; /* Frequency, in Hz */\n"
2568 " /* clock value offset from Epoch is: offset * (1/freq) */\n"
2569 " offset = %lld;\n"
2570 "};\n\n",
2571 trace_clock_description(),
2572 (unsigned long long) trace_clock_freq(),
2573 (long long) measure_clock_offset()
2574 );
2575 if (ret)
2576 goto end;
2577
2578 ret = lttng_metadata_printf(session,
2579 "typealias integer {\n"
2580 " size = 27; align = 1; signed = false;\n"
2581 " map = clock.%s.value;\n"
2582 "} := uint27_clock_monotonic_t;\n"
2583 "\n"
2584 "typealias integer {\n"
2585 " size = 32; align = %u; signed = false;\n"
2586 " map = clock.%s.value;\n"
2587 "} := uint32_clock_monotonic_t;\n"
2588 "\n"
2589 "typealias integer {\n"
2590 " size = 64; align = %u; signed = false;\n"
2591 " map = clock.%s.value;\n"
2592 "} := uint64_clock_monotonic_t;\n\n",
2593 trace_clock_name(),
2594 lttng_alignof(uint32_t) * CHAR_BIT,
2595 trace_clock_name(),
2596 lttng_alignof(uint64_t) * CHAR_BIT,
2597 trace_clock_name()
2598 );
2599 if (ret)
2600 goto end;
2601
2602 ret = _lttng_stream_packet_context_declare(session);
2603 if (ret)
2604 goto end;
2605
2606 ret = _lttng_event_header_declare(session);
2607 if (ret)
2608 goto end;
2609
2610 skip_session:
2611 list_for_each_entry(chan, &session->chan, list) {
2612 ret = _lttng_channel_metadata_statedump(session, chan);
2613 if (ret)
2614 goto end;
2615 }
2616
2617 list_for_each_entry(event, &session->events, list) {
2618 ret = _lttng_event_metadata_statedump(session, event->chan, event);
2619 if (ret)
2620 goto end;
2621 }
2622 session->metadata_dumped = 1;
2623 end:
2624 return ret;
2625 }
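/*
 * Note (descriptive comment, not in the upstream file): because the session,
 * each channel and each event carry their own metadata_dumped flag, calling
 * this statedump again after enabling new events only appends the missing
 * stream and event declarations; the session-wide preamble is skipped via
 * the skip_session label.
 */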
2626
2627 /**
2628 * lttng_transport_register - LTT transport registration
2629 * @transport: transport structure
2630 *
2631 * Registers a transport which can be used as output to extract the data out of
2632 * LTTng. The module calling this registration function must ensure that no
2633 * trap-inducing code will be executed by the transport functions. E.g.
2634 * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
2635 * is made visible to the transport function. This registration acts as a
2636  * vmalloc_sync_all. Therefore, a module only needs to synchronize the TLBs
2637  * if it allocates virtual memory after its registration.
2638 */
2639 void lttng_transport_register(struct lttng_transport *transport)
2640 {
2641 /*
2642 * Make sure no page fault can be triggered by the module about to be
2643 * registered. We deal with this here so we don't have to call
2644 * vmalloc_sync_all() in each module's init.
2645 */
2646 wrapper_vmalloc_sync_all();
2647
2648 mutex_lock(&sessions_mutex);
2649 list_add_tail(&transport->node, &lttng_transport_list);
2650 mutex_unlock(&sessions_mutex);
2651 }
2652 EXPORT_SYMBOL_GPL(lttng_transport_register);
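/*
 * Illustrative usage sketch (not part of this file; field names follow the
 * in-tree ring-buffer client modules and may differ):
 *
 *	static struct lttng_transport my_transport = {
 *		.name = "relay-discard",
 *		.owner = THIS_MODULE,
 *		.ops = { ... },
 *	};
 *
 *	static int __init my_client_init(void)
 *	{
 *		lttng_transport_register(&my_transport);
 *		return 0;
 *	}
 *
 * The registration itself performs wrapper_vmalloc_sync_all() on behalf of
 * the caller, as described above.
 */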
2653
2654 /**
2655 * lttng_transport_unregister - LTT transport unregistration
2656 * @transport: transport structure
2657 */
2658 void lttng_transport_unregister(struct lttng_transport *transport)
2659 {
2660 mutex_lock(&sessions_mutex);
2661 list_del(&transport->node);
2662 mutex_unlock(&sessions_mutex);
2663 }
2664 EXPORT_SYMBOL_GPL(lttng_transport_unregister);
2665
2666 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
2667
2668 enum cpuhp_state lttng_hp_prepare;
2669 enum cpuhp_state lttng_hp_online;
2670
2671 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
2672 {
2673 struct lttng_cpuhp_node *lttng_node;
2674
2675 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
2676 switch (lttng_node->component) {
2677 case LTTNG_RING_BUFFER_FRONTEND:
2678 return 0;
2679 case LTTNG_RING_BUFFER_BACKEND:
2680 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
2681 case LTTNG_RING_BUFFER_ITER:
2682 return 0;
2683 case LTTNG_CONTEXT_PERF_COUNTERS:
2684 return 0;
2685 default:
2686 return -EINVAL;
2687 }
2688 }
2689
2690 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
2691 {
2692 struct lttng_cpuhp_node *lttng_node;
2693
2694 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
2695 switch (lttng_node->component) {
2696 case LTTNG_RING_BUFFER_FRONTEND:
2697 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
2698 case LTTNG_RING_BUFFER_BACKEND:
2699 return 0;
2700 case LTTNG_RING_BUFFER_ITER:
2701 return 0;
2702 case LTTNG_CONTEXT_PERF_COUNTERS:
2703 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
2704 default:
2705 return -EINVAL;
2706 }
2707 }
2708
2709 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
2710 {
2711 struct lttng_cpuhp_node *lttng_node;
2712
2713 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
2714 switch (lttng_node->component) {
2715 case LTTNG_RING_BUFFER_FRONTEND:
2716 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
2717 case LTTNG_RING_BUFFER_BACKEND:
2718 return 0;
2719 case LTTNG_RING_BUFFER_ITER:
2720 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
2721 case LTTNG_CONTEXT_PERF_COUNTERS:
2722 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
2723 default:
2724 return -EINVAL;
2725 }
2726 }
2727
2728 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
2729 {
2730 struct lttng_cpuhp_node *lttng_node;
2731
2732 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
2733 switch (lttng_node->component) {
2734 case LTTNG_RING_BUFFER_FRONTEND:
2735 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
2736 case LTTNG_RING_BUFFER_BACKEND:
2737 return 0;
2738 case LTTNG_RING_BUFFER_ITER:
2739 return 0;
2740 case LTTNG_CONTEXT_PERF_COUNTERS:
2741 return 0;
2742 default:
2743 return -EINVAL;
2744 }
2745 }
2746
2747 static int __init lttng_init_cpu_hotplug(void)
2748 {
2749 int ret;
2750
2751 ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
2752 lttng_hotplug_prepare,
2753 lttng_hotplug_dead);
2754 if (ret < 0) {
2755 return ret;
2756 }
2757 lttng_hp_prepare = ret;
2758 lttng_rb_set_hp_prepare(ret);
2759
2760 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
2761 lttng_hotplug_online,
2762 lttng_hotplug_offline);
2763 if (ret < 0) {
2764 cpuhp_remove_multi_state(lttng_hp_prepare);
2765 lttng_hp_prepare = 0;
2766 return ret;
2767 }
2768 lttng_hp_online = ret;
2769 lttng_rb_set_hp_online(ret);
2770
2771 return 0;
2772 }
2773
2774 static void __exit lttng_exit_cpu_hotplug(void)
2775 {
2776 lttng_rb_set_hp_online(0);
2777 cpuhp_remove_multi_state(lttng_hp_online);
2778 lttng_rb_set_hp_prepare(0);
2779 cpuhp_remove_multi_state(lttng_hp_prepare);
2780 }
2781
2782 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
2783 static int lttng_init_cpu_hotplug(void)
2784 {
2785 return 0;
2786 }
2787 static void lttng_exit_cpu_hotplug(void)
2788 {
2789 }
2790 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
2791
2792
2793 static int __init lttng_events_init(void)
2794 {
2795 int ret;
2796
2797 ret = wrapper_lttng_fixup_sig(THIS_MODULE);
2798 if (ret)
2799 return ret;
2800 ret = wrapper_get_pfnblock_flags_mask_init();
2801 if (ret)
2802 return ret;
2803 ret = wrapper_get_pageblock_flags_mask_init();
2804 if (ret)
2805 return ret;
2806 ret = lttng_probes_init();
2807 if (ret)
2808 return ret;
2809 ret = lttng_context_init();
2810 if (ret)
2811 return ret;
2812 ret = lttng_tracepoint_init();
2813 if (ret)
2814 goto error_tp;
2815 event_cache = KMEM_CACHE(lttng_event, 0);
2816 if (!event_cache) {
2817 ret = -ENOMEM;
2818 goto error_kmem;
2819 }
2820 ret = lttng_abi_init();
2821 if (ret)
2822 goto error_abi;
2823 ret = lttng_logger_init();
2824 if (ret)
2825 goto error_logger;
2826 ret = lttng_init_cpu_hotplug();
2827 if (ret)
2828 goto error_hotplug;
2829 printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
2830 __stringify(LTTNG_MODULES_MAJOR_VERSION),
2831 __stringify(LTTNG_MODULES_MINOR_VERSION),
2832 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
2833 LTTNG_MODULES_EXTRAVERSION,
2834 LTTNG_VERSION_NAME,
2835 #ifdef LTTNG_EXTRA_VERSION_GIT
2836 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
2837 #else
2838 "",
2839 #endif
2840 #ifdef LTTNG_EXTRA_VERSION_NAME
2841 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
2842 #else
2843 "");
2844 #endif
2845 return 0;
2846
2847 error_hotplug:
2848 lttng_logger_exit();
2849 error_logger:
2850 lttng_abi_exit();
2851 error_abi:
2852 kmem_cache_destroy(event_cache);
2853 error_kmem:
2854 lttng_tracepoint_exit();
2855 error_tp:
2856 lttng_context_exit();
2857 printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
2858 __stringify(LTTNG_MODULES_MAJOR_VERSION),
2859 __stringify(LTTNG_MODULES_MINOR_VERSION),
2860 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
2861 LTTNG_MODULES_EXTRAVERSION,
2862 LTTNG_VERSION_NAME,
2863 #ifdef LTTNG_EXTRA_VERSION_GIT
2864 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
2865 #else
2866 "",
2867 #endif
2868 #ifdef LTTNG_EXTRA_VERSION_NAME
2869 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
2870 #else
2871 "");
2872 #endif
2873 return ret;
2874 }
2875
2876 module_init(lttng_events_init);
2877
2878 static void __exit lttng_events_exit(void)
2879 {
2880 struct lttng_session *session, *tmpsession;
2881
2882 lttng_exit_cpu_hotplug();
2883 lttng_logger_exit();
2884 lttng_abi_exit();
2885 list_for_each_entry_safe(session, tmpsession, &sessions, list)
2886 lttng_session_destroy(session);
2887 kmem_cache_destroy(event_cache);
2888 lttng_tracepoint_exit();
2889 lttng_context_exit();
2890 printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
2891 __stringify(LTTNG_MODULES_MAJOR_VERSION),
2892 __stringify(LTTNG_MODULES_MINOR_VERSION),
2893 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
2894 LTTNG_MODULES_EXTRAVERSION,
2895 LTTNG_VERSION_NAME,
2896 #ifdef LTTNG_EXTRA_VERSION_GIT
2897 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
2898 #else
2899 "",
2900 #endif
2901 #ifdef LTTNG_EXTRA_VERSION_NAME
2902 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
2903 #else
2904 "");
2905 #endif
2906 }
2907
2908 module_exit(lttng_events_exit);
2909
2910 #include "extra_version/patches.i"
2911 #ifdef LTTNG_EXTRA_VERSION_GIT
2912 MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
2913 #endif
2914 #ifdef LTTNG_EXTRA_VERSION_NAME
2915 MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
2916 #endif
2917 MODULE_LICENSE("GPL and additional rights");
2918 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
2919 MODULE_DESCRIPTION("LTTng Events");
2920 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
2921 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
2922 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
2923 LTTNG_MODULES_EXTRAVERSION);