Cleanup: Remove dead code in _lttng_kernel_event_create()
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-events.c
4 *
5 * Holds LTTng per-session event registry.
6 *
7 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 /*
11 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
12 * overrides a function with a define.
13 */
14 #include "wrapper/page_alloc.h"
15
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched.h>
19 #include <linux/slab.h>
20 #include <linux/jiffies.h>
21 #include <linux/utsname.h>
22 #include <linux/err.h>
23 #include <linux/seq_file.h>
24 #include <linux/file.h>
25 #include <linux/anon_inodes.h>
26 #include <wrapper/file.h>
27 #include <linux/uaccess.h>
28 #include <linux/vmalloc.h>
29 #include <linux/dmi.h>
30
31 #include <wrapper/compiler_attributes.h>
32 #include <wrapper/uuid.h>
33 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
34 #include <wrapper/random.h>
35 #include <wrapper/tracepoint.h>
36 #include <wrapper/list.h>
37 #include <wrapper/types.h>
38 #include <lttng/kernel-version.h>
39 #include <lttng/events.h>
40 #include <lttng/events-internal.h>
41 #include <lttng/lttng-bytecode.h>
42 #include <lttng/tracer.h>
43 #include <lttng/event-notifier-notification.h>
44 #include <lttng/abi-old.h>
45 #include <lttng/endian.h>
46 #include <lttng/string-utils.h>
47 #include <lttng/utils.h>
48 #include <ringbuffer/backend.h>
49 #include <ringbuffer/frontend.h>
50 #include <wrapper/time.h>
51
52 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,16,0))
53 #include <linux/stdarg.h>
54 #else
55 #include <stdarg.h>
56 #endif
57
58 #define METADATA_CACHE_DEFAULT_SIZE 4096
59
60 static LIST_HEAD(sessions);
61 static LIST_HEAD(event_notifier_groups);
62 static LIST_HEAD(lttng_transport_list);
63 static LIST_HEAD(lttng_counter_transport_list);
64 /*
65 * Protect the sessions and metadata caches.
66 */
67 static DEFINE_MUTEX(sessions_mutex);
68 static struct kmem_cache *event_recorder_cache;
69 static struct kmem_cache *event_recorder_private_cache;
70 static struct kmem_cache *event_notifier_cache;
71 static struct kmem_cache *event_notifier_private_cache;
72
73 static void lttng_session_lazy_sync_event_enablers(struct lttng_kernel_session *session);
74 static void lttng_session_sync_event_enablers(struct lttng_kernel_session *session);
75 static void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group);
76 static void lttng_event_enabler_sync(struct lttng_event_enabler_common *event_enabler);
77
78 static void _lttng_event_destroy(struct lttng_kernel_event_common *event);
79 static void _lttng_channel_destroy(struct lttng_kernel_channel_buffer *chan);
80 static void _lttng_event_unregister(struct lttng_kernel_event_common *event);
81 static
82 int _lttng_event_recorder_metadata_statedump(struct lttng_kernel_event_common *event);
83 static
84 int _lttng_session_metadata_statedump(struct lttng_kernel_session *session);
85 static
86 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream);
87 static
88 int _lttng_type_statedump(struct lttng_kernel_session *session,
89 const struct lttng_kernel_type_common *type,
90 enum lttng_kernel_string_encoding parent_encoding,
91 size_t nesting);
92 static
93 int _lttng_field_statedump(struct lttng_kernel_session *session,
94 const struct lttng_kernel_event_field *field,
95 size_t nesting, const char **prev_field_name_p);
96
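/*
 * Wait for any probe callback currently executing to finish. Kernels
 * >= 5.1 (and the matching RHEL range) no longer provide
 * synchronize_sched(), so synchronize_rcu() is used instead; older
 * kernels use synchronize_sched(). PREEMPT_RT configurations
 * additionally issue a synchronize_rcu().
 */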
97 void synchronize_trace(void)
98 {
99 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,1,0) || \
100 LTTNG_RHEL_KERNEL_RANGE(4,18,0,193,0,0, 4,19,0,0,0,0))
101 synchronize_rcu();
102 #else
103 synchronize_sched();
104 #endif
105
106 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0))
107 #ifdef CONFIG_PREEMPT_RT_FULL
108 synchronize_rcu();
109 #endif
110 #else /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
111 #ifdef CONFIG_PREEMPT_RT
112 synchronize_rcu();
113 #endif
114 #endif /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) */
115 }
116
117 void lttng_lock_sessions(void)
118 {
119 mutex_lock(&sessions_mutex);
120 }
121
122 void lttng_unlock_sessions(void)
123 {
124 mutex_unlock(&sessions_mutex);
125 }
126
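/*
 * Look up a registered ring buffer transport by name.
 * Returns NULL if no transport with that name has been registered.
 */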
127 static struct lttng_transport *lttng_transport_find(const char *name)
128 {
129 struct lttng_transport *transport;
130
131 list_for_each_entry(transport, &lttng_transport_list, node) {
132 if (!strcmp(transport->name, name))
133 return transport;
134 }
135 return NULL;
136 }
137
138 /*
139 * Called with sessions lock held.
140 */
141 int lttng_session_active(void)
142 {
143 struct lttng_kernel_session_private *iter;
144
145 list_for_each_entry(iter, &sessions, list) {
146 if (iter->pub->active)
147 return 1;
148 }
149 return 0;
150 }
151
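/*
 * Allocate and initialize a tracing session: public/private structures,
 * metadata cache, event hash table and the six ID trackers, then add it
 * to the global session list. Returns NULL on allocation or tracker
 * initialization failure.
 */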
152 struct lttng_kernel_session *lttng_session_create(void)
153 {
154 struct lttng_kernel_session *session;
155 struct lttng_kernel_session_private *session_priv;
156 struct lttng_metadata_cache *metadata_cache;
157 int i;
158
159 mutex_lock(&sessions_mutex);
160 session = lttng_kvzalloc(sizeof(*session), GFP_KERNEL);
161 if (!session)
162 goto err;
163 session_priv = lttng_kvzalloc(sizeof(*session_priv), GFP_KERNEL);
164 if (!session_priv)
165 goto err_free_session;
166 session->priv = session_priv;
167 session_priv->pub = session;
168
169 INIT_LIST_HEAD(&session_priv->chan);
170 INIT_LIST_HEAD(&session_priv->events);
171 lttng_guid_gen(&session_priv->uuid);
172
173 metadata_cache = kzalloc(sizeof(struct lttng_metadata_cache),
174 GFP_KERNEL);
175 if (!metadata_cache)
176 goto err_free_session_private;
177 metadata_cache->data = vzalloc(METADATA_CACHE_DEFAULT_SIZE);
178 if (!metadata_cache->data)
179 goto err_free_cache;
180 metadata_cache->cache_alloc = METADATA_CACHE_DEFAULT_SIZE;
181 kref_init(&metadata_cache->refcount);
182 mutex_init(&metadata_cache->lock);
183 session_priv->metadata_cache = metadata_cache;
184 INIT_LIST_HEAD(&metadata_cache->metadata_stream);
185 memcpy(&metadata_cache->uuid, &session_priv->uuid,
186 sizeof(metadata_cache->uuid));
187 INIT_LIST_HEAD(&session_priv->enablers_head);
188 for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
189 INIT_HLIST_HEAD(&session_priv->events_ht.table[i]);
190 list_add(&session_priv->list, &sessions);
191
192 if (lttng_id_tracker_init(&session->pid_tracker, session, TRACKER_PID))
193 goto tracker_alloc_error;
194 if (lttng_id_tracker_init(&session->vpid_tracker, session, TRACKER_VPID))
195 goto tracker_alloc_error;
196 if (lttng_id_tracker_init(&session->uid_tracker, session, TRACKER_UID))
197 goto tracker_alloc_error;
198 if (lttng_id_tracker_init(&session->vuid_tracker, session, TRACKER_VUID))
199 goto tracker_alloc_error;
200 if (lttng_id_tracker_init(&session->gid_tracker, session, TRACKER_GID))
201 goto tracker_alloc_error;
202 if (lttng_id_tracker_init(&session->vgid_tracker, session, TRACKER_VGID))
203 goto tracker_alloc_error;
204
205 mutex_unlock(&sessions_mutex);
206
207 return session;
208
209 tracker_alloc_error:
210 lttng_id_tracker_fini(&session->pid_tracker);
211 lttng_id_tracker_fini(&session->vpid_tracker);
212 lttng_id_tracker_fini(&session->uid_tracker);
213 lttng_id_tracker_fini(&session->vuid_tracker);
214 lttng_id_tracker_fini(&session->gid_tracker);
215 lttng_id_tracker_fini(&session->vgid_tracker);
216 err_free_cache:
217 kfree(metadata_cache);
218 err_free_session_private:
219 lttng_kvfree(session_priv);
220 err_free_session:
221 lttng_kvfree(session);
222 err:
223 mutex_unlock(&sessions_mutex);
224 return NULL;
225 }
226
227 static
228 struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
229 {
230 struct lttng_counter_transport *transport;
231
232 list_for_each_entry(transport, &lttng_counter_transport_list, node) {
233 if (!strcmp(transport->name, name))
234 return transport;
235 }
236 return NULL;
237 }
238
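/*
 * Create a counter backed by the named counter transport. Takes a
 * reference on the transport module for the lifetime of the counter.
 * Returns NULL if the transport is not found or allocation fails.
 */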
239 struct lttng_counter *lttng_kernel_counter_create(
240 const char *counter_transport_name,
241 size_t number_dimensions, const size_t *dimensions_sizes)
242 {
243 struct lttng_counter *counter = NULL;
244 struct lttng_counter_transport *counter_transport = NULL;
245
246 counter_transport = lttng_counter_transport_find(counter_transport_name);
247 if (!counter_transport) {
248 printk(KERN_WARNING "LTTng: counter transport %s not found.\n",
249 counter_transport_name);
250 goto notransport;
251 }
252 if (!try_module_get(counter_transport->owner)) {
253 printk(KERN_WARNING "LTTng: Can't lock counter transport module.\n");
254 goto notransport;
255 }
256
257 counter = lttng_kvzalloc(sizeof(struct lttng_counter), GFP_KERNEL);
258 if (!counter)
259 goto nomem;
260
261 /* Create event notifier error counter. */
262 counter->ops = &counter_transport->ops;
263 counter->transport = counter_transport;
264
265 counter->counter = counter->ops->counter_create(
266 number_dimensions, dimensions_sizes, 0);
267 if (!counter->counter) {
268 goto create_error;
269 }
270
271 return counter;
272
273 create_error:
274 lttng_kvfree(counter);
275 nomem:
276 if (counter_transport)
277 module_put(counter_transport->owner);
278 notransport:
279 return NULL;
280 }
281
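/*
 * Create an event notifier group: find the "relay-event-notifier"
 * transport, create the ring buffer channel used to transport
 * notifications, and add the group to the global list.
 * Returns NULL on error.
 */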
282 struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
283 {
284 struct lttng_transport *transport = NULL;
285 struct lttng_event_notifier_group *event_notifier_group;
286 const char *transport_name = "relay-event-notifier";
287 size_t subbuf_size = 4096; //TODO
288 size_t num_subbuf = 16; //TODO
289 unsigned int switch_timer_interval = 0;
290 unsigned int read_timer_interval = 0;
291 int i;
292
293 mutex_lock(&sessions_mutex);
294
295 transport = lttng_transport_find(transport_name);
296 if (!transport) {
297 printk(KERN_WARNING "LTTng: transport %s not found\n",
298 transport_name);
299 goto notransport;
300 }
301 if (!try_module_get(transport->owner)) {
302 printk(KERN_WARNING "LTTng: Can't lock transport %s module.\n",
303 transport_name);
304 goto notransport;
305 }
306
307 event_notifier_group = lttng_kvzalloc(sizeof(struct lttng_event_notifier_group),
308 GFP_KERNEL);
309 if (!event_notifier_group)
310 goto nomem;
311
312 /*
313 * Initialize the ring buffer used to store event notifier
314 * notifications.
315 */
316 event_notifier_group->ops = &transport->ops;
317 event_notifier_group->chan = transport->ops.priv->channel_create(
318 transport_name, event_notifier_group, NULL,
319 subbuf_size, num_subbuf, switch_timer_interval,
320 read_timer_interval);
321 if (!event_notifier_group->chan)
322 goto create_error;
323
324 event_notifier_group->transport = transport;
325
326 INIT_LIST_HEAD(&event_notifier_group->enablers_head);
327 INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
328 for (i = 0; i < LTTNG_EVENT_HT_SIZE; i++)
329 INIT_HLIST_HEAD(&event_notifier_group->events_ht.table[i]);
330
331 list_add(&event_notifier_group->node, &event_notifier_groups);
332
333 mutex_unlock(&sessions_mutex);
334
335 return event_notifier_group;
336
337 create_error:
338 lttng_kvfree(event_notifier_group);
339 nomem:
340 if (transport)
341 module_put(transport->owner);
342 notransport:
343 mutex_unlock(&sessions_mutex);
344 return NULL;
345 }
346
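/*
 * kref release callback for the metadata cache: free the cache buffer
 * and the cache structure itself.
 */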
347 void metadata_cache_destroy(struct kref *kref)
348 {
349 struct lttng_metadata_cache *cache =
350 container_of(kref, struct lttng_metadata_cache, refcount);
351 vfree(cache->data);
352 kfree(cache);
353 }
354
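/*
 * Tear down a session: unregister all events and syscall tables, wait
 * for in-flight probes with synchronize_trace(), then destroy enablers,
 * events, channels and ID trackers, and drop the metadata cache
 * reference.
 */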
355 void lttng_session_destroy(struct lttng_kernel_session *session)
356 {
357 struct lttng_kernel_channel_buffer_private *chan_priv, *tmpchan_priv;
358 struct lttng_kernel_event_recorder_private *event_recorder_priv, *tmpevent_recorder_priv;
359 struct lttng_metadata_stream *metadata_stream;
360 struct lttng_event_enabler_common *event_enabler, *tmp_event_enabler;
361 int ret;
362
363 mutex_lock(&sessions_mutex);
364 WRITE_ONCE(session->active, 0);
365 list_for_each_entry(chan_priv, &session->priv->chan, node) {
366 ret = lttng_syscalls_unregister_syscall_table(&chan_priv->parent.syscall_table);
367 WARN_ON(ret);
368 }
369 list_for_each_entry(event_recorder_priv, &session->priv->events, parent.node)
370 _lttng_event_unregister(&event_recorder_priv->pub->parent);
371 synchronize_trace(); /* Wait for in-flight events to complete */
372 list_for_each_entry(chan_priv, &session->priv->chan, node) {
373 ret = lttng_syscalls_destroy_syscall_table(&chan_priv->parent.syscall_table);
374 WARN_ON(ret);
375 }
376 list_for_each_entry_safe(event_enabler, tmp_event_enabler, &session->priv->enablers_head, node)
377 lttng_event_enabler_destroy(event_enabler);
378 list_for_each_entry_safe(event_recorder_priv, tmpevent_recorder_priv, &session->priv->events, parent.node)
379 _lttng_event_destroy(&event_recorder_priv->pub->parent);
380 list_for_each_entry_safe(chan_priv, tmpchan_priv, &session->priv->chan, node) {
381 BUG_ON(chan_priv->channel_type == METADATA_CHANNEL);
382 _lttng_channel_destroy(chan_priv->pub);
383 }
384 mutex_lock(&session->priv->metadata_cache->lock);
385 list_for_each_entry(metadata_stream, &session->priv->metadata_cache->metadata_stream, list)
386 _lttng_metadata_channel_hangup(metadata_stream);
387 mutex_unlock(&session->priv->metadata_cache->lock);
388 lttng_id_tracker_fini(&session->pid_tracker);
389 lttng_id_tracker_fini(&session->vpid_tracker);
390 lttng_id_tracker_fini(&session->uid_tracker);
391 lttng_id_tracker_fini(&session->vuid_tracker);
392 lttng_id_tracker_fini(&session->gid_tracker);
393 lttng_id_tracker_fini(&session->vgid_tracker);
394 kref_put(&session->priv->metadata_cache->refcount, metadata_cache_destroy);
395 list_del(&session->priv->list);
396 mutex_unlock(&sessions_mutex);
397 lttng_kvfree(session->priv);
398 lttng_kvfree(session);
399 }
400
401 void lttng_event_notifier_group_destroy(
402 struct lttng_event_notifier_group *event_notifier_group)
403 {
404 struct lttng_event_enabler_common *event_enabler, *tmp_event_enabler;
405 struct lttng_kernel_event_notifier_private *event_notifier_priv, *tmpevent_notifier_priv;
406 int ret;
407
408 if (!event_notifier_group)
409 return;
410
411 mutex_lock(&sessions_mutex);
412
413 ret = lttng_syscalls_unregister_syscall_table(&event_notifier_group->syscall_table);
414 WARN_ON(ret);
415
416 list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
417 &event_notifier_group->event_notifiers_head, parent.node)
418 _lttng_event_unregister(&event_notifier_priv->pub->parent);
419
420 /* Wait for in-flight event notifiers to complete */
421 synchronize_trace();
422
423 irq_work_sync(&event_notifier_group->wakeup_pending);
424
425 ret = lttng_syscalls_destroy_syscall_table(&event_notifier_group->syscall_table);
426 WARN_ON(ret);
427
428 list_for_each_entry_safe(event_enabler, tmp_event_enabler,
429 &event_notifier_group->enablers_head, node)
430 lttng_event_enabler_destroy(event_enabler);
431
432 list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
433 &event_notifier_group->event_notifiers_head, parent.node)
434 _lttng_event_destroy(&event_notifier_priv->pub->parent);
435
436 if (event_notifier_group->error_counter) {
437 struct lttng_counter *error_counter = event_notifier_group->error_counter;
438
439 error_counter->ops->counter_destroy(error_counter->counter);
440 module_put(error_counter->transport->owner);
441 lttng_kvfree(error_counter);
442 event_notifier_group->error_counter = NULL;
443 }
444
445 event_notifier_group->ops->priv->channel_destroy(event_notifier_group->chan);
446 module_put(event_notifier_group->transport->owner);
447 list_del(&event_notifier_group->node);
448
449 mutex_unlock(&sessions_mutex);
450 lttng_kvfree(event_notifier_group);
451 }
452
453 int lttng_session_statedump(struct lttng_kernel_session *session)
454 {
455 int ret;
456
457 mutex_lock(&sessions_mutex);
458 ret = lttng_statedump_start(session);
459 mutex_unlock(&sessions_mutex);
460 return ret;
461 }
462
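/*
 * Activate a session: sync enablers, choose the event header type of
 * each channel based on the number of allocated event IDs, clear stream
 * quiescent state, then emit the metadata and kernel statedumps.
 */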
463 int lttng_session_enable(struct lttng_kernel_session *session)
464 {
465 int ret = 0;
466 struct lttng_kernel_channel_buffer_private *chan_priv;
467
468 mutex_lock(&sessions_mutex);
469 if (session->active) {
470 ret = -EBUSY;
471 goto end;
472 }
473
474 /* Set transient enabler state to "enabled" */
475 session->priv->tstate = 1;
476
477 /* We need to sync enablers with session before activation. */
478 lttng_session_sync_event_enablers(session);
479
480 /*
481 * Snapshot the number of events per channel to know the type of header
482 * we need to use.
483 */
484 list_for_each_entry(chan_priv, &session->priv->chan, node) {
485 if (chan_priv->header_type)
486 continue; /* don't change it if session stop/restart */
487 if (chan_priv->free_event_id < 31)
488 chan_priv->header_type = 1; /* compact */
489 else
490 chan_priv->header_type = 2; /* large */
491 }
492
493 /* Clear each stream's quiescent state. */
494 list_for_each_entry(chan_priv, &session->priv->chan, node) {
495 if (chan_priv->channel_type != METADATA_CHANNEL)
496 lib_ring_buffer_clear_quiescent_channel(chan_priv->rb_chan);
497 }
498
499 WRITE_ONCE(session->active, 1);
500 WRITE_ONCE(session->priv->been_active, 1);
501 ret = _lttng_session_metadata_statedump(session);
502 if (ret) {
503 WRITE_ONCE(session->active, 0);
504 goto end;
505 }
506 ret = lttng_statedump_start(session);
507 if (ret)
508 WRITE_ONCE(session->active, 0);
509 end:
510 mutex_unlock(&sessions_mutex);
511 return ret;
512 }
513
514 int lttng_session_disable(struct lttng_kernel_session *session)
515 {
516 int ret = 0;
517 struct lttng_kernel_channel_buffer_private *chan_priv;
518
519 mutex_lock(&sessions_mutex);
520 if (!session->active) {
521 ret = -EBUSY;
522 goto end;
523 }
524 WRITE_ONCE(session->active, 0);
525
526 /* Set transient enabler state to "disabled" */
527 session->priv->tstate = 0;
528 lttng_session_sync_event_enablers(session);
529
530 /* Set each stream's quiescent state. */
531 list_for_each_entry(chan_priv, &session->priv->chan, node) {
532 if (chan_priv->channel_type != METADATA_CHANNEL)
533 lib_ring_buffer_set_quiescent_channel(chan_priv->rb_chan);
534 }
535 end:
536 mutex_unlock(&sessions_mutex);
537 return ret;
538 }
539
540 int lttng_session_metadata_regenerate(struct lttng_kernel_session *session)
541 {
542 int ret = 0;
543 struct lttng_kernel_channel_buffer_private *chan_priv;
544 struct lttng_kernel_event_recorder_private *event_recorder_priv;
545 struct lttng_metadata_cache *cache = session->priv->metadata_cache;
546 struct lttng_metadata_stream *stream;
547
548 mutex_lock(&sessions_mutex);
549 if (!session->active) {
550 ret = -EBUSY;
551 goto end;
552 }
553
554 mutex_lock(&cache->lock);
555 memset(cache->data, 0, cache->cache_alloc);
556 cache->metadata_written = 0;
557 cache->version++;
558 list_for_each_entry(stream, &session->priv->metadata_cache->metadata_stream, list) {
559 stream->metadata_out = 0;
560 stream->metadata_in = 0;
561 }
562 mutex_unlock(&cache->lock);
563
564 session->priv->metadata_dumped = 0;
565 list_for_each_entry(chan_priv, &session->priv->chan, node) {
566 chan_priv->metadata_dumped = 0;
567 }
568
569 list_for_each_entry(event_recorder_priv, &session->priv->events, parent.node) {
570 event_recorder_priv->metadata_dumped = 0;
571 }
572
573 ret = _lttng_session_metadata_statedump(session);
574
575 end:
576 mutex_unlock(&sessions_mutex);
577 return ret;
578 }
579
580 static
581 bool is_channel_buffer_metadata(struct lttng_kernel_channel_common *channel)
582 {
583 struct lttng_kernel_channel_buffer *chan_buf;
584
585 if (channel->type != LTTNG_KERNEL_CHANNEL_TYPE_BUFFER)
586 return false;
587 chan_buf = container_of(channel, struct lttng_kernel_channel_buffer, parent);
588 if (chan_buf->priv->channel_type == METADATA_CHANNEL)
589 return true;
590 return false;
591 }
592
593 int lttng_channel_enable(struct lttng_kernel_channel_common *channel)
594 {
595 int ret = 0;
596
597 mutex_lock(&sessions_mutex);
598 if (is_channel_buffer_metadata(channel)) {
599 ret = -EPERM;
600 goto end;
601 }
602 if (channel->enabled) {
603 ret = -EEXIST;
604 goto end;
605 }
606 /* Set transient enabler state to "enabled" */
607 channel->priv->tstate = 1;
608 lttng_session_sync_event_enablers(channel->session);
609 /* Atomically set the state to "enabled" */
610 WRITE_ONCE(channel->enabled, 1);
611 end:
612 mutex_unlock(&sessions_mutex);
613 return ret;
614 }
615
616 int lttng_channel_disable(struct lttng_kernel_channel_common *channel)
617 {
618 int ret = 0;
619
620 mutex_lock(&sessions_mutex);
621 if (is_channel_buffer_metadata(channel)) {
622 ret = -EPERM;
623 goto end;
624 }
625 if (!channel->enabled) {
626 ret = -EEXIST;
627 goto end;
628 }
629 /* Atomically set the state to "disabled" */
630 WRITE_ONCE(channel->enabled, 0);
631 /* Set transient enabler state to "disabled" */
632 channel->priv->tstate = 0;
633 lttng_session_sync_event_enablers(channel->session);
634 end:
635 mutex_unlock(&sessions_mutex);
636 return ret;
637 }
638
639 int lttng_event_enable(struct lttng_kernel_event_common *event)
640 {
641 int ret = 0;
642
643 mutex_lock(&sessions_mutex);
644 switch (event->type) {
645 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
646 {
647 struct lttng_kernel_event_recorder *event_recorder =
648 container_of(event, struct lttng_kernel_event_recorder, parent);
649
650 if (event_recorder->chan->priv->channel_type == METADATA_CHANNEL) {
651 ret = -EPERM;
652 goto end;
653 }
654 break;
655 }
656 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
657 switch (event->priv->instrumentation) {
658 case LTTNG_KERNEL_ABI_KRETPROBE:
659 ret = -EINVAL;
660 goto end;
661 default:
662 break;
663 }
664 break;
665 default:
666 break;
667 }
668
669 if (event->enabled) {
670 ret = -EEXIST;
671 goto end;
672 }
673 switch (event->priv->instrumentation) {
674 case LTTNG_KERNEL_ABI_TRACEPOINT:
675 lttng_fallthrough;
676 case LTTNG_KERNEL_ABI_SYSCALL:
677 ret = -EINVAL;
678 break;
679
680 case LTTNG_KERNEL_ABI_KPROBE:
681 lttng_fallthrough;
682 case LTTNG_KERNEL_ABI_UPROBE:
683 WRITE_ONCE(event->enabled, 1);
684 break;
685
686 case LTTNG_KERNEL_ABI_KRETPROBE:
687 ret = lttng_kretprobes_event_enable_state(event, 1);
688 break;
689
690 case LTTNG_KERNEL_ABI_FUNCTION:
691 lttng_fallthrough;
692 case LTTNG_KERNEL_ABI_NOOP:
693 lttng_fallthrough;
694 default:
695 WARN_ON_ONCE(1);
696 ret = -EINVAL;
697 }
698 end:
699 mutex_unlock(&sessions_mutex);
700 return ret;
701 }
702
703 int lttng_event_disable(struct lttng_kernel_event_common *event)
704 {
705 int ret = 0;
706
707 mutex_lock(&sessions_mutex);
708 switch (event->type) {
709 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
710 {
711 struct lttng_kernel_event_recorder *event_recorder =
712 container_of(event, struct lttng_kernel_event_recorder, parent);
713
714 if (event_recorder->chan->priv->channel_type == METADATA_CHANNEL) {
715 ret = -EPERM;
716 goto end;
717 }
718 break;
719 }
720 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
721 switch (event->priv->instrumentation) {
722 case LTTNG_KERNEL_ABI_KRETPROBE:
723 ret = -EINVAL;
724 goto end;
725 default:
726 break;
727 }
728 break;
729 default:
730 break;
731 }
732
733 if (!event->enabled) {
734 ret = -EEXIST;
735 goto end;
736 }
737 switch (event->priv->instrumentation) {
738 case LTTNG_KERNEL_ABI_TRACEPOINT:
739 lttng_fallthrough;
740 case LTTNG_KERNEL_ABI_SYSCALL:
741 ret = -EINVAL;
742 break;
743
744 case LTTNG_KERNEL_ABI_KPROBE:
745 lttng_fallthrough;
746 case LTTNG_KERNEL_ABI_UPROBE:
747 WRITE_ONCE(event->enabled, 0);
748 break;
749
750 case LTTNG_KERNEL_ABI_KRETPROBE:
751 ret = lttng_kretprobes_event_enable_state(event, 0);
752 break;
753
754 case LTTNG_KERNEL_ABI_FUNCTION:
755 lttng_fallthrough;
756 case LTTNG_KERNEL_ABI_NOOP:
757 lttng_fallthrough;
758 default:
759 WARN_ON_ONCE(1);
760 ret = -EINVAL;
761 }
762 end:
763 mutex_unlock(&sessions_mutex);
764 return ret;
765 }
766
767 struct lttng_kernel_channel_buffer *lttng_channel_buffer_create(struct lttng_kernel_session *session,
768 const char *transport_name,
769 void *buf_addr,
770 size_t subbuf_size, size_t num_subbuf,
771 unsigned int switch_timer_interval,
772 unsigned int read_timer_interval,
773 enum channel_type channel_type)
774 {
775 struct lttng_kernel_channel_buffer *chan;
776 struct lttng_kernel_channel_buffer_private *chan_priv;
777 struct lttng_transport *transport = NULL;
778
779 mutex_lock(&sessions_mutex);
780 if (session->priv->been_active && channel_type != METADATA_CHANNEL)
781 goto active; /* Refuse to add channel to active session */
782 transport = lttng_transport_find(transport_name);
783 if (!transport) {
784 printk(KERN_WARNING "LTTng: transport %s not found\n",
785 transport_name);
786 goto notransport;
787 }
788 if (!try_module_get(transport->owner)) {
789 printk(KERN_WARNING "LTTng: Can't lock transport module.\n");
790 goto notransport;
791 }
792 chan = kzalloc(sizeof(struct lttng_kernel_channel_buffer), GFP_KERNEL);
793 if (!chan)
794 goto nomem;
795 chan_priv = kzalloc(sizeof(struct lttng_kernel_channel_buffer_private), GFP_KERNEL);
796 if (!chan_priv)
797 goto nomem_priv;
798 chan->priv = chan_priv;
799 chan_priv->pub = chan;
800 chan->parent.type = LTTNG_KERNEL_CHANNEL_TYPE_BUFFER;
801 chan->parent.session = session;
802 chan->priv->id = session->priv->free_chan_id++;
803 chan->ops = &transport->ops;
804 /*
805 * Note: the channel creation op already writes into the packet
806 * headers. Therefore the "chan" information used as input
807 * should be already accessible.
808 */
809 chan->priv->rb_chan = transport->ops.priv->channel_create(transport_name,
810 chan, buf_addr, subbuf_size, num_subbuf,
811 switch_timer_interval, read_timer_interval);
812 if (!chan->priv->rb_chan)
813 goto create_error;
814 chan->priv->parent.tstate = 1;
815 chan->parent.enabled = 1;
816 chan->priv->transport = transport;
817 chan->priv->channel_type = channel_type;
818 list_add(&chan->priv->node, &session->priv->chan);
819 mutex_unlock(&sessions_mutex);
820 return chan;
821
822 create_error:
823 kfree(chan_priv);
824 nomem_priv:
825 kfree(chan);
826 nomem:
827 if (transport)
828 module_put(transport->owner);
829 notransport:
830 active:
831 mutex_unlock(&sessions_mutex);
832 return NULL;
833 }
834
835 /*
836 * Only used internally at session destruction for per-cpu channels, and
837 * when metadata channel is released.
838 * Needs to be called with sessions mutex held.
839 */
840 static
841 void _lttng_channel_destroy(struct lttng_kernel_channel_buffer *chan)
842 {
843 chan->ops->priv->channel_destroy(chan->priv->rb_chan);
844 module_put(chan->priv->transport->owner);
845 list_del(&chan->priv->node);
846 lttng_kernel_destroy_context(chan->priv->ctx);
847 kfree(chan->priv);
848 kfree(chan);
849 }
850
851 void lttng_metadata_channel_destroy(struct lttng_kernel_channel_buffer *chan)
852 {
853 BUG_ON(chan->priv->channel_type != METADATA_CHANNEL);
854
855 /* Protect the metadata cache with the sessions_mutex. */
856 mutex_lock(&sessions_mutex);
857 _lttng_channel_destroy(chan);
858 mutex_unlock(&sessions_mutex);
859 }
860 EXPORT_SYMBOL_GPL(lttng_metadata_channel_destroy);
861
862 static
863 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream *stream)
864 {
865 stream->finalized = 1;
866 wake_up_interruptible(&stream->read_wait);
867 }
868
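/*
 * Check whether the enabler's target can still provide an event ID.
 * Recorder events consume IDs from their channel (kretprobes need two:
 * entry and return); event notifiers do not use per-channel IDs.
 */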
869 static
870 bool lttng_kernel_event_id_available(struct lttng_event_enabler_common *event_enabler)
871 {
872 struct lttng_kernel_abi_event *event_param = &event_enabler->event_param;
873 enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;
874
875 switch (event_enabler->enabler_type) {
876 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
877 {
878 struct lttng_event_recorder_enabler *event_recorder_enabler =
879 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
880 struct lttng_kernel_channel_buffer *chan = event_recorder_enabler->chan;
881
882 switch (itype) {
883 case LTTNG_KERNEL_ABI_TRACEPOINT:
884 lttng_fallthrough;
885 case LTTNG_KERNEL_ABI_KPROBE:
886 lttng_fallthrough;
887 case LTTNG_KERNEL_ABI_SYSCALL:
888 lttng_fallthrough;
889 case LTTNG_KERNEL_ABI_UPROBE:
890 if (chan->priv->free_event_id == -1U)
891 return false;
892 return true;
893 case LTTNG_KERNEL_ABI_KRETPROBE:
894 /* kretprobes require 2 event IDs. */
895 if (chan->priv->free_event_id >= -2U)
896 return false;
897 return true;
898 default:
899 WARN_ON_ONCE(1);
900 return false;
901 }
902 }
903 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
904 return true;
905 default:
906 WARN_ON_ONCE(1);
907 return false;
908 }
909 }
910
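/*
 * Allocate and minimally initialize an event (recorder or notifier)
 * from the kmem caches, according to the enabler type. The caller is
 * responsible for instrumentation-specific registration.
 */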
911 static
912 struct lttng_kernel_event_common *lttng_kernel_event_alloc(struct lttng_event_enabler_common *event_enabler)
913 {
914 struct lttng_kernel_abi_event *event_param = &event_enabler->event_param;
915 enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;
916
917 switch (event_enabler->enabler_type) {
918 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
919 {
920 struct lttng_event_recorder_enabler *event_recorder_enabler =
921 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
922 struct lttng_kernel_event_recorder *event_recorder;
923 struct lttng_kernel_event_recorder_private *event_recorder_priv;
924 struct lttng_kernel_channel_buffer *chan = event_recorder_enabler->chan;
925
926 event_recorder = kmem_cache_zalloc(event_recorder_cache, GFP_KERNEL);
927 if (!event_recorder)
928 return NULL;
929 event_recorder_priv = kmem_cache_zalloc(event_recorder_private_cache, GFP_KERNEL);
930 if (!event_recorder_priv) {
931 kmem_cache_free(event_recorder_cache, event_recorder);
932 return NULL;
933 }
934 event_recorder_priv->pub = event_recorder;
935 event_recorder_priv->parent.pub = &event_recorder->parent;
936 event_recorder->priv = event_recorder_priv;
937 event_recorder->parent.priv = &event_recorder_priv->parent;
938
939 event_recorder->parent.type = LTTNG_KERNEL_EVENT_TYPE_RECORDER;
940 event_recorder->parent.run_filter = lttng_kernel_interpret_event_filter;
941 event_recorder->priv->parent.instrumentation = itype;
942 INIT_LIST_HEAD(&event_recorder->priv->parent.filter_bytecode_runtime_head);
943 INIT_LIST_HEAD(&event_recorder->priv->parent.enablers_ref_head);
944
945 event_recorder->chan = chan;
946 event_recorder->priv->id = chan->priv->free_event_id++;
947 return &event_recorder->parent;
948 }
949 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
950 {
951 struct lttng_event_notifier_enabler *event_notifier_enabler =
952 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
953 struct lttng_kernel_event_notifier *event_notifier;
954 struct lttng_kernel_event_notifier_private *event_notifier_priv;
955
956 event_notifier = kmem_cache_zalloc(event_notifier_cache, GFP_KERNEL);
957 if (!event_notifier)
958 return NULL;
959 event_notifier_priv = kmem_cache_zalloc(event_notifier_private_cache, GFP_KERNEL);
960 if (!event_notifier_priv) {
961 kmem_cache_free(event_notifier_cache, event_notifier);
962 return NULL;
963 }
964 event_notifier_priv->pub = event_notifier;
965 event_notifier_priv->parent.pub = &event_notifier->parent;
966 event_notifier->priv = event_notifier_priv;
967 event_notifier->parent.priv = &event_notifier_priv->parent;
968
969 event_notifier->parent.type = LTTNG_KERNEL_EVENT_TYPE_NOTIFIER;
970 event_notifier->parent.run_filter = lttng_kernel_interpret_event_filter;
971 event_notifier->priv->parent.instrumentation = itype;
972 event_notifier->priv->parent.user_token = event_enabler->user_token;
973 INIT_LIST_HEAD(&event_notifier->priv->parent.filter_bytecode_runtime_head);
974 INIT_LIST_HEAD(&event_notifier->priv->parent.enablers_ref_head);
975
976 event_notifier->priv->group = event_notifier_enabler->group;
977 event_notifier->priv->error_counter_index = event_notifier_enabler->error_counter_index;
978 event_notifier->priv->num_captures = 0;
979 event_notifier->notification_send = lttng_event_notifier_notification_send;
980 INIT_LIST_HEAD(&event_notifier->priv->capture_bytecode_runtime_head);
981 return &event_notifier->parent;
982 }
983 default:
984 return NULL;
985 }
986 }
987
988 static
989 void lttng_kernel_event_free(struct lttng_kernel_event_common *event)
990 {
991 switch (event->type) {
992 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
993 {
994 struct lttng_kernel_event_recorder *event_recorder =
995 container_of(event, struct lttng_kernel_event_recorder, parent);
996
997 kmem_cache_free(event_recorder_private_cache, event_recorder->priv);
998 kmem_cache_free(event_recorder_cache, event_recorder);
999 break;
1000 }
1001 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1002 {
1003 struct lttng_kernel_event_notifier *event_notifier =
1004 container_of(event, struct lttng_kernel_event_notifier, parent);
1005
1006 kmem_cache_free(event_notifier_private_cache, event_notifier->priv);
1007 kmem_cache_free(event_notifier_cache, event_notifier);
1008 break;
1009 }
1010 default:
1011 WARN_ON_ONCE(1);
1012 }
1013 }
1014
1015 static
1016 int lttng_kernel_event_notifier_clear_error_counter(struct lttng_kernel_event_common *event)
1017 {
1018 switch (event->type) {
1019 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1020 return 0;
1021 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1022 {
1023 struct lttng_kernel_event_notifier *event_notifier =
1024 container_of(event, struct lttng_kernel_event_notifier, parent);
1025 struct lttng_counter *error_counter;
1026 struct lttng_event_notifier_group *event_notifier_group = event_notifier->priv->group;
1027 size_t dimension_index[1];
1028 int ret;
1029
1030 /*
1031 * Clear the error counter bucket. The sessiond keeps track of which
1032 * bucket is currently in use. We trust it. The session lock
1033 * synchronizes against concurrent creation of the error
1034 * counter.
1035 */
1036 error_counter = event_notifier_group->error_counter;
1037 if (!error_counter)
1038 return 0;
1039 /*
1040 * Check that the index is within the boundary of the counter.
1041 */
1042 if (event_notifier->priv->error_counter_index >= event_notifier_group->error_counter_len) {
1043 printk(KERN_INFO "LTTng: event_notifier: Error counter index out-of-bound: counter-len=%zu, index=%llu\n",
1044 event_notifier_group->error_counter_len, event_notifier->priv->error_counter_index);
1045 return -EINVAL;
1046 }
1047
1048 dimension_index[0] = event_notifier->priv->error_counter_index;
1049 ret = error_counter->ops->counter_clear(error_counter->counter, dimension_index);
1050 if (ret) {
1051 printk(KERN_INFO "LTTng: event_notifier: Unable to clear error counter bucket %llu\n",
1052 event_notifier->priv->error_counter_index);
1053 return -EINVAL;
1054 }
1055 return 0;
1056 }
1057 default:
1058 return -EINVAL;
1059 }
1060 }
1061
1062 /*
1063 * Supports event creation while tracing session is active.
1064 * Needs to be called with sessions mutex held.
1065 */
1066 struct lttng_kernel_event_common *_lttng_kernel_event_create(struct lttng_event_enabler_common *event_enabler,
1067 const struct lttng_kernel_event_desc *event_desc)
1068 {
1069 struct lttng_event_ht *events_ht = lttng_get_event_ht_from_enabler(event_enabler);
1070 struct list_head *event_list_head = lttng_get_event_list_head_from_enabler(event_enabler);
1071 struct lttng_kernel_abi_event *event_param = &event_enabler->event_param;
1072 enum lttng_kernel_abi_instrumentation itype = event_param->instrumentation;
1073 struct lttng_kernel_event_common_private *event_priv;
1074 struct lttng_kernel_event_common *event;
1075 const char *event_name;
1076 struct hlist_head *head;
1077 int ret;
1078
1079 if (!lttng_kernel_event_id_available(event_enabler)) {
1080 ret = -EMFILE;
1081 goto full;
1082 }
1083
1084 switch (itype) {
1085 case LTTNG_KERNEL_ABI_TRACEPOINT:
1086 event_name = event_desc->event_name;
1087 break;
1088
1089 case LTTNG_KERNEL_ABI_KPROBE:
1090 lttng_fallthrough;
1091 case LTTNG_KERNEL_ABI_UPROBE:
1092 lttng_fallthrough;
1093 case LTTNG_KERNEL_ABI_KRETPROBE:
1094 lttng_fallthrough;
1095 case LTTNG_KERNEL_ABI_SYSCALL:
1096 event_name = event_param->name;
1097 break;
1098
1099 case LTTNG_KERNEL_ABI_FUNCTION:
1100 lttng_fallthrough;
1101 case LTTNG_KERNEL_ABI_NOOP:
1102 lttng_fallthrough;
1103 default:
1104 WARN_ON_ONCE(1);
1105 ret = -EINVAL;
1106 goto type_error;
1107 }
1108
1109 head = utils_borrow_hash_table_bucket(events_ht->table, LTTNG_EVENT_HT_SIZE, event_name);
1110 lttng_hlist_for_each_entry(event_priv, head, hlist_node) {
1111 if (lttng_event_enabler_event_name_match_event(event_enabler, event_name, event_priv->pub)) {
1112 ret = -EEXIST;
1113 goto exist;
1114 }
1115 }
1116
1117 event = lttng_kernel_event_alloc(event_enabler);
1118 if (!event) {
1119 ret = -ENOMEM;
1120 goto alloc_error;
1121 }
1122
1123 switch (itype) {
1124 case LTTNG_KERNEL_ABI_TRACEPOINT:
1125 /* Event will be enabled by enabler sync. */
1126 event->enabled = 0;
1127 event->priv->registered = 0;
1128 event->priv->desc = lttng_event_desc_get(event_name);
1129 if (!event->priv->desc) {
1130 ret = -ENOENT;
1131 goto register_error;
1132 }
1133 /* Populate lttng_event structure before event registration. */
1134 smp_wmb();
1135 break;
1136
1137 case LTTNG_KERNEL_ABI_KPROBE:
1138 /*
1139 * Needs to be explicitly enabled after creation, since
1140 * we may want to apply filters.
1141 */
1142 event->enabled = 0;
1143 event->priv->registered = 1;
1144 /*
1145 * Populate lttng_event structure before event
1146 * registration.
1147 */
1148 smp_wmb();
1149 ret = lttng_kprobes_register_event(event_name,
1150 event_param->u.kprobe.symbol_name,
1151 event_param->u.kprobe.offset,
1152 event_param->u.kprobe.addr,
1153 event);
1154 if (ret) {
1155 ret = -EINVAL;
1156 goto register_error;
1157 }
1158 ret = try_module_get(event->priv->desc->owner);
1159 WARN_ON_ONCE(!ret);
1160 break;
1161
1162 case LTTNG_KERNEL_ABI_KRETPROBE:
1163 {
1164 struct lttng_kernel_event_common *event_return;
1165
1166 /* kretprobe defines 2 events */
1167 /*
1168 * Needs to be explicitly enabled after creation, since
1169 * we may want to apply filters.
1170 */
1171 event->enabled = 0;
1172 event->priv->registered = 1;
1173
1174 event_return = lttng_kernel_event_alloc(event_enabler);
1175 if (!event_return) {
1176 ret = -ENOMEM;
1177 goto alloc_error;
1178 }
1179
1180 event_return->enabled = 0;
1181 event_return->priv->registered = 1;
1182
1183 /*
1184 * Populate lttng_event structure before kretprobe registration.
1185 */
1186 smp_wmb();
1187 ret = lttng_kretprobes_register(event_name,
1188 event_param->u.kretprobe.symbol_name,
1189 event_param->u.kretprobe.offset,
1190 event_param->u.kretprobe.addr,
1191 event, event_return);
1192 if (ret) {
1193 lttng_kernel_event_free(event_return);
1194 ret = -EINVAL;
1195 goto register_error;
1196 }
1197 /* Take 2 refs on the module: one per event. */
1198 ret = try_module_get(event->priv->desc->owner);
1199 WARN_ON_ONCE(!ret);
1200 ret = try_module_get(event_return->priv->desc->owner);
1201 WARN_ON_ONCE(!ret);
1202 ret = _lttng_event_recorder_metadata_statedump(event_return);
1203 WARN_ON_ONCE(ret > 0);
1204 if (ret) {
1205 module_put(event_return->priv->desc->owner);
1206 module_put(event->priv->desc->owner);
1207 lttng_kernel_event_free(event_return);
1208 goto statedump_error;
1209 }
1210 list_add(&event_return->priv->node, event_list_head);
1211 break;
1212 }
1213
1214 case LTTNG_KERNEL_ABI_SYSCALL:
1215 /*
1216 * Needs to be explicitly enabled after creation, since
1217 * we may want to apply filters.
1218 */
1219 event->enabled = 0;
1220 event->priv->registered = 0;
1221 event->priv->desc = event_desc;
1222 switch (event_param->u.syscall.entryexit) {
1223 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1224 ret = -EINVAL;
1225 goto register_error;
1226 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1227 event->priv->u.syscall.entryexit = LTTNG_SYSCALL_ENTRY;
1228 break;
1229 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1230 event->priv->u.syscall.entryexit = LTTNG_SYSCALL_EXIT;
1231 break;
1232 }
1233 switch (event_param->u.syscall.abi) {
1234 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1235 ret = -EINVAL;
1236 goto register_error;
1237 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1238 event->priv->u.syscall.abi = LTTNG_SYSCALL_ABI_NATIVE;
1239 break;
1240 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1241 event->priv->u.syscall.abi = LTTNG_SYSCALL_ABI_COMPAT;
1242 break;
1243 }
1244 if (!event->priv->desc) {
1245 ret = -EINVAL;
1246 goto register_error;
1247 }
1248 break;
1249
1250 case LTTNG_KERNEL_ABI_UPROBE:
1251 /*
1252 * Needs to be explicitly enabled after creation, since
1253 * we may want to apply filters.
1254 */
1255 event->enabled = 0;
1256 event->priv->registered = 1;
1257
1258 /*
1259 * Populate lttng_event structure before event
1260 * registration.
1261 */
1262 smp_wmb();
1263
1264 ret = lttng_uprobes_register_event(event_param->name,
1265 event_param->u.uprobe.fd,
1266 event);
1267 if (ret)
1268 goto register_error;
1269 ret = try_module_get(event->priv->desc->owner);
1270 WARN_ON_ONCE(!ret);
1271 break;
1272
1273 default:
1274 WARN_ON_ONCE(1);
1275 ret = -EINVAL;
1276 goto register_error;
1277 }
1278
1279 ret = _lttng_event_recorder_metadata_statedump(event);
1280 WARN_ON_ONCE(ret > 0);
1281 if (ret) {
1282 goto statedump_error;
1283 }
1284
1285 ret = lttng_kernel_event_notifier_clear_error_counter(event);
1286 if (ret)
1287 goto register_error;
1288
1289 hlist_add_head(&event->priv->hlist_node, head);
1290 list_add(&event->priv->node, event_list_head);
1291
1292 return event;
1293
1294 statedump_error:
1295 /* If a statedump error occurs, events will not be readable. */
1296 register_error:
1297 lttng_kernel_event_free(event);
1298 alloc_error:
1299 exist:
1300 type_error:
1301 full:
1302 return ERR_PTR(ret);
1303 }
1304
1305 struct lttng_kernel_event_common *lttng_kernel_event_create(struct lttng_event_enabler_common *event_enabler,
1306 const struct lttng_kernel_event_desc *event_desc)
1307 {
1308 struct lttng_kernel_event_common *event;
1309
1310 mutex_lock(&sessions_mutex);
1311 event = _lttng_kernel_event_create(event_enabler, event_desc);
1312 mutex_unlock(&sessions_mutex);
1313 return event;
1314 }
1315
1316 int lttng_kernel_counter_read(struct lttng_counter *counter,
1317 const size_t *dim_indexes, int32_t cpu,
1318 int64_t *val, bool *overflow, bool *underflow)
1319 {
1320 return counter->ops->counter_read(counter->counter, dim_indexes,
1321 cpu, val, overflow, underflow);
1322 }
1323
1324 int lttng_kernel_counter_aggregate(struct lttng_counter *counter,
1325 const size_t *dim_indexes, int64_t *val,
1326 bool *overflow, bool *underflow)
1327 {
1328 return counter->ops->counter_aggregate(counter->counter, dim_indexes,
1329 val, overflow, underflow);
1330 }
1331
1332 int lttng_kernel_counter_clear(struct lttng_counter *counter,
1333 const size_t *dim_indexes)
1334 {
1335 return counter->ops->counter_clear(counter->counter, dim_indexes);
1336 }
1337
1338 /* Only needed for tracepoints and system calls for now; other instrumentation types register at event creation. */
1339 static
1340 void register_event(struct lttng_kernel_event_common *event)
1341 {
1342 const struct lttng_kernel_event_desc *desc;
1343 int ret = -EINVAL;
1344
1345 WARN_ON_ONCE(event->priv->registered);
1346
1347 desc = event->priv->desc;
1348 switch (event->priv->instrumentation) {
1349 case LTTNG_KERNEL_ABI_TRACEPOINT:
1350 ret = lttng_wrapper_tracepoint_probe_register(desc->event_kname,
1351 desc->tp_class->probe_callback,
1352 event);
1353 break;
1354
1355 case LTTNG_KERNEL_ABI_SYSCALL:
1356 ret = lttng_syscall_filter_enable_event(event);
1357 break;
1358
1359 case LTTNG_KERNEL_ABI_KPROBE:
1360 lttng_fallthrough;
1361 case LTTNG_KERNEL_ABI_UPROBE:
1362 ret = 0;
1363 break;
1364
1365 case LTTNG_KERNEL_ABI_KRETPROBE:
1366 switch (event->type) {
1367 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1368 ret = 0;
1369 break;
1370 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1371 WARN_ON_ONCE(1);
1372 break;
1373 }
1374 break;
1375
1376 case LTTNG_KERNEL_ABI_FUNCTION:
1377 lttng_fallthrough;
1378 case LTTNG_KERNEL_ABI_NOOP:
1379 lttng_fallthrough;
1380 default:
1381 WARN_ON_ONCE(1);
1382 }
1383 WARN_ON_ONCE(ret);
1384 if (!ret)
1385 event->priv->registered = 1;
1386 }
1387
1388 static
1389 void unregister_event(struct lttng_kernel_event_common *event)
1390 {
1391 struct lttng_kernel_event_common_private *event_priv = event->priv;
1392 const struct lttng_kernel_event_desc *desc;
1393 int ret = -EINVAL;
1394
1395 WARN_ON_ONCE(!event->priv->registered);
1396
1397 desc = event_priv->desc;
1398 switch (event_priv->instrumentation) {
1399 case LTTNG_KERNEL_ABI_TRACEPOINT:
1400 ret = lttng_wrapper_tracepoint_probe_unregister(event_priv->desc->event_kname,
1401 event_priv->desc->tp_class->probe_callback,
1402 event);
1403 break;
1404
1405 case LTTNG_KERNEL_ABI_KPROBE:
1406 lttng_kprobes_unregister_event(event);
1407 ret = 0;
1408 break;
1409
1410 case LTTNG_KERNEL_ABI_KRETPROBE:
1411 switch (event->type) {
1412 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1413 lttng_kretprobes_unregister(event);
1414 ret = 0;
1415 break;
1416 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1417 WARN_ON_ONCE(1);
1418 break;
1419 }
1420 break;
1421
1422 case LTTNG_KERNEL_ABI_SYSCALL:
1423 ret = lttng_syscall_filter_disable_event(event);
1424 break;
1425
1426 case LTTNG_KERNEL_ABI_NOOP:
1427 switch (event->type) {
1428 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1429 ret = 0;
1430 break;
1431 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1432 WARN_ON_ONCE(1);
1433 break;
1434 }
1435 break;
1436
1437 case LTTNG_KERNEL_ABI_UPROBE:
1438 lttng_uprobes_unregister_event(event);
1439 ret = 0;
1440 break;
1441
1442 case LTTNG_KERNEL_ABI_FUNCTION:
1443 lttng_fallthrough;
1444 default:
1445 WARN_ON_ONCE(1);
1446 }
1447 WARN_ON_ONCE(ret);
1448 if (!ret)
1449 event_priv->registered = 0;
1450 }
1451
1452 static
1453 void _lttng_event_unregister(struct lttng_kernel_event_common *event)
1454 {
1455 if (event->priv->registered)
1456 unregister_event(event);
1457 }
1458
1459 /*
1460 * Only used internally at session and event notifier group destruction.
1461 */
1462 static
1463 void _lttng_event_destroy(struct lttng_kernel_event_common *event)
1464 {
1465 struct lttng_kernel_event_common_private *event_priv = event->priv;
1466 struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
1467
1468 lttng_free_event_filter_runtime(event);
1469 /* Free event enabler refs */
1470 list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
1471 &event_priv->enablers_ref_head, node)
1472 kfree(enabler_ref);
1473
1474 switch (event->type) {
1475 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
1476 {
1477 struct lttng_kernel_event_recorder *event_recorder =
1478 container_of(event, struct lttng_kernel_event_recorder, parent);
1479
1480 switch (event_priv->instrumentation) {
1481 case LTTNG_KERNEL_ABI_TRACEPOINT:
1482 lttng_event_desc_put(event_priv->desc);
1483 break;
1484
1485 case LTTNG_KERNEL_ABI_KPROBE:
1486 module_put(event_priv->desc->owner);
1487 lttng_kprobes_destroy_event_private(&event_recorder->parent);
1488 break;
1489
1490 case LTTNG_KERNEL_ABI_KRETPROBE:
1491 module_put(event_priv->desc->owner);
1492 lttng_kretprobes_destroy_private(&event_recorder->parent);
1493 break;
1494
1495 case LTTNG_KERNEL_ABI_SYSCALL:
1496 break;
1497
1498 case LTTNG_KERNEL_ABI_UPROBE:
1499 module_put(event_priv->desc->owner);
1500 lttng_uprobes_destroy_event_private(&event_recorder->parent);
1501 break;
1502
1503 case LTTNG_KERNEL_ABI_FUNCTION:
1504 lttng_fallthrough;
1505 case LTTNG_KERNEL_ABI_NOOP:
1506 lttng_fallthrough;
1507 default:
1508 WARN_ON_ONCE(1);
1509 }
1510 list_del(&event_recorder->priv->parent.node);
1511 kmem_cache_free(event_recorder_private_cache, event_recorder->priv);
1512 kmem_cache_free(event_recorder_cache, event_recorder);
1513 break;
1514 }
1515 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
1516 {
1517 struct lttng_kernel_event_notifier *event_notifier =
1518 container_of(event, struct lttng_kernel_event_notifier, parent);
1519
1520 switch (event_notifier->priv->parent.instrumentation) {
1521 case LTTNG_KERNEL_ABI_TRACEPOINT:
1522 lttng_event_desc_put(event_notifier->priv->parent.desc);
1523 break;
1524
1525 case LTTNG_KERNEL_ABI_KPROBE:
1526 module_put(event_notifier->priv->parent.desc->owner);
1527 lttng_kprobes_destroy_event_private(&event_notifier->parent);
1528 break;
1529
1530 case LTTNG_KERNEL_ABI_SYSCALL:
1531 break;
1532
1533 case LTTNG_KERNEL_ABI_UPROBE:
1534 module_put(event_notifier->priv->parent.desc->owner);
1535 lttng_uprobes_destroy_event_private(&event_notifier->parent);
1536 break;
1537
1538 case LTTNG_KERNEL_ABI_KRETPROBE:
1539 lttng_fallthrough;
1540 case LTTNG_KERNEL_ABI_FUNCTION:
1541 lttng_fallthrough;
1542 case LTTNG_KERNEL_ABI_NOOP:
1543 lttng_fallthrough;
1544 default:
1545 WARN_ON_ONCE(1);
1546 }
1547 list_del(&event_notifier->priv->parent.node);
1548 kmem_cache_free(event_notifier_private_cache, event_notifier->priv);
1549 kmem_cache_free(event_notifier_cache, event_notifier);
1550 break;
1551 }
1552 default:
1553 WARN_ON_ONCE(1);
1554 }
1555 }
1556
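/*
 * Map a tracker type to the corresponding ID tracker of the session.
 */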
1557 struct lttng_kernel_id_tracker *get_tracker(struct lttng_kernel_session *session,
1558 enum tracker_type tracker_type)
1559 {
1560 switch (tracker_type) {
1561 case TRACKER_PID:
1562 return &session->pid_tracker;
1563 case TRACKER_VPID:
1564 return &session->vpid_tracker;
1565 case TRACKER_UID:
1566 return &session->uid_tracker;
1567 case TRACKER_VUID:
1568 return &session->vuid_tracker;
1569 case TRACKER_GID:
1570 return &session->gid_tracker;
1571 case TRACKER_VGID:
1572 return &session->vgid_tracker;
1573 default:
1574 WARN_ON_ONCE(1);
1575 return NULL;
1576 }
1577 }
1578
1579 int lttng_session_track_id(struct lttng_kernel_session *session,
1580 enum tracker_type tracker_type, int id)
1581 {
1582 struct lttng_kernel_id_tracker *tracker;
1583 int ret;
1584
1585 tracker = get_tracker(session, tracker_type);
1586 if (!tracker)
1587 return -EINVAL;
1588 if (id < -1)
1589 return -EINVAL;
1590 mutex_lock(&sessions_mutex);
1591 if (id == -1) {
1592 /* track all ids: destroy tracker. */
1593 lttng_id_tracker_destroy(tracker, true);
1594 ret = 0;
1595 } else {
1596 ret = lttng_id_tracker_add(tracker, id);
1597 }
1598 mutex_unlock(&sessions_mutex);
1599 return ret;
1600 }
1601
1602 int lttng_session_untrack_id(struct lttng_kernel_session *session,
1603 enum tracker_type tracker_type, int id)
1604 {
1605 struct lttng_kernel_id_tracker *tracker;
1606 int ret;
1607
1608 tracker = get_tracker(session, tracker_type);
1609 if (!tracker)
1610 return -EINVAL;
1611 if (id < -1)
1612 return -EINVAL;
1613 mutex_lock(&sessions_mutex);
1614 if (id == -1) {
1615 /* untrack all ids: replace by empty tracker. */
1616 ret = lttng_id_tracker_empty_set(tracker);
1617 } else {
1618 ret = lttng_id_tracker_del(tracker, id);
1619 }
1620 mutex_unlock(&sessions_mutex);
1621 return ret;
1622 }
1623
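/*
 * seq_file operations listing the IDs held by a tracker. The sessions
 * mutex is taken in ->start and released in ->stop.
 */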
1624 static
1625 void *id_list_start(struct seq_file *m, loff_t *pos)
1626 {
1627 struct lttng_kernel_id_tracker *id_tracker = m->private;
1628 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1629 struct lttng_id_hash_node *e;
1630 int iter = 0, i;
1631
1632 mutex_lock(&sessions_mutex);
1633 if (id_tracker_p) {
1634 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1635 struct hlist_head *head = &id_tracker_p->id_hash[i];
1636
1637 lttng_hlist_for_each_entry(e, head, hlist) {
1638 if (iter++ >= *pos)
1639 return e;
1640 }
1641 }
1642 } else {
1643 /* ID tracker disabled. */
1644 if (iter >= *pos && iter == 0) {
1645 return id_tracker_p; /* empty tracker */
1646 }
1647 iter++;
1648 }
1649 /* End of list */
1650 return NULL;
1651 }
1652
1653 /* Called with sessions_mutex held. */
1654 static
1655 void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
1656 {
1657 struct lttng_kernel_id_tracker *id_tracker = m->private;
1658 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1659 struct lttng_id_hash_node *e;
1660 int iter = 0, i;
1661
1662 (*ppos)++;
1663 if (id_tracker_p) {
1664 for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
1665 struct hlist_head *head = &id_tracker_p->id_hash[i];
1666
1667 lttng_hlist_for_each_entry(e, head, hlist) {
1668 if (iter++ >= *ppos)
1669 return e;
1670 }
1671 }
1672 } else {
1673 /* ID tracker disabled. */
1674 if (iter >= *ppos && iter == 0)
1675 return p; /* empty tracker */
1676 iter++;
1677 }
1678
1679 /* End of list */
1680 return NULL;
1681 }
1682
1683 static
1684 void id_list_stop(struct seq_file *m, void *p)
1685 {
1686 mutex_unlock(&sessions_mutex);
1687 }
1688
1689 static
1690 int id_list_show(struct seq_file *m, void *p)
1691 {
1692 struct lttng_kernel_id_tracker *id_tracker = m->private;
1693 struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
1694 int id;
1695
1696 if (p == id_tracker_p) {
1697 /* Tracker disabled. */
1698 id = -1;
1699 } else {
1700 const struct lttng_id_hash_node *e = p;
1701
1702 id = lttng_id_tracker_get_node_id(e);
1703 }
1704 switch (id_tracker->priv->tracker_type) {
1705 case TRACKER_PID:
1706 seq_printf(m, "process { pid = %d; };\n", id);
1707 break;
1708 case TRACKER_VPID:
1709 seq_printf(m, "process { vpid = %d; };\n", id);
1710 break;
1711 case TRACKER_UID:
1712 seq_printf(m, "user { uid = %d; };\n", id);
1713 break;
1714 case TRACKER_VUID:
1715 seq_printf(m, "user { vuid = %d; };\n", id);
1716 break;
1717 case TRACKER_GID:
1718 seq_printf(m, "group { gid = %d; };\n", id);
1719 break;
1720 case TRACKER_VGID:
1721 seq_printf(m, "group { vgid = %d; };\n", id);
1722 break;
1723 default:
1724 seq_printf(m, "UNKNOWN { field = %d };\n", id);
1725 }
1726 return 0;
1727 }
1728
1729 static
1730 const struct seq_operations lttng_tracker_ids_list_seq_ops = {
1731 .start = id_list_start,
1732 .next = id_list_next,
1733 .stop = id_list_stop,
1734 .show = id_list_show,
1735 };
1736
1737 static
1738 int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
1739 {
1740 return seq_open(file, &lttng_tracker_ids_list_seq_ops);
1741 }
1742
1743 static
1744 int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
1745 {
1746 struct seq_file *m = file->private_data;
1747 struct lttng_kernel_id_tracker *id_tracker = m->private;
1748 int ret;
1749
1750 WARN_ON_ONCE(!id_tracker);
1751 ret = seq_release(inode, file);
1752 if (!ret)
1753 fput(id_tracker->priv->session->priv->file);
1754 return ret;
1755 }
1756
1757 const struct file_operations lttng_tracker_ids_list_fops = {
1758 .owner = THIS_MODULE,
1759 .open = lttng_tracker_ids_list_open,
1760 .read = seq_read,
1761 .llseek = seq_lseek,
1762 .release = lttng_tracker_ids_list_release,
1763 };
1764
1765 int lttng_session_list_tracker_ids(struct lttng_kernel_session *session,
1766 enum tracker_type tracker_type)
1767 {
1768 struct file *tracker_ids_list_file;
1769 struct seq_file *m;
1770 int file_fd, ret;
1771
1772 file_fd = lttng_get_unused_fd();
1773 if (file_fd < 0) {
1774 ret = file_fd;
1775 goto fd_error;
1776 }
1777
1778 tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
1779 &lttng_tracker_ids_list_fops,
1780 NULL, O_RDWR);
1781 if (IS_ERR(tracker_ids_list_file)) {
1782 ret = PTR_ERR(tracker_ids_list_file);
1783 goto file_error;
1784 }
1785 if (!atomic_long_add_unless(&session->priv->file->f_count, 1, LONG_MAX)) {
1786 ret = -EOVERFLOW;
1787 goto refcount_error;
1788 }
1789 ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
1790 if (ret < 0)
1791 goto open_error;
1792 m = tracker_ids_list_file->private_data;
1793
1794 m->private = get_tracker(session, tracker_type);
1795 BUG_ON(!m->private);
1796 fd_install(file_fd, tracker_ids_list_file);
1797
1798 return file_fd;
1799
1800 open_error:
1801 atomic_long_dec(&session->priv->file->f_count);
1802 refcount_error:
1803 fput(tracker_ids_list_file);
1804 file_error:
1805 put_unused_fd(file_fd);
1806 fd_error:
1807 return ret;
1808 }
1809
1810 /*
1811 * Enabler management.
1812 */
1813 static
1814 int lttng_match_enabler_star_glob(const char *desc_name,
1815 const char *pattern)
1816 {
1817 if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
1818 desc_name, LTTNG_SIZE_MAX))
1819 return 0;
1820 return 1;
1821 }
1822
1823 static
1824 int lttng_match_enabler_name(const char *desc_name,
1825 const char *name)
1826 {
1827 if (strcmp(desc_name, name))
1828 return 0;
1829 return 1;
1830 }
1831
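/*
 * Check whether an event description matches an enabler: compare names
 * (exact or star-glob) and, for syscalls, the entry/exit and
 * native/compat attributes. Returns 1 on match, 0 on mismatch,
 * negative error on unexpected input.
 */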
1832 static
1833 int lttng_desc_match_enabler_check(const struct lttng_kernel_event_desc *desc,
1834 struct lttng_event_enabler_common *enabler)
1835 {
1836 const char *desc_name, *enabler_name;
1837 bool compat = false, entry = false;
1838
1839 enabler_name = enabler->event_param.name;
1840 switch (enabler->event_param.instrumentation) {
1841 case LTTNG_KERNEL_ABI_TRACEPOINT:
1842 desc_name = desc->event_name;
1843 switch (enabler->format_type) {
1844 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
1845 return lttng_match_enabler_star_glob(desc_name, enabler_name);
1846 case LTTNG_ENABLER_FORMAT_NAME:
1847 return lttng_match_enabler_name(desc_name, enabler_name);
1848 default:
1849 return -EINVAL;
1850 }
1851 break;
1852
1853 case LTTNG_KERNEL_ABI_SYSCALL:
1854 desc_name = desc->event_name;
1855 if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
1856 desc_name += strlen("compat_");
1857 compat = true;
1858 }
1859 if (!strncmp(desc_name, "syscall_exit_",
1860 strlen("syscall_exit_"))) {
1861 desc_name += strlen("syscall_exit_");
1862 } else if (!strncmp(desc_name, "syscall_entry_",
1863 strlen("syscall_entry_"))) {
1864 desc_name += strlen("syscall_entry_");
1865 entry = true;
1866 } else {
1867 WARN_ON_ONCE(1);
1868 return -EINVAL;
1869 }
1870 switch (enabler->event_param.u.syscall.entryexit) {
1871 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
1872 break;
1873 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
1874 if (!entry)
1875 return 0;
1876 break;
1877 case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
1878 if (entry)
1879 return 0;
1880 break;
1881 default:
1882 return -EINVAL;
1883 }
1884 switch (enabler->event_param.u.syscall.abi) {
1885 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
1886 break;
1887 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
1888 if (compat)
1889 return 0;
1890 break;
1891 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
1892 if (!compat)
1893 return 0;
1894 break;
1895 default:
1896 return -EINVAL;
1897 }
1898 switch (enabler->event_param.u.syscall.match) {
1899 case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME:
1900 switch (enabler->format_type) {
1901 case LTTNG_ENABLER_FORMAT_STAR_GLOB:
1902 return lttng_match_enabler_star_glob(desc_name, enabler_name);
1903 case LTTNG_ENABLER_FORMAT_NAME:
1904 return lttng_match_enabler_name(desc_name, enabler_name);
1905 default:
1906 return -EINVAL;
1907 }
1908 break;
1909 case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NR:
1910 return -EINVAL; /* Not implemented. */
1911 default:
1912 return -EINVAL;
1913 }
1914 break;
1915
1916 default:
1917 WARN_ON_ONCE(1);
1918 return -EINVAL;
1919 }
1920 }
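
/*
 * Example (illustrative): for a syscall event descriptor named
 * "compat_syscall_entry_openat", the checks above strip the "compat_" and
 * "syscall_entry_" prefixes (compat = true, entry = true), verify the
 * enabler's entry/exit and ABI constraints, and finally match the remaining
 * name "openat" against the enabler name or glob pattern.
 */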
1921
1922 bool lttng_desc_match_enabler(const struct lttng_kernel_event_desc *desc,
1923 struct lttng_event_enabler_common *enabler)
1924 {
1925 int ret;
1926
1927 ret = lttng_desc_match_enabler_check(desc, enabler);
1928 if (ret < 0) {
1929 WARN_ON_ONCE(1);
1930 return false;
1931 }
1932 return ret;
1933 }
1934
1935 bool lttng_event_enabler_match_event(struct lttng_event_enabler_common *event_enabler,
1936 struct lttng_kernel_event_common *event)
1937 {
1938 if (event_enabler->event_param.instrumentation != event->priv->instrumentation)
1939 return false;
1940
1941 switch (event_enabler->enabler_type) {
1942 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
1943 {
1944 struct lttng_event_recorder_enabler *event_recorder_enabler =
1945 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
1946 struct lttng_kernel_event_recorder *event_recorder =
1947 container_of(event, struct lttng_kernel_event_recorder, parent);
1948
1949 if (lttng_desc_match_enabler(event->priv->desc, event_enabler)
1950 && event_recorder->chan == event_recorder_enabler->chan)
1951 return true;
1952 else
1953 return false;
1954 }
1955 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
1956 {
1957 struct lttng_event_notifier_enabler *event_notifier_enabler =
1958 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
1959 struct lttng_kernel_event_notifier *event_notifier =
1960 container_of(event, struct lttng_kernel_event_notifier, parent);
1961
1962 if (lttng_desc_match_enabler(event->priv->desc, event_enabler)
1963 && event_notifier->priv->group == event_notifier_enabler->group
1964 && event->priv->user_token == event_enabler->user_token)
1965 return true;
1966 else
1967 return false;
1968 }
1969 default:
1970 WARN_ON_ONCE(1);
1971 return false;
1972 }
1973 }
1974
1975 bool lttng_event_enabler_desc_match_event(struct lttng_event_enabler_common *event_enabler,
1976 const struct lttng_kernel_event_desc *desc,
1977 struct lttng_kernel_event_common *event)
1978 {
1979 if (event_enabler->event_param.instrumentation != event->priv->instrumentation)
1980 return false;
1981
1982 switch (event_enabler->enabler_type) {
1983 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
1984 {
1985 struct lttng_event_recorder_enabler *event_recorder_enabler =
1986 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
1987 struct lttng_kernel_event_recorder *event_recorder =
1988 container_of(event, struct lttng_kernel_event_recorder, parent);
1989
1990 if (event->priv->desc == desc && event_recorder->chan == event_recorder_enabler->chan)
1991 return true;
1992 else
1993 return false;
1994 }
1995 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
1996 {
1997 struct lttng_event_notifier_enabler *event_notifier_enabler =
1998 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
1999 struct lttng_kernel_event_notifier *event_notifier =
2000 container_of(event, struct lttng_kernel_event_notifier, parent);
2001
2002 if (event->priv->desc == desc
2003 && event_notifier->priv->group == event_notifier_enabler->group
2004 && event->priv->user_token == event_enabler->user_token)
2005 return true;
2006 else
2007 return false;
2008 }
2009 default:
2010 WARN_ON_ONCE(1);
2011 return false;
2012 }
2013 }
2014
2015 bool lttng_event_enabler_event_name_match_event(struct lttng_event_enabler_common *event_enabler,
2016 const char *event_name,
2017 struct lttng_kernel_event_common *event)
2018 {
2019 if (event_enabler->event_param.instrumentation != event->priv->instrumentation)
2020 return false;
2021
2022 switch (event_enabler->enabler_type) {
2023 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2024 {
2025 struct lttng_event_recorder_enabler *event_recorder_enabler =
2026 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2027 struct lttng_kernel_event_recorder *event_recorder =
2028 container_of(event, struct lttng_kernel_event_recorder, parent);
2029
2030 if (!strncmp(event->priv->desc->event_name, event_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
2031 && event_recorder->chan == event_recorder_enabler->chan)
2032 return true;
2033 else
2034 return false;
2035 }
2036 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2037 {
2038 struct lttng_event_notifier_enabler *event_notifier_enabler =
2039 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2040 struct lttng_kernel_event_notifier *event_notifier =
2041 container_of(event, struct lttng_kernel_event_notifier, parent);
2042
2043 if (!strncmp(event->priv->desc->event_name, event_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
2044 && event_notifier->priv->group == event_notifier_enabler->group
2045 && event->priv->user_token == event_enabler->user_token)
2046 return true;
2047 else
2048 return false;
2049 }
2050 default:
2051 WARN_ON_ONCE(1);
2052 return false;
2053 }
2054 }
2055
2056 static
2057 struct lttng_enabler_ref *lttng_enabler_ref(
2058 struct list_head *enablers_ref_list,
2059 struct lttng_event_enabler_common *enabler)
2060 {
2061 struct lttng_enabler_ref *enabler_ref;
2062
2063 list_for_each_entry(enabler_ref, enablers_ref_list, node) {
2064 if (enabler_ref->ref == enabler)
2065 return enabler_ref;
2066 }
2067 return NULL;
2068 }
2069
2070 static
2071 void lttng_event_enabler_create_tracepoint_events_if_missing(struct lttng_event_enabler_common *event_enabler)
2072 {
2073 struct lttng_event_ht *events_ht = lttng_get_event_ht_from_enabler(event_enabler);
2074 struct lttng_kernel_probe_desc *probe_desc;
2075 const struct lttng_kernel_event_desc *desc;
2076 struct list_head *probe_list;
2077 int i;
2078
2079 probe_list = lttng_get_probe_list_head();
2080 /*
2081 * For each event provided by each probe descriptor, create an
2082 * associated lttng_event if it matches our enabler and is not
2083 * already present.
2084 */
2085 list_for_each_entry(probe_desc, probe_list, head) {
2086 for (i = 0; i < probe_desc->nr_events; i++) {
2087 bool found = false;
2088 struct hlist_head *head;
2089 struct lttng_kernel_event_common *event;
2090 struct lttng_kernel_event_common_private *event_priv;
2091
2092 desc = probe_desc->event_desc[i];
2093 if (!lttng_desc_match_enabler(desc, event_enabler))
2094 continue;
2095
2096 /*
2097 * Check if already created.
2098 */
2099 head = utils_borrow_hash_table_bucket(events_ht->table, LTTNG_EVENT_HT_SIZE, desc->event_name);
2100 lttng_hlist_for_each_entry(event_priv, head, hlist_node) {
2101 if (lttng_event_enabler_desc_match_event(event_enabler, desc, event_priv->pub)) {
2102 found = true;
2103 break;
2104 }
2105 }
2106 if (found)
2107 continue;
2108
2109 /*
2110 * We need to create an event for this probe event.
2111 */
2112 event = _lttng_kernel_event_create(event_enabler, desc);
2113 if (IS_ERR(event)) {
2114 printk(KERN_INFO "LTTng: Unable to create event %s\n",
2115 probe_desc->event_desc[i]->event_name);
2116 }
2117 }
2118 }
2119 }
2120
2121 /*
2122 * Create event if it is missing and present in the list of tracepoint probes.
2123 * Should be called with sessions mutex held.
2124 */
2125 static
2126 void lttng_event_enabler_create_events_if_missing(struct lttng_event_enabler_common *event_enabler)
2127 {
2128 int ret;
2129
2130 switch (event_enabler->event_param.instrumentation) {
2131 case LTTNG_KERNEL_ABI_TRACEPOINT:
2132 lttng_event_enabler_create_tracepoint_events_if_missing(event_enabler);
2133 break;
2134
2135 case LTTNG_KERNEL_ABI_SYSCALL:
2136 ret = lttng_event_enabler_create_syscall_events_if_missing(event_enabler);
2137 WARN_ON_ONCE(ret);
2138 break;
2139
2140 default:
2141 WARN_ON_ONCE(1);
2142 break;
2143 }
2144 }
2145
2146 static
2147 void lttng_event_enabler_init_event_filter(struct lttng_event_enabler_common *event_enabler,
2148 struct lttng_kernel_event_common *event)
2149 {
2150 /* Link filter bytecodes if not linked yet. */
2151 lttng_enabler_link_bytecode(event->priv->desc, lttng_static_ctx,
2152 &event->priv->filter_bytecode_runtime_head, &event_enabler->filter_bytecode_head);
2153 }
2154
2155 static
2156 void lttng_event_enabler_init_event_capture(struct lttng_event_enabler_common *event_enabler,
2157 struct lttng_kernel_event_common *event)
2158 {
2159 switch (event_enabler->enabler_type) {
2160 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2161 break;
2162 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2163 {
2164 struct lttng_event_notifier_enabler *event_notifier_enabler =
2165 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2166 struct lttng_kernel_event_notifier *event_notifier =
2167 container_of(event, struct lttng_kernel_event_notifier, parent);
2168
2169 /* Link capture bytecodes if not linked yet. */
2170 lttng_enabler_link_bytecode(event->priv->desc,
2171 lttng_static_ctx, &event_notifier->priv->capture_bytecode_runtime_head,
2172 &event_notifier_enabler->capture_bytecode_head);
2173 event_notifier->priv->num_captures = event_notifier_enabler->num_captures;
2174 break;
2175 }
2176 default:
2177 WARN_ON_ONCE(1);
2178 }
2179 }
2180
2181 /*
2182 * Create events associated with an event_enabler (if not already present),
2183 * and add backward reference from the event to the enabler.
2184 * Should be called with sessions mutex held.
2185 */
2186 static
2187 int lttng_event_enabler_ref_events(struct lttng_event_enabler_common *event_enabler)
2188 {
2189 struct list_head *event_list_head = lttng_get_event_list_head_from_enabler(event_enabler);
2190 struct lttng_kernel_event_common_private *event_priv;
2191
2192 lttng_syscall_table_set_wildcard_all(event_enabler);
2193
2194 /* First ensure that probe events are created for this enabler. */
2195 lttng_event_enabler_create_events_if_missing(event_enabler);
2196
2197 /* Link the created events with their associated enabler. */
2198 list_for_each_entry(event_priv, event_list_head, node) {
2199 struct lttng_kernel_event_common *event = event_priv->pub;
2200 struct lttng_enabler_ref *enabler_ref;
2201
2202 if (!lttng_event_enabler_match_event(event_enabler, event))
2203 continue;
2204
2205 enabler_ref = lttng_enabler_ref(&event_priv->enablers_ref_head, event_enabler);
2206 if (!enabler_ref) {
2207 /*
2208 * If there is no backward reference yet, create it:
2209 * add a backward ref from the event to the enabler.
2210 */
2211 enabler_ref = kzalloc(sizeof(*enabler_ref), GFP_KERNEL);
2212 if (!enabler_ref)
2213 return -ENOMEM;
2214
2215 enabler_ref->ref = event_enabler;
2216 list_add(&enabler_ref->node, &event_priv->enablers_ref_head);
2217 }
2218
2219 lttng_event_enabler_init_event_filter(event_enabler, event);
2220 lttng_event_enabler_init_event_capture(event_enabler, event);
2221 }
2222 return 0;
2223 }
2224
2225 /*
2226 * Called at module load: connect the probe on all enablers matching
2227 * this event.
2228 * Called with sessions lock held.
2229 */
2230 int lttng_fix_pending_events(void)
2231 {
2232 struct lttng_kernel_session_private *session_priv;
2233
2234 list_for_each_entry(session_priv, &sessions, list)
2235 lttng_session_lazy_sync_event_enablers(session_priv->pub);
2236 return 0;
2237 }
2238
2239 static bool lttng_event_notifier_group_has_active_event_notifiers(
2240 struct lttng_event_notifier_group *event_notifier_group)
2241 {
2242 struct lttng_event_enabler_common *event_enabler;
2243
2244 list_for_each_entry(event_enabler, &event_notifier_group->enablers_head, node) {
2245 if (event_enabler->enabled)
2246 return true;
2247 }
2248 return false;
2249 }
2250
2251 bool lttng_event_notifier_active(void)
2252 {
2253 struct lttng_event_notifier_group *event_notifier_group;
2254
2255 list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
2256 if (lttng_event_notifier_group_has_active_event_notifiers(event_notifier_group))
2257 return true;
2258 }
2259 return false;
2260 }
2261
2262 int lttng_fix_pending_event_notifiers(void)
2263 {
2264 struct lttng_event_notifier_group *event_notifier_group;
2265
2266 list_for_each_entry(event_notifier_group, &event_notifier_groups, node)
2267 lttng_event_notifier_group_sync_enablers(event_notifier_group);
2268 return 0;
2269 }
2270
2271 struct lttng_event_recorder_enabler *lttng_event_recorder_enabler_create(
2272 enum lttng_enabler_format_type format_type,
2273 struct lttng_kernel_abi_event *event_param,
2274 struct lttng_kernel_channel_buffer *chan)
2275 {
2276 struct lttng_event_recorder_enabler *event_enabler;
2277
2278 event_enabler = kzalloc(sizeof(*event_enabler), GFP_KERNEL);
2279 if (!event_enabler)
2280 return NULL;
2281 event_enabler->parent.enabler_type = LTTNG_EVENT_ENABLER_TYPE_RECORDER;
2282 event_enabler->parent.format_type = format_type;
2283 INIT_LIST_HEAD(&event_enabler->parent.filter_bytecode_head);
2284 memcpy(&event_enabler->parent.event_param, event_param,
2285 sizeof(event_enabler->parent.event_param));
2286 event_enabler->chan = chan;
2287 /* ctx left NULL */
2288 event_enabler->parent.enabled = 0;
2289 return event_enabler;
2290 }
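
/*
 * Usage sketch (illustrative, roughly what the ABI layer does): create the
 * enabler, publish it on the session, then enable it through its common
 * parent:
 *
 *   enabler = lttng_event_recorder_enabler_create(LTTNG_ENABLER_FORMAT_NAME,
 *                   event_param, chan);
 *   if (!enabler)
 *           return -ENOMEM;
 *   lttng_event_enabler_session_add(chan->parent.session, enabler);
 *   lttng_event_enabler_enable(&enabler->parent);
 *
 * lttng_event_enabler_session_add() takes the sessions mutex, adds the
 * enabler to the session's enablers_head list and triggers a lazy sync of
 * the session's events.
 */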
2291
2292 void lttng_event_enabler_session_add(struct lttng_kernel_session *session,
2293 struct lttng_event_recorder_enabler *event_enabler)
2294 {
2295 mutex_lock(&sessions_mutex);
2296 list_add(&event_enabler->parent.node, &session->priv->enablers_head);
2297 event_enabler->parent.published = true;
2298 lttng_session_lazy_sync_event_enablers(session);
2299 mutex_unlock(&sessions_mutex);
2300 }
2301
2302 int lttng_event_enabler_enable(struct lttng_event_enabler_common *event_enabler)
2303 {
2304 mutex_lock(&sessions_mutex);
2305 event_enabler->enabled = 1;
2306 lttng_event_enabler_sync(event_enabler);
2307 mutex_unlock(&sessions_mutex);
2308 return 0;
2309 }
2310
2311 int lttng_event_enabler_disable(struct lttng_event_enabler_common *event_enabler)
2312 {
2313 mutex_lock(&sessions_mutex);
2314 event_enabler->enabled = 0;
2315 lttng_event_enabler_sync(event_enabler);
2316 mutex_unlock(&sessions_mutex);
2317 return 0;
2318 }
2319
2320 static
2321 int lttng_enabler_attach_filter_bytecode(struct lttng_event_enabler_common *enabler,
2322 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2323 {
2324 struct lttng_kernel_bytecode_node *bytecode_node;
2325 uint32_t bytecode_len;
2326 int ret;
2327
2328 ret = get_user(bytecode_len, &bytecode->len);
2329 if (ret)
2330 return ret;
2331 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2332 GFP_KERNEL);
2333 if (!bytecode_node)
2334 return -ENOMEM;
2335 ret = copy_from_user(&bytecode_node->bc, bytecode,
2336 sizeof(*bytecode) + bytecode_len);
2337 if (ret)
2338 goto error_free;
2339
2340 bytecode_node->type = LTTNG_KERNEL_BYTECODE_TYPE_FILTER;
2341 bytecode_node->enabler = enabler;
2342 /* Enforce length based on allocated size */
2343 bytecode_node->bc.len = bytecode_len;
2344 list_add_tail(&bytecode_node->node, &enabler->filter_bytecode_head);
2345
2346 return 0;
2347
2348 error_free:
2349 lttng_kvfree(bytecode_node);
2350 return ret;
2351 }
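
/*
 * Note on the copy above: the userspace bytecode object is a fixed-size
 * header (from which only the "len" field is read with get_user()) followed
 * by "len" bytes of bytecode, so copy_from_user() transfers
 * sizeof(*bytecode) + bytecode_len bytes in one go. The kernel-side length
 * is then re-derived from the allocation size instead of being trusted from
 * the copied header.
 */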
2352
2353 int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler_common *event_enabler,
2354 struct lttng_kernel_abi_filter_bytecode __user *bytecode)
2355 {
2356 int ret;
2357 ret = lttng_enabler_attach_filter_bytecode(event_enabler, bytecode);
2358 if (ret)
2359 goto error;
2360 lttng_event_enabler_sync(event_enabler);
2361 return 0;
2362
2363 error:
2364 return ret;
2365 }
2366
2367 int lttng_event_add_callsite(struct lttng_kernel_event_common *event,
2368 struct lttng_kernel_abi_event_callsite __user *callsite)
2369 {
2370
2371 switch (event->priv->instrumentation) {
2372 case LTTNG_KERNEL_ABI_UPROBE:
2373 return lttng_uprobes_event_add_callsite(event, callsite);
2374 default:
2375 return -EINVAL;
2376 }
2377 }
2378
2379 static
2380 void lttng_enabler_destroy(struct lttng_event_enabler_common *enabler)
2381 {
2382 struct lttng_kernel_bytecode_node *filter_node, *tmp_filter_node;
2383
2384 /* Destroy filter bytecode */
2385 list_for_each_entry_safe(filter_node, tmp_filter_node,
2386 &enabler->filter_bytecode_head, node) {
2387 lttng_kvfree(filter_node);
2388 }
2389 }
2390
2391 void lttng_event_enabler_destroy(struct lttng_event_enabler_common *event_enabler)
2392 {
2393 lttng_enabler_destroy(event_enabler);
2394 if (event_enabler->published)
2395 list_del(&event_enabler->node);
2396
2397 switch (event_enabler->enabler_type) {
2398 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2399 {
2400 struct lttng_event_recorder_enabler *event_recorder_enabler =
2401 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2402
2403 kfree(event_recorder_enabler);
2404 break;
2405 }
2406 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2407 {
2408 struct lttng_event_notifier_enabler *event_notifier_enabler =
2409 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2410
2411 kfree(event_notifier_enabler);
2412 break;
2413 }
2414 default:
2415 WARN_ON_ONCE(1);
2416 }
2417 }
2418
2419 struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
2420 enum lttng_enabler_format_type format_type,
2421 struct lttng_kernel_abi_event_notifier *event_notifier_param,
2422 struct lttng_event_notifier_group *event_notifier_group)
2423 {
2424 struct lttng_event_notifier_enabler *event_notifier_enabler;
2425
2426 event_notifier_enabler = kzalloc(sizeof(*event_notifier_enabler), GFP_KERNEL);
2427 if (!event_notifier_enabler)
2428 return NULL;
2429
2430 event_notifier_enabler->parent.enabler_type = LTTNG_EVENT_ENABLER_TYPE_NOTIFIER;
2431 event_notifier_enabler->parent.format_type = format_type;
2432 INIT_LIST_HEAD(&event_notifier_enabler->parent.filter_bytecode_head);
2433 INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);
2434
2435 event_notifier_enabler->error_counter_index = event_notifier_param->error_counter_index;
2436 event_notifier_enabler->num_captures = 0;
2437
2438 memcpy(&event_notifier_enabler->parent.event_param, &event_notifier_param->event,
2439 sizeof(event_notifier_enabler->parent.event_param));
2440
2441 event_notifier_enabler->parent.enabled = 0;
2442 event_notifier_enabler->parent.user_token = event_notifier_param->event.token;
2443 event_notifier_enabler->group = event_notifier_group;
2444 return event_notifier_enabler;
2445 }
2446
2447 void lttng_event_notifier_enabler_group_add(struct lttng_event_notifier_group *event_notifier_group,
2448 struct lttng_event_notifier_enabler *event_notifier_enabler)
2449 {
2450 mutex_lock(&sessions_mutex);
2451 list_add(&event_notifier_enabler->parent.node, &event_notifier_enabler->group->enablers_head);
2452 event_notifier_enabler->parent.published = true;
2453 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2454 mutex_unlock(&sessions_mutex);
2455 }
2456
2457 int lttng_event_notifier_enabler_enable(
2458 struct lttng_event_notifier_enabler *event_notifier_enabler)
2459 {
2460 mutex_lock(&sessions_mutex);
2461 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
2462 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2463 mutex_unlock(&sessions_mutex);
2464 return 0;
2465 }
2466
2467 int lttng_event_notifier_enabler_disable(
2468 struct lttng_event_notifier_enabler *event_notifier_enabler)
2469 {
2470 mutex_lock(&sessions_mutex);
2471 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
2472 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2473 mutex_unlock(&sessions_mutex);
2474 return 0;
2475 }
2476
2477 int lttng_event_notifier_enabler_attach_capture_bytecode(
2478 struct lttng_event_notifier_enabler *event_notifier_enabler,
2479 struct lttng_kernel_abi_capture_bytecode __user *bytecode)
2480 {
2481 struct lttng_kernel_bytecode_node *bytecode_node;
2482 struct lttng_event_enabler_common *enabler =
2483 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler);
2484 uint32_t bytecode_len;
2485 int ret;
2486
2487 ret = get_user(bytecode_len, &bytecode->len);
2488 if (ret)
2489 return ret;
2490
2491 bytecode_node = lttng_kvzalloc(sizeof(*bytecode_node) + bytecode_len,
2492 GFP_KERNEL);
2493 if (!bytecode_node)
2494 return -ENOMEM;
2495
2496 ret = copy_from_user(&bytecode_node->bc, bytecode,
2497 sizeof(*bytecode) + bytecode_len);
2498 if (ret)
2499 goto error_free;
2500
2501 bytecode_node->type = LTTNG_KERNEL_BYTECODE_TYPE_CAPTURE;
2502 bytecode_node->enabler = enabler;
2503
2504 /* Enforce length based on allocated size */
2505 bytecode_node->bc.len = bytecode_len;
2506 list_add_tail(&bytecode_node->node, &event_notifier_enabler->capture_bytecode_head);
2507
2508 event_notifier_enabler->num_captures++;
2509
2510 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2511 goto end;
2512
2513 error_free:
2514 lttng_kvfree(bytecode_node);
2515 end:
2516 return ret;
2517 }
2518
2519 static
2520 void lttng_event_sync_filter_state(struct lttng_kernel_event_common *event)
2521 {
2522 int has_enablers_without_filter_bytecode = 0, nr_filters = 0;
2523 struct lttng_kernel_bytecode_runtime *runtime;
2524 struct lttng_enabler_ref *enabler_ref;
2525
2526 /* Check whether the event has enabled enablers without filter bytecode. */
2527 list_for_each_entry(enabler_ref, &event->priv->enablers_ref_head, node) {
2528 if (enabler_ref->ref->enabled
2529 && list_empty(&enabler_ref->ref->filter_bytecode_head)) {
2530 has_enablers_without_filter_bytecode = 1;
2531 break;
2532 }
2533 }
2534 event->priv->has_enablers_without_filter_bytecode = has_enablers_without_filter_bytecode;
2535
2536 /* Sync state of each filter bytecode runtime. */
2537 list_for_each_entry(runtime, &event->priv->filter_bytecode_runtime_head, node) {
2538 lttng_bytecode_sync_state(runtime);
2539 nr_filters++;
2540 }
2541 WRITE_ONCE(event->eval_filter, !(has_enablers_without_filter_bytecode || !nr_filters));
2542 }
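
/*
 * Illustrative summary of the resulting eval_filter state:
 *
 *   linked filter     every enabled enabler
 *   runtimes          has filter bytecode       eval_filter
 *   -------------     ---------------------     -----------
 *   none              (any)                     false
 *   at least one      no                        false
 *   at least one      yes                       true
 *
 * Filters are thus only evaluated when the event fires if at least one
 * bytecode runtime is linked and no enabled enabler lacks bytecode.
 */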
2543
2544 static
2545 void lttng_event_sync_capture_state(struct lttng_kernel_event_common *event)
2546 {
2547 switch (event->type) {
2548 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
2549 break;
2550 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
2551 {
2552 struct lttng_kernel_event_notifier *event_notifier =
2553 container_of(event, struct lttng_kernel_event_notifier, parent);
2554 struct lttng_kernel_bytecode_runtime *runtime;
2555 int nr_captures = 0;
2556
2557 /* Sync state of each capture bytecode runtime. */
2558 list_for_each_entry(runtime, &event_notifier->priv->capture_bytecode_runtime_head, node) {
2559 lttng_bytecode_sync_state(runtime);
2560 nr_captures++;
2561 }
2562 WRITE_ONCE(event_notifier->eval_capture, !!nr_captures);
2563 break;
2564 }
2565 default:
2566 WARN_ON_ONCE(1);
2567 }
2568 }
2569
2570 static
2571 bool lttng_get_event_enabled_state(struct lttng_kernel_event_common *event)
2572 {
2573 struct lttng_enabler_ref *enabler_ref;
2574 bool enabled = false;
2575
2576 switch (event->priv->instrumentation) {
2577 case LTTNG_KERNEL_ABI_TRACEPOINT:
2578 lttng_fallthrough;
2579 case LTTNG_KERNEL_ABI_SYSCALL:
2580 /* The event is enabled if at least one of its enablers is enabled. */
2581 list_for_each_entry(enabler_ref, &event->priv->enablers_ref_head, node) {
2582 if (enabler_ref->ref->enabled) {
2583 enabled = true;
2584 break;
2585 }
2586 }
2587 break;
2588 default:
2589 WARN_ON_ONCE(1);
2590 return false;
2591 }
2592
2593 switch (event->type) {
2594 case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
2595 {
2596 struct lttng_kernel_event_recorder *event_recorder =
2597 container_of(event, struct lttng_kernel_event_recorder, parent);
2598
2599 /*
2600 * Enabled state is based on union of enablers, with
2601 * intersection of session and channel transient enable
2602 * states.
2603 */
2604 return enabled && event_recorder->chan->parent.session->priv->tstate && event_recorder->chan->priv->parent.tstate;
2605 }
2606 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
2607 return enabled;
2608 default:
2609 WARN_ON_ONCE(1);
2610 return false;
2611 }
2612 }
2613
2614 static
2615 bool lttng_event_is_lazy_sync(struct lttng_kernel_event_common *event)
2616 {
2617 switch (event->priv->instrumentation) {
2618 case LTTNG_KERNEL_ABI_TRACEPOINT:
2619 lttng_fallthrough;
2620 case LTTNG_KERNEL_ABI_SYSCALL:
2621 return true;
2622
2623 default:
2624 /* Not handled with lazy sync. */
2625 return false;
2626 }
2627 }
2628
2629 /*
2630 * Should be called with sessions mutex held.
2631 */
2632 static
2633 void lttng_sync_event_list(struct list_head *event_enabler_list,
2634 struct list_head *event_list)
2635 {
2636 struct lttng_kernel_event_common_private *event_priv;
2637 struct lttng_event_enabler_common *event_enabler;
2638
2639 list_for_each_entry(event_enabler, event_enabler_list, node)
2640 lttng_event_enabler_ref_events(event_enabler);
2641
2642 /*
2643 * For each event, if at least one of its enablers is enabled,
2644 * and its channel and session transient states are enabled, we
2645 * enable the event, else we disable it.
2646 */
2647 list_for_each_entry(event_priv, event_list, node) {
2648 struct lttng_kernel_event_common *event = event_priv->pub;
2649 bool enabled;
2650
2651 if (!lttng_event_is_lazy_sync(event))
2652 continue;
2653
2654 enabled = lttng_get_event_enabled_state(event);
2655 WRITE_ONCE(event->enabled, enabled);
2656 /*
2657 * Sync tracepoint registration with event enabled state.
2658 */
2659 if (enabled) {
2660 if (!event_priv->registered)
2661 register_event(event);
2662 } else {
2663 if (event_priv->registered)
2664 unregister_event(event);
2665 }
2666
2667 lttng_event_sync_filter_state(event);
2668 lttng_event_sync_capture_state(event);
2669 }
2670 }
2671
2672 /*
2673 * lttng_session_sync_event_enablers should be called just before starting a
2674 * session.
2675 */
2676 static
2677 void lttng_session_sync_event_enablers(struct lttng_kernel_session *session)
2678 {
2679 lttng_sync_event_list(&session->priv->enablers_head, &session->priv->events);
2680 }
2681
2682 /*
2683 * Apply enablers to session events, adding events to session if need
2684 * be. It is required after each modification applied to an active
2685 * session, and right before session "start".
2686 * "lazy" sync means we only sync if required.
2687 * Should be called with sessions mutex held.
2688 */
2689 static
2690 void lttng_session_lazy_sync_event_enablers(struct lttng_kernel_session *session)
2691 {
2692 /* We can skip if session is not active */
2693 if (!session->active)
2694 return;
2695 lttng_session_sync_event_enablers(session);
2696 }
2697
2698 static
2699 void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
2700 {
2701 lttng_sync_event_list(&event_notifier_group->enablers_head, &event_notifier_group->event_notifiers_head);
2702 }
2703
2704 static
2705 void lttng_event_enabler_sync(struct lttng_event_enabler_common *event_enabler)
2706 {
2707 switch (event_enabler->enabler_type) {
2708 case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
2709 {
2710 struct lttng_event_recorder_enabler *event_recorder_enabler =
2711 container_of(event_enabler, struct lttng_event_recorder_enabler, parent);
2712 lttng_session_lazy_sync_event_enablers(event_recorder_enabler->chan->parent.session);
2713 break;
2714 }
2715 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
2716 {
2717 struct lttng_event_notifier_enabler *event_notifier_enabler =
2718 container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
2719 lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
2720 break;
2721 }
2722 default:
2723 WARN_ON_ONCE(1);
2724 }
2725 }
2726
2727 /*
2728 * Serialize at most one packet worth of metadata into a metadata
2729 * channel.
2730 * We grab the metadata cache mutex to get exclusive access to our metadata
2731 * buffer and to the metadata cache. Exclusive access to the metadata buffer
2732 * allows us to do racy operations such as looking for remaining space left in
2733 * packet and write, since mutual exclusion protects us from concurrent writes.
2734 * Mutual exclusion on the metadata cache allows us to read the cache content
2735 * without racing against reallocation of the cache by updates.
2736 * Returns the number of bytes written to the channel, 0 if no data
2737 * was written and a negative value on error.
2738 */
2739 int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
2740 struct lttng_kernel_ring_buffer_channel *chan, bool *coherent)
2741 {
2742 struct lttng_kernel_ring_buffer_ctx ctx;
2743 int ret = 0;
2744 size_t len, reserve_len;
2745
2746 /*
2747 * Ensure we support multiple get_next / put sequences followed by
2748 * put_next. The metadata cache lock protects reading the metadata
2749 * cache. It can indeed be read concurrently by "get_next_subbuf" and
2750 * "flush" operations on the buffer invoked by different processes.
2751 * Moreover, since the metadata cache memory can be reallocated, we
2752 * need to have exclusive access against updates even though we only
2753 * read it.
2754 */
2755 mutex_lock(&stream->metadata_cache->lock);
2756 WARN_ON(stream->metadata_in < stream->metadata_out);
2757 if (stream->metadata_in != stream->metadata_out)
2758 goto end;
2759
2760 /* Metadata regenerated, change the version. */
2761 if (stream->metadata_cache->version != stream->version)
2762 stream->version = stream->metadata_cache->version;
2763
2764 len = stream->metadata_cache->metadata_written -
2765 stream->metadata_in;
2766 if (!len)
2767 goto end;
2768 reserve_len = min_t(size_t,
2769 stream->transport->ops.priv->packet_avail_size(chan),
2770 len);
2771 lib_ring_buffer_ctx_init(&ctx, chan, reserve_len,
2772 sizeof(char), NULL);
2773 /*
2774 * If reservation failed, return an error to the caller.
2775 */
2776 ret = stream->transport->ops.event_reserve(&ctx);
2777 if (ret != 0) {
2778 printk(KERN_WARNING "LTTng: Metadata event reservation failed\n");
2779 stream->coherent = false;
2780 goto end;
2781 }
2782 stream->transport->ops.event_write(&ctx,
2783 stream->metadata_cache->data + stream->metadata_in,
2784 reserve_len, 1);
2785 stream->transport->ops.event_commit(&ctx);
2786 stream->metadata_in += reserve_len;
2787 if (reserve_len < len)
2788 stream->coherent = false;
2789 else
2790 stream->coherent = true;
2791 ret = reserve_len;
2792
2793 end:
2794 if (coherent)
2795 *coherent = stream->coherent;
2796 mutex_unlock(&stream->metadata_cache->lock);
2797 return ret;
2798 }
2799
2800 static
2801 void lttng_metadata_begin(struct lttng_kernel_session *session)
2802 {
2803 if (atomic_inc_return(&session->priv->metadata_cache->producing) == 1)
2804 mutex_lock(&session->priv->metadata_cache->lock);
2805 }
2806
2807 static
2808 void lttng_metadata_end(struct lttng_kernel_session *session)
2809 {
2810 WARN_ON_ONCE(!atomic_read(&session->priv->metadata_cache->producing));
2811 if (atomic_dec_return(&session->priv->metadata_cache->producing) == 0) {
2812 struct lttng_metadata_stream *stream;
2813
2814 list_for_each_entry(stream, &session->priv->metadata_cache->metadata_stream, list)
2815 wake_up_interruptible(&stream->read_wait);
2816 mutex_unlock(&session->priv->metadata_cache->lock);
2817 }
2818 }
2819
2820 /*
2821 * Write the metadata to the metadata cache.
2822 * Must be called with sessions_mutex held.
2823 * The metadata cache lock protects us from concurrent read access from the
2824 * thread outputting metadata content to the ring buffer.
2825 * The content of the printf is printed as a single atomic metadata
2826 * transaction.
2827 */
2828 int lttng_metadata_printf(struct lttng_kernel_session *session,
2829 const char *fmt, ...)
2830 {
2831 char *str;
2832 size_t len;
2833 va_list ap;
2834
2835 WARN_ON_ONCE(!LTTNG_READ_ONCE(session->active));
2836
2837 va_start(ap, fmt);
2838 str = kvasprintf(GFP_KERNEL, fmt, ap);
2839 va_end(ap);
2840 if (!str)
2841 return -ENOMEM;
2842
2843 len = strlen(str);
2844 WARN_ON_ONCE(!atomic_read(&session->priv->metadata_cache->producing));
2845 if (session->priv->metadata_cache->metadata_written + len >
2846 session->priv->metadata_cache->cache_alloc) {
2847 char *tmp_cache_realloc;
2848 unsigned int tmp_cache_alloc_size;
2849
2850 tmp_cache_alloc_size = max_t(unsigned int,
2851 session->priv->metadata_cache->cache_alloc + len,
2852 session->priv->metadata_cache->cache_alloc << 1);
2853 tmp_cache_realloc = vzalloc(tmp_cache_alloc_size);
2854 if (!tmp_cache_realloc)
2855 goto err;
2856 if (session->priv->metadata_cache->data) {
2857 memcpy(tmp_cache_realloc,
2858 session->priv->metadata_cache->data,
2859 session->priv->metadata_cache->cache_alloc);
2860 vfree(session->priv->metadata_cache->data);
2861 }
2862
2863 session->priv->metadata_cache->cache_alloc = tmp_cache_alloc_size;
2864 session->priv->metadata_cache->data = tmp_cache_realloc;
2865 }
2866 memcpy(session->priv->metadata_cache->data +
2867 session->priv->metadata_cache->metadata_written,
2868 str, len);
2869 session->priv->metadata_cache->metadata_written += len;
2870 kfree(str);
2871
2872 return 0;
2873
2874 err:
2875 kfree(str);
2876 return -ENOMEM;
2877 }
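
/*
 * Usage sketch (illustrative): callers bracket one logical metadata
 * transaction with lttng_metadata_begin()/lttng_metadata_end() and may issue
 * several printf calls in between:
 *
 *   lttng_metadata_begin(session);
 *   ret = lttng_metadata_printf(session, "event {\n");
 *   if (!ret)
 *           ret = lttng_metadata_printf(session, "};\n\n");
 *   lttng_metadata_end(session);
 *
 * When a write does not fit, the cache allocation is grown to the larger of
 * twice its current size and the current size plus the new string length,
 * and the existing content is copied over.
 */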
2878
2879 static
2880 int print_tabs(struct lttng_kernel_session *session, size_t nesting)
2881 {
2882 size_t i;
2883
2884 for (i = 0; i < nesting; i++) {
2885 int ret;
2886
2887 ret = lttng_metadata_printf(session, " ");
2888 if (ret) {
2889 return ret;
2890 }
2891 }
2892 return 0;
2893 }
2894
2895 static
2896 int lttng_field_name_statedump(struct lttng_kernel_session *session,
2897 const struct lttng_kernel_event_field *field,
2898 size_t nesting)
2899 {
2900 return lttng_metadata_printf(session, " _%s;\n", field->name);
2901 }
2902
2903 static
2904 int _lttng_integer_type_statedump(struct lttng_kernel_session *session,
2905 const struct lttng_kernel_type_integer *type,
2906 enum lttng_kernel_string_encoding parent_encoding,
2907 size_t nesting)
2908 {
2909 int ret;
2910
2911 ret = print_tabs(session, nesting);
2912 if (ret)
2913 return ret;
2914 ret = lttng_metadata_printf(session,
2915 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
2916 type->size,
2917 type->alignment,
2918 type->signedness,
2919 (parent_encoding == lttng_kernel_string_encoding_none)
2920 ? "none"
2921 : (parent_encoding == lttng_kernel_string_encoding_UTF8)
2922 ? "UTF8"
2923 : "ASCII",
2924 type->base,
2925 #if __BYTE_ORDER == __BIG_ENDIAN
2926 type->reverse_byte_order ? " byte_order = le;" : ""
2927 #else
2928 type->reverse_byte_order ? " byte_order = be;" : ""
2929 #endif
2930 );
2931 return ret;
2932 }
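
/*
 * Example of the emitted CTF (illustrative): a 32-bit, byte-aligned, signed
 * integer in base 10 with native byte order is serialized as
 *
 *   integer { size = 32; align = 8; signed = 1; encoding = none; base = 10; }
 *
 * The byte_order attribute is only emitted when the type's byte order
 * differs from the native byte order.
 */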
2933
2934 /*
2935 * Must be called with sessions_mutex held.
2936 */
2937 static
2938 int _lttng_struct_type_statedump(struct lttng_kernel_session *session,
2939 const struct lttng_kernel_type_struct *type,
2940 size_t nesting)
2941 {
2942 const char *prev_field_name = NULL;
2943 int ret;
2944 uint32_t i, nr_fields;
2945 unsigned int alignment;
2946
2947 ret = print_tabs(session, nesting);
2948 if (ret)
2949 return ret;
2950 ret = lttng_metadata_printf(session,
2951 "struct {\n");
2952 if (ret)
2953 return ret;
2954 nr_fields = type->nr_fields;
2955 for (i = 0; i < nr_fields; i++) {
2956 const struct lttng_kernel_event_field *iter_field;
2957
2958 iter_field = type->fields[i];
2959 ret = _lttng_field_statedump(session, iter_field, nesting + 1, &prev_field_name);
2960 if (ret)
2961 return ret;
2962 }
2963 ret = print_tabs(session, nesting);
2964 if (ret)
2965 return ret;
2966 alignment = type->alignment;
2967 if (alignment) {
2968 ret = lttng_metadata_printf(session,
2969 "} align(%u)",
2970 alignment);
2971 } else {
2972 ret = lttng_metadata_printf(session,
2973 "}");
2974 }
2975 return ret;
2976 }
2977
2978 /*
2979 * Must be called with sessions_mutex held.
2980 */
2981 static
2982 int _lttng_struct_field_statedump(struct lttng_kernel_session *session,
2983 const struct lttng_kernel_event_field *field,
2984 size_t nesting)
2985 {
2986 int ret;
2987
2988 ret = _lttng_struct_type_statedump(session,
2989 lttng_kernel_get_type_struct(field->type), nesting);
2990 if (ret)
2991 return ret;
2992 return lttng_field_name_statedump(session, field, nesting);
2993 }
2994
2995 /*
2996 * Must be called with sessions_mutex held.
2997 */
2998 static
2999 int _lttng_variant_type_statedump(struct lttng_kernel_session *session,
3000 const struct lttng_kernel_type_variant *type,
3001 size_t nesting,
3002 const char *prev_field_name)
3003 {
3004 const char *tag_name;
3005 int ret;
3006 uint32_t i, nr_choices;
3007
3008 tag_name = type->tag_name;
3009 if (!tag_name)
3010 tag_name = prev_field_name;
3011 if (!tag_name)
3012 return -EINVAL;
3013 /*
3014 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
3015 */
3016 if (type->alignment != 0)
3017 return -EINVAL;
3018 ret = print_tabs(session, nesting);
3019 if (ret)
3020 return ret;
3021 ret = lttng_metadata_printf(session,
3022 "variant <_%s> {\n",
3023 tag_name);
3024 if (ret)
3025 return ret;
3026 nr_choices = type->nr_choices;
3027 for (i = 0; i < nr_choices; i++) {
3028 const struct lttng_kernel_event_field *iter_field;
3029
3030 iter_field = type->choices[i];
3031 ret = _lttng_field_statedump(session, iter_field, nesting + 1, NULL);
3032 if (ret)
3033 return ret;
3034 }
3035 ret = print_tabs(session, nesting);
3036 if (ret)
3037 return ret;
3038 ret = lttng_metadata_printf(session,
3039 "}");
3040 return ret;
3041 }
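
/*
 * Example of the emitted CTF (illustrative): a variant selected by a
 * preceding field named "tag" has the following shape:
 *
 *   variant <_tag> {
 *           ...one declaration per choice...
 *   }
 *
 * The variant field name and the trailing ";" are appended by the caller
 * (_lttng_variant_field_statedump()).
 */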
3042
3043 /*
3044 * Must be called with sessions_mutex held.
3045 */
3046 static
3047 int _lttng_variant_field_statedump(struct lttng_kernel_session *session,
3048 const struct lttng_kernel_event_field *field,
3049 size_t nesting,
3050 const char *prev_field_name)
3051 {
3052 int ret;
3053
3054 ret = _lttng_variant_type_statedump(session,
3055 lttng_kernel_get_type_variant(field->type), nesting,
3056 prev_field_name);
3057 if (ret)
3058 return ret;
3059 return lttng_field_name_statedump(session, field, nesting);
3060 }
3061
3062 /*
3063 * Must be called with sessions_mutex held.
3064 */
3065 static
3066 int _lttng_array_field_statedump(struct lttng_kernel_session *session,
3067 const struct lttng_kernel_event_field *field,
3068 size_t nesting)
3069 {
3070 int ret;
3071 const struct lttng_kernel_type_array *array_type;
3072 const struct lttng_kernel_type_common *elem_type;
3073
3074 array_type = lttng_kernel_get_type_array(field->type);
3075 WARN_ON_ONCE(!array_type);
3076
3077 if (array_type->alignment) {
3078 ret = print_tabs(session, nesting);
3079 if (ret)
3080 return ret;
3081 ret = lttng_metadata_printf(session,
3082 "struct { } align(%u) _%s_padding;\n",
3083 array_type->alignment * CHAR_BIT,
3084 field->name);
3085 if (ret)
3086 return ret;
3087 }
3088 /*
3089 * Nested compound types: Only array of structures and variants are
3090 * currently supported.
3091 */
3092 elem_type = array_type->elem_type;
3093 switch (elem_type->type) {
3094 case lttng_kernel_type_integer:
3095 case lttng_kernel_type_struct:
3096 case lttng_kernel_type_variant:
3097 ret = _lttng_type_statedump(session, elem_type,
3098 array_type->encoding, nesting);
3099 if (ret)
3100 return ret;
3101 break;
3102
3103 default:
3104 return -EINVAL;
3105 }
3106 ret = lttng_metadata_printf(session,
3107 " _%s[%u];\n",
3108 field->name,
3109 array_type->length);
3110 return ret;
3111 }
3112
3113 /*
3114 * Must be called with sessions_mutex held.
3115 */
3116 static
3117 int _lttng_sequence_field_statedump(struct lttng_kernel_session *session,
3118 const struct lttng_kernel_event_field *field,
3119 size_t nesting,
3120 const char *prev_field_name)
3121 {
3122 int ret;
3123 const char *length_name;
3124 const struct lttng_kernel_type_sequence *sequence_type;
3125 const struct lttng_kernel_type_common *elem_type;
3126
3127 sequence_type = lttng_kernel_get_type_sequence(field->type);
3128 WARN_ON_ONCE(!sequence_type);
3129
3130 length_name = sequence_type->length_name;
3131 if (!length_name)
3132 length_name = prev_field_name;
3133 if (!length_name)
3134 return -EINVAL;
3135
3136 if (sequence_type->alignment) {
3137 ret = print_tabs(session, nesting);
3138 if (ret)
3139 return ret;
3140 ret = lttng_metadata_printf(session,
3141 "struct { } align(%u) _%s_padding;\n",
3142 sequence_type->alignment * CHAR_BIT,
3143 field->name);
3144 if (ret)
3145 return ret;
3146 }
3147
3148 /*
3149 * Nested compound types: Only array of structures and variants are
3150 * currently supported.
3151 */
3152 elem_type = sequence_type->elem_type;
3153 switch (elem_type->type) {
3154 case lttng_kernel_type_integer:
3155 case lttng_kernel_type_struct:
3156 case lttng_kernel_type_variant:
3157 ret = _lttng_type_statedump(session, elem_type,
3158 sequence_type->encoding, nesting);
3159 if (ret)
3160 return ret;
3161 break;
3162
3163 default:
3164 return -EINVAL;
3165 }
3166 ret = lttng_metadata_printf(session,
3167 " _%s[ _%s ];\n",
3168 field->name,
3169 length_name);
3170 return ret;
3171 }
3172
3173 /*
3174 * Must be called with sessions_mutex held.
3175 */
3176 static
3177 int _lttng_enum_type_statedump(struct lttng_kernel_session *session,
3178 const struct lttng_kernel_type_enum *type,
3179 size_t nesting)
3180 {
3181 const struct lttng_kernel_enum_desc *enum_desc;
3182 const struct lttng_kernel_type_common *container_type;
3183 int ret;
3184 unsigned int i, nr_entries;
3185
3186 container_type = type->container_type;
3187 if (container_type->type != lttng_kernel_type_integer) {
3188 ret = -EINVAL;
3189 goto end;
3190 }
3191 enum_desc = type->desc;
3192 nr_entries = enum_desc->nr_entries;
3193
3194 ret = print_tabs(session, nesting);
3195 if (ret)
3196 goto end;
3197 ret = lttng_metadata_printf(session, "enum : ");
3198 if (ret)
3199 goto end;
3200 ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(container_type),
3201 lttng_kernel_string_encoding_none, 0);
3202 if (ret)
3203 goto end;
3204 ret = lttng_metadata_printf(session, " {\n");
3205 if (ret)
3206 goto end;
3207 /* Dump all entries */
3208 for (i = 0; i < nr_entries; i++) {
3209 const struct lttng_kernel_enum_entry *entry = enum_desc->entries[i];
3210 int j, len;
3211
3212 ret = print_tabs(session, nesting + 1);
3213 if (ret)
3214 goto end;
3215 ret = lttng_metadata_printf(session,
3216 "\"");
3217 if (ret)
3218 goto end;
3219 len = strlen(entry->string);
3220 /* Escape the characters '"' and '\' */
3221 for (j = 0; j < len; j++) {
3222 char c = entry->string[j];
3223
3224 switch (c) {
3225 case '"':
3226 ret = lttng_metadata_printf(session,
3227 "\\\"");
3228 break;
3229 case '\\':
3230 ret = lttng_metadata_printf(session,
3231 "\\\\");
3232 break;
3233 default:
3234 ret = lttng_metadata_printf(session,
3235 "%c", c);
3236 break;
3237 }
3238 if (ret)
3239 goto end;
3240 }
3241 ret = lttng_metadata_printf(session, "\"");
3242 if (ret)
3243 goto end;
3244
3245 if (entry->options.is_auto) {
3246 ret = lttng_metadata_printf(session, ",\n");
3247 if (ret)
3248 goto end;
3249 } else {
3250 ret = lttng_metadata_printf(session,
3251 " = ");
3252 if (ret)
3253 goto end;
3254 if (entry->start.signedness)
3255 ret = lttng_metadata_printf(session,
3256 "%lld", (long long) entry->start.value);
3257 else
3258 ret = lttng_metadata_printf(session,
3259 "%llu", entry->start.value);
3260 if (ret)
3261 goto end;
3262 if (entry->start.signedness == entry->end.signedness &&
3263 entry->start.value
3264 == entry->end.value) {
3265 ret = lttng_metadata_printf(session,
3266 ",\n");
3267 } else {
3268 if (entry->end.signedness) {
3269 ret = lttng_metadata_printf(session,
3270 " ... %lld,\n",
3271 (long long) entry->end.value);
3272 } else {
3273 ret = lttng_metadata_printf(session,
3274 " ... %llu,\n",
3275 entry->end.value);
3276 }
3277 }
3278 if (ret)
3279 goto end;
3280 }
3281 }
3282 ret = print_tabs(session, nesting);
3283 if (ret)
3284 goto end;
3285 ret = lttng_metadata_printf(session, "}");
3286 end:
3287 return ret;
3288 }
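
/*
 * Example of the emitted CTF (illustrative): an enumeration over an unsigned
 * 32-bit container with one single-value entry and one range entry could be
 * serialized as
 *
 *   enum : integer { size = 32; align = 8; signed = 0; encoding = none; base = 10; } {
 *           "RUNNING" = 0,
 *           "BLOCKED" = 1 ... 3,
 *   }
 *
 * Double quotes and backslashes inside entry labels are escaped so the
 * resulting metadata remains valid CTF.
 */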
3289
3290 /*
3291 * Must be called with sessions_mutex held.
3292 */
3293 static
3294 int _lttng_enum_field_statedump(struct lttng_kernel_session *session,
3295 const struct lttng_kernel_event_field *field,
3296 size_t nesting)
3297 {
3298 int ret;
3299 const struct lttng_kernel_type_enum *enum_type;
3300
3301 enum_type = lttng_kernel_get_type_enum(field->type);
3302 WARN_ON_ONCE(!enum_type);
3303 ret = _lttng_enum_type_statedump(session, enum_type, nesting);
3304 if (ret)
3305 return ret;
3306 return lttng_field_name_statedump(session, field, nesting);
3307 }
3308
3309 static
3310 int _lttng_integer_field_statedump(struct lttng_kernel_session *session,
3311 const struct lttng_kernel_event_field *field,
3312 size_t nesting)
3313 {
3314 int ret;
3315
3316 ret = _lttng_integer_type_statedump(session, lttng_kernel_get_type_integer(field->type),
3317 lttng_kernel_string_encoding_none, nesting);
3318 if (ret)
3319 return ret;
3320 return lttng_field_name_statedump(session, field, nesting);
3321 }
3322
3323 static
3324 int _lttng_string_type_statedump(struct lttng_kernel_session *session,
3325 const struct lttng_kernel_type_string *type,
3326 size_t nesting)
3327 {
3328 int ret;
3329
3330 /* Default encoding is UTF8 */
3331 ret = print_tabs(session, nesting);
3332 if (ret)
3333 return ret;
3334 ret = lttng_metadata_printf(session,
3335 "string%s",
3336 type->encoding == lttng_kernel_string_encoding_ASCII ?
3337 " { encoding = ASCII; }" : "");
3338 return ret;
3339 }
3340
3341 static
3342 int _lttng_string_field_statedump(struct lttng_kernel_session *session,
3343 const struct lttng_kernel_event_field *field,
3344 size_t nesting)
3345 {
3346 const struct lttng_kernel_type_string *string_type;
3347 int ret;
3348
3349 string_type = lttng_kernel_get_type_string(field->type);
3350 WARN_ON_ONCE(!string_type);
3351 ret = _lttng_string_type_statedump(session, string_type, nesting);
3352 if (ret)
3353 return ret;
3354 return lttng_field_name_statedump(session, field, nesting);
3355 }
3356
3357 /*
3358 * Must be called with sessions_mutex held.
3359 */
3360 static
3361 int _lttng_type_statedump(struct lttng_kernel_session *session,
3362 const struct lttng_kernel_type_common *type,
3363 enum lttng_kernel_string_encoding parent_encoding,
3364 size_t nesting)
3365 {
3366 int ret = 0;
3367
3368 switch (type->type) {
3369 case lttng_kernel_type_integer:
3370 ret = _lttng_integer_type_statedump(session,
3371 lttng_kernel_get_type_integer(type),
3372 parent_encoding, nesting);
3373 break;
3374 case lttng_kernel_type_enum:
3375 ret = _lttng_enum_type_statedump(session,
3376 lttng_kernel_get_type_enum(type),
3377 nesting);
3378 break;
3379 case lttng_kernel_type_string:
3380 ret = _lttng_string_type_statedump(session,
3381 lttng_kernel_get_type_string(type),
3382 nesting);
3383 break;
3384 case lttng_kernel_type_struct:
3385 ret = _lttng_struct_type_statedump(session,
3386 lttng_kernel_get_type_struct(type),
3387 nesting);
3388 break;
3389 case lttng_kernel_type_variant:
3390 ret = _lttng_variant_type_statedump(session,
3391 lttng_kernel_get_type_variant(type),
3392 nesting, NULL);
3393 break;
3394
3395 /* Nested arrays and sequences are not supported yet. */
3396 case lttng_kernel_type_array:
3397 case lttng_kernel_type_sequence:
3398 default:
3399 WARN_ON_ONCE(1);
3400 return -EINVAL;
3401 }
3402 return ret;
3403 }
3404
3405 /*
3406 * Must be called with sessions_mutex held.
3407 */
3408 static
3409 int _lttng_field_statedump(struct lttng_kernel_session *session,
3410 const struct lttng_kernel_event_field *field,
3411 size_t nesting,
3412 const char **prev_field_name_p)
3413 {
3414 const char *prev_field_name = NULL;
3415 int ret = 0;
3416
3417 if (prev_field_name_p)
3418 prev_field_name = *prev_field_name_p;
3419 switch (field->type->type) {
3420 case lttng_kernel_type_integer:
3421 ret = _lttng_integer_field_statedump(session, field, nesting);
3422 break;
3423 case lttng_kernel_type_enum:
3424 ret = _lttng_enum_field_statedump(session, field, nesting);
3425 break;
3426 case lttng_kernel_type_string:
3427 ret = _lttng_string_field_statedump(session, field, nesting);
3428 break;
3429 case lttng_kernel_type_struct:
3430 ret = _lttng_struct_field_statedump(session, field, nesting);
3431 break;
3432 case lttng_kernel_type_array:
3433 ret = _lttng_array_field_statedump(session, field, nesting);
3434 break;
3435 case lttng_kernel_type_sequence:
3436 ret = _lttng_sequence_field_statedump(session, field, nesting, prev_field_name);
3437 break;
3438 case lttng_kernel_type_variant:
3439 ret = _lttng_variant_field_statedump(session, field, nesting, prev_field_name);
3440 break;
3441
3442 default:
3443 WARN_ON_ONCE(1);
3444 return -EINVAL;
3445 }
3446 if (prev_field_name_p)
3447 *prev_field_name_p = field->name;
3448 return ret;
3449 }
3450
3451 static
3452 int _lttng_context_metadata_statedump(struct lttng_kernel_session *session,
3453 struct lttng_kernel_ctx *ctx)
3454 {
3455 const char *prev_field_name = NULL;
3456 int ret = 0;
3457 int i;
3458
3459 if (!ctx)
3460 return 0;
3461 for (i = 0; i < ctx->nr_fields; i++) {
3462 const struct lttng_kernel_ctx_field *field = &ctx->fields[i];
3463
3464 ret = _lttng_field_statedump(session, field->event_field, 2, &prev_field_name);
3465 if (ret)
3466 return ret;
3467 }
3468 return ret;
3469 }
3470
3471 static
3472 int _lttng_fields_metadata_statedump(struct lttng_kernel_session *session,
3473 struct lttng_kernel_event_recorder *event_recorder)
3474 {
3475 const char *prev_field_name = NULL;
3476 const struct lttng_kernel_event_desc *desc = event_recorder->priv->parent.desc;
3477 int ret = 0;
3478 int i;
3479
3480 for (i = 0; i < desc->tp_class->nr_fields; i++) {
3481 const struct lttng_kernel_event_field *field = desc->tp_class->fields[i];
3482
3483 ret = _lttng_field_statedump(session, field, 2, &prev_field_name);
3484 if (ret)
3485 return ret;
3486 }
3487 return ret;
3488 }
3489
3490 /*
3491 * Must be called with sessions_mutex held.
3492 * The entire event metadata is printed as a single atomic metadata
3493 * transaction.
3494 */
3495 static
3496 int _lttng_event_recorder_metadata_statedump(struct lttng_kernel_event_common *event)
3497 {
3498 struct lttng_kernel_event_recorder *event_recorder;
3499 struct lttng_kernel_channel_buffer *chan;
3500 struct lttng_kernel_session *session;
3501 int ret = 0;
3502
3503 if (event->type != LTTNG_KERNEL_EVENT_TYPE_RECORDER)
3504 return 0;
3505 event_recorder = container_of(event, struct lttng_kernel_event_recorder, parent);
3506 chan = event_recorder->chan;
3507 session = chan->parent.session;
3508
3509 if (event_recorder->priv->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3510 return 0;
3511 if (chan->priv->channel_type == METADATA_CHANNEL)
3512 return 0;
3513
3514 lttng_metadata_begin(session);
3515
3516 ret = lttng_metadata_printf(session,
3517 "event {\n"
3518 " name = \"%s\";\n"
3519 " id = %u;\n"
3520 " stream_id = %u;\n",
3521 event_recorder->priv->parent.desc->event_name,
3522 event_recorder->priv->id,
3523 event_recorder->chan->priv->id);
3524 if (ret)
3525 goto end;
3526
3527 ret = lttng_metadata_printf(session,
3528 " fields := struct {\n"
3529 );
3530 if (ret)
3531 goto end;
3532
3533 ret = _lttng_fields_metadata_statedump(session, event_recorder);
3534 if (ret)
3535 goto end;
3536
3537 /*
3538 * LTTng space reservation can only reserve multiples of the
3539 * byte size.
3540 */
3541 ret = lttng_metadata_printf(session,
3542 " };\n"
3543 "};\n\n");
3544 if (ret)
3545 goto end;
3546
3547 event_recorder->priv->metadata_dumped = 1;
3548 end:
3549 lttng_metadata_end(session);
3550 return ret;
3551
3552 }
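
/*
 * Example of the emitted CTF (illustrative): for a tracepoint event, the
 * resulting metadata fragment has the following shape:
 *
 *   event {
 *           name = "sched_switch";
 *           id = 3;
 *           stream_id = 0;
 *           fields := struct {
 *                   ...one declaration per event field...
 *           };
 *   };
 */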
3553
3554 /*
3555 * Must be called with sessions_mutex held.
3556 * The entire channel metadata is printed as a single atomic metadata
3557 * transaction.
3558 */
3559 static
3560 int _lttng_channel_metadata_statedump(struct lttng_kernel_session *session,
3561 struct lttng_kernel_channel_buffer *chan)
3562 {
3563 int ret = 0;
3564
3565 if (chan->priv->metadata_dumped || !LTTNG_READ_ONCE(session->active))
3566 return 0;
3567
3568 if (chan->priv->channel_type == METADATA_CHANNEL)
3569 return 0;
3570
3571 lttng_metadata_begin(session);
3572
3573 WARN_ON_ONCE(!chan->priv->header_type);
3574 ret = lttng_metadata_printf(session,
3575 "stream {\n"
3576 " id = %u;\n"
3577 " event.header := %s;\n"
3578 " packet.context := struct packet_context;\n",
3579 chan->priv->id,
3580 chan->priv->header_type == 1 ? "struct event_header_compact" :
3581 "struct event_header_large");
3582 if (ret)
3583 goto end;
3584
3585 if (chan->priv->ctx) {
3586 ret = lttng_metadata_printf(session,
3587 " event.context := struct {\n");
3588 if (ret)
3589 goto end;
3590 }
3591 ret = _lttng_context_metadata_statedump(session, chan->priv->ctx);
3592 if (ret)
3593 goto end;
3594 if (chan->priv->ctx) {
3595 ret = lttng_metadata_printf(session,
3596 " };\n");
3597 if (ret)
3598 goto end;
3599 }
3600
3601 ret = lttng_metadata_printf(session,
3602 "};\n\n");
3603
3604 chan->priv->metadata_dumped = 1;
3605 end:
3606 lttng_metadata_end(session);
3607 return ret;
3608 }
3609
3610 /*
3611 * Must be called with sessions_mutex held.
3612 */
3613 static
3614 int _lttng_stream_packet_context_declare(struct lttng_kernel_session *session)
3615 {
3616 return lttng_metadata_printf(session,
3617 "struct packet_context {\n"
3618 " uint64_clock_monotonic_t timestamp_begin;\n"
3619 " uint64_clock_monotonic_t timestamp_end;\n"
3620 " uint64_t content_size;\n"
3621 " uint64_t packet_size;\n"
3622 " uint64_t packet_seq_num;\n"
3623 " unsigned long events_discarded;\n"
3624 " uint32_t cpu_id;\n"
3625 "};\n\n"
3626 );
3627 }
3628
3629 /*
3630 * Compact header:
3631 * id: range: 0 - 30.
3632 * id 31 is reserved to indicate an extended header.
3633 *
3634 * Large header:
3635 * id: range: 0 - 65534.
3636 * id 65535 is reserved to indicate an extended header.
3637 *
3638 * Must be called with sessions_mutex held.
3639 */
3640 static
3641 int _lttng_event_header_declare(struct lttng_kernel_session *session)
3642 {
3643 return lttng_metadata_printf(session,
3644 "struct event_header_compact {\n"
3645 " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
3646 " variant <id> {\n"
3647 " struct {\n"
3648 " uint27_clock_monotonic_t timestamp;\n"
3649 " } compact;\n"
3650 " struct {\n"
3651 " uint32_t id;\n"
3652 " uint64_clock_monotonic_t timestamp;\n"
3653 " } extended;\n"
3654 " } v;\n"
3655 "} align(%u);\n"
3656 "\n"
3657 "struct event_header_large {\n"
3658 " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
3659 " variant <id> {\n"
3660 " struct {\n"
3661 " uint32_clock_monotonic_t timestamp;\n"
3662 " } compact;\n"
3663 " struct {\n"
3664 " uint32_t id;\n"
3665 " uint64_clock_monotonic_t timestamp;\n"
3666 " } extended;\n"
3667 " } v;\n"
3668 "} align(%u);\n\n",
3669 lttng_alignof(uint32_t) * CHAR_BIT,
3670 lttng_alignof(uint16_t) * CHAR_BIT
3671 );
3672 }
3673
3674 /*
3675 * Approximation of NTP time of day to clock monotonic correlation,
3676 * taken at start of trace.
3677 * Yes, this is only an approximation. Yes, we can (and will) do better
3678 * in future versions.
3679 * This function may return a negative offset. It may happen if the
3680 * system sets the REALTIME clock to 0 after boot.
3681 *
3682 * Use a 64-bit timespec on kernels that have it; this makes 32-bit
3683 * architectures y2038 compliant.
3684 */
3685 static
3686 int64_t measure_clock_offset(void)
3687 {
3688 uint64_t monotonic_avg, monotonic[2], realtime;
3689 uint64_t tcf = trace_clock_freq();
3690 int64_t offset;
3691 unsigned long flags;
3692 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3693 struct timespec64 rts = { 0, 0 };
3694 #else
3695 struct timespec rts = { 0, 0 };
3696 #endif
3697
3698 /* Disable interrupts to increase correlation precision. */
3699 local_irq_save(flags);
3700 monotonic[0] = trace_clock_read64();
3701 #ifdef LTTNG_KERNEL_HAS_TIMESPEC64
3702 ktime_get_real_ts64(&rts);
3703 #else
3704 getnstimeofday(&rts);
3705 #endif
3706 monotonic[1] = trace_clock_read64();
3707 local_irq_restore(flags);
3708
3709 monotonic_avg = (monotonic[0] + monotonic[1]) >> 1;
3710 realtime = (uint64_t) rts.tv_sec * tcf;
3711 if (tcf == NSEC_PER_SEC) {
3712 realtime += rts.tv_nsec;
3713 } else {
3714 uint64_t n = rts.tv_nsec * tcf;
3715
3716 do_div(n, NSEC_PER_SEC);
3717 realtime += n;
3718 }
3719 offset = (int64_t) realtime - monotonic_avg;
3720 return offset;
3721 }
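
/*
 * Worked example (illustrative, made-up values, assuming
 * tcf == NSEC_PER_SEC == 1000000000):
 *
 *	monotonic[0] = 1000200, monotonic[1] = 1000400
 *	rts = { .tv_sec = 1700000000, .tv_nsec = 500 }
 *
 *	monotonic_avg = (1000200 + 1000400) >> 1 = 1000300
 *	realtime      = 1700000000 * 1000000000 + 500
 *	offset        = realtime - monotonic_avg
 *
 * A trace reader can then approximate the REALTIME date of an event as
 * (event clock value + offset) / freq seconds since the Epoch, which matches
 * the offset semantics advertised in the clock metadata below.
 */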
3722
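/*
 * Escape characters that would break a CTF metadata string literal:
 * newline is emitted as "\n", and '\\' and '"' are prefixed with a
 * backslash; every other character is printed as-is.
 */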
3723 static
3724 int print_escaped_ctf_string(struct lttng_kernel_session *session, const char *string)
3725 {
3726 int ret = 0;
3727 size_t i;
3728 char cur;
3729
3730 i = 0;
3731 cur = string[i];
3732 while (cur != '\0') {
3733 switch (cur) {
3734 case '\n':
3735 ret = lttng_metadata_printf(session, "%s", "\\n");
3736 break;
3737 case '\\':
3738 case '"':
3739 ret = lttng_metadata_printf(session, "%c", '\\');
3740 if (ret)
3741 goto error;
3742 /* We still print the current char */
3743 lttng_fallthrough;
3744 default:
3745 ret = lttng_metadata_printf(session, "%c", cur);
3746 break;
3747 }
3748
3749 if (ret)
3750 goto error;
3751
3752 cur = string[++i];
3753 }
3754 error:
3755 return ret;
3756 }
3757
3758 static
3759 int print_metadata_escaped_field(struct lttng_kernel_session *session, const char *field,
3760 const char *field_value)
3761 {
3762 int ret;
3763
3764 ret = lttng_metadata_printf(session, " %s = \"", field);
3765 if (ret)
3766 goto error;
3767
3768 ret = print_escaped_ctf_string(session, field_value);
3769 if (ret)
3770 goto error;
3771
3772 ret = lttng_metadata_printf(session, "\";\n");
3773
3774 error:
3775 return ret;
3776 }
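
/*
 * Example (illustrative): for a field value containing  my "trace"  the call
 * print_metadata_escaped_field(session, "trace_name", value) emits
 *
 *	trace_name = "my \"trace\"";
 *
 * into the metadata stream, i.e. embedded quotes, backslashes and newlines
 * are escaped so the generated metadata remains parseable.
 */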
3777
3778 /*
3779 * Output metadata into this session's metadata buffers.
3780 * Must be called with sessions_mutex held.
3781 */
3782 static
3783 int _lttng_session_metadata_statedump(struct lttng_kernel_session *session)
3784 {
3785 unsigned char *uuid_c = session->priv->uuid.b;
3786 unsigned char uuid_s[37], clock_uuid_s[BOOT_ID_LEN];
3787 const char *product_uuid;
3788 struct lttng_kernel_channel_buffer_private *chan_priv;
3789 struct lttng_kernel_event_recorder_private *event_recorder_priv;
3790 int ret = 0;
3791
3792 if (!LTTNG_READ_ONCE(session->active))
3793 return 0;
3794
3795 lttng_metadata_begin(session);
3796
3797 if (session->priv->metadata_dumped)
3798 goto skip_session;
3799
3800 snprintf(uuid_s, sizeof(uuid_s),
3801 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
3802 uuid_c[0], uuid_c[1], uuid_c[2], uuid_c[3],
3803 uuid_c[4], uuid_c[5], uuid_c[6], uuid_c[7],
3804 uuid_c[8], uuid_c[9], uuid_c[10], uuid_c[11],
3805 uuid_c[12], uuid_c[13], uuid_c[14], uuid_c[15]);
3806
3807 ret = lttng_metadata_printf(session,
3808 "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
3809 "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
3810 "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
3811 "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
3812 "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
3813 "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
3814 "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
3815 "\n"
3816 "trace {\n"
3817 " major = %u;\n"
3818 " minor = %u;\n"
3819 " uuid = \"%s\";\n"
3820 " byte_order = %s;\n"
3821 " packet.header := struct {\n"
3822 " uint32_t magic;\n"
3823 " uint8_t uuid[16];\n"
3824 " uint32_t stream_id;\n"
3825 " uint64_t stream_instance_id;\n"
3826 " };\n"
3827 "};\n\n",
3828 lttng_alignof(uint8_t) * CHAR_BIT,
3829 lttng_alignof(uint16_t) * CHAR_BIT,
3830 lttng_alignof(uint32_t) * CHAR_BIT,
3831 lttng_alignof(uint64_t) * CHAR_BIT,
3832 sizeof(unsigned long) * CHAR_BIT,
3833 lttng_alignof(unsigned long) * CHAR_BIT,
3834 CTF_SPEC_MAJOR,
3835 CTF_SPEC_MINOR,
3836 uuid_s,
3837 #if __BYTE_ORDER == __BIG_ENDIAN
3838 "be"
3839 #else
3840 "le"
3841 #endif
3842 );
3843 if (ret)
3844 goto end;
3845
3846 ret = lttng_metadata_printf(session,
3847 "env {\n"
3848 " hostname = \"%s\";\n"
3849 " domain = \"kernel\";\n"
3850 " sysname = \"%s\";\n"
3851 " kernel_release = \"%s\";\n"
3852 " kernel_version = \"%s\";\n"
3853 " tracer_name = \"lttng-modules\";\n"
3854 " tracer_major = %d;\n"
3855 " tracer_minor = %d;\n"
3856 " tracer_patchlevel = %d;\n"
3857 " trace_buffering_scheme = \"global\";\n",
3858 current->nsproxy->uts_ns->name.nodename,
3859 utsname()->sysname,
3860 utsname()->release,
3861 utsname()->version,
3862 LTTNG_MODULES_MAJOR_VERSION,
3863 LTTNG_MODULES_MINOR_VERSION,
3864 LTTNG_MODULES_PATCHLEVEL_VERSION
3865 );
3866 if (ret)
3867 goto end;
3868
3869 ret = print_metadata_escaped_field(session, "trace_name", session->priv->name);
3870 if (ret)
3871 goto end;
3872 ret = print_metadata_escaped_field(session, "trace_creation_datetime",
3873 session->priv->creation_time);
3874 if (ret)
3875 goto end;
3876
3877 /* Add the product UUID to the 'env' section */
3878 product_uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
3879 if (product_uuid) {
3880 ret = lttng_metadata_printf(session,
3881 " product_uuid = \"%s\";\n",
3882 product_uuid
3883 );
3884 if (ret)
3885 goto end;
3886 }
3887
3888 /* Close the 'env' section */
3889 ret = lttng_metadata_printf(session, "};\n\n");
3890 if (ret)
3891 goto end;
3892
3893 ret = lttng_metadata_printf(session,
3894 "clock {\n"
3895 " name = \"%s\";\n",
3896 trace_clock_name()
3897 );
3898 if (ret)
3899 goto end;
3900
3901 if (!trace_clock_uuid(clock_uuid_s)) {
3902 ret = lttng_metadata_printf(session,
3903 " uuid = \"%s\";\n",
3904 clock_uuid_s
3905 );
3906 if (ret)
3907 goto end;
3908 }
3909
3910 ret = lttng_metadata_printf(session,
3911 " description = \"%s\";\n"
3912 " freq = %llu; /* Frequency, in Hz */\n"
3913 " /* clock value offset from Epoch is: offset * (1/freq) */\n"
3914 " offset = %lld;\n"
3915 "};\n\n",
3916 trace_clock_description(),
3917 (unsigned long long) trace_clock_freq(),
3918 (long long) measure_clock_offset()
3919 );
3920 if (ret)
3921 goto end;
3922
3923 ret = lttng_metadata_printf(session,
3924 "typealias integer {\n"
3925 " size = 27; align = 1; signed = false;\n"
3926 " map = clock.%s.value;\n"
3927 "} := uint27_clock_monotonic_t;\n"
3928 "\n"
3929 "typealias integer {\n"
3930 " size = 32; align = %u; signed = false;\n"
3931 " map = clock.%s.value;\n"
3932 "} := uint32_clock_monotonic_t;\n"
3933 "\n"
3934 "typealias integer {\n"
3935 " size = 64; align = %u; signed = false;\n"
3936 " map = clock.%s.value;\n"
3937 "} := uint64_clock_monotonic_t;\n\n",
3938 trace_clock_name(),
3939 lttng_alignof(uint32_t) * CHAR_BIT,
3940 trace_clock_name(),
3941 lttng_alignof(uint64_t) * CHAR_BIT,
3942 trace_clock_name()
3943 );
3944 if (ret)
3945 goto end;
3946
3947 ret = _lttng_stream_packet_context_declare(session);
3948 if (ret)
3949 goto end;
3950
3951 ret = _lttng_event_header_declare(session);
3952 if (ret)
3953 goto end;
3954
3955 skip_session:
3956 list_for_each_entry(chan_priv, &session->priv->chan, node) {
3957 ret = _lttng_channel_metadata_statedump(session, chan_priv->pub);
3958 if (ret)
3959 goto end;
3960 }
3961
3962 list_for_each_entry(event_recorder_priv, &session->priv->events, parent.node) {
3963 ret = _lttng_event_recorder_metadata_statedump(&event_recorder_priv->pub->parent);
3964 if (ret)
3965 goto end;
3966 }
3967 session->priv->metadata_dumped = 1;
3968 end:
3969 lttng_metadata_end(session);
3970 return ret;
3971 }
3972
3973 /**
3974  * lttng_transport_register - LTTng transport registration
3975  * @transport: transport structure
3976  *
3977  * Registers a transport which can be used as an output to extract the data
3978  * out of LTTng. The module calling this registration function must ensure that
3979  * no trap-inducing code will be executed by the transport functions. E.g.
3980  * vmalloc_sync_mappings() must be called between a vmalloc and the moment the
3981  * memory is made visible to the transport function. This registration acts as
3982  * a vmalloc_sync_mappings(); therefore, a module only needs to synchronize the
3983  * TLBs itself if it allocates virtual memory after its registration.
3984 */
3985 void lttng_transport_register(struct lttng_transport *transport)
3986 {
3987 /*
3988 * Make sure no page fault can be triggered by the module about to be
3989 * registered. We deal with this here so we don't have to call
3990 * vmalloc_sync_mappings() in each module's init.
3991 */
3992 wrapper_vmalloc_sync_mappings();
3993
3994 mutex_lock(&sessions_mutex);
3995 list_add_tail(&transport->node, &lttng_transport_list);
3996 mutex_unlock(&sessions_mutex);
3997 }
3998 EXPORT_SYMBOL_GPL(lttng_transport_register);
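
/*
 * Minimal usage sketch (illustrative only; the transport name and the init
 * function are hypothetical, and a real ring-buffer client also fills in
 * the channel buffer operations before registering):
 *
 *	static struct lttng_transport my_transport = {
 *		.name = "relay-example",
 *		.owner = THIS_MODULE,
 *		// .ops = { ... channel buffer operations ... },
 *	};
 *
 *	static int __init my_client_init(void)
 *	{
 *		// Registration performs the vmalloc_sync_mappings() on our
 *		// behalf, as documented above.
 *		lttng_transport_register(&my_transport);
 *		return 0;
 *	}
 */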
3999
4000 /**
4001  * lttng_transport_unregister - LTTng transport unregistration
4002 * @transport: transport structure
4003 */
4004 void lttng_transport_unregister(struct lttng_transport *transport)
4005 {
4006 mutex_lock(&sessions_mutex);
4007 list_del(&transport->node);
4008 mutex_unlock(&sessions_mutex);
4009 }
4010 EXPORT_SYMBOL_GPL(lttng_transport_unregister);
4011
4012 void lttng_counter_transport_register(struct lttng_counter_transport *transport)
4013 {
4014 /*
4015 * Make sure no page fault can be triggered by the module about to be
4016 * registered. We deal with this here so we don't have to call
4017 * vmalloc_sync_mappings() in each module's init.
4018 */
4019 wrapper_vmalloc_sync_mappings();
4020
4021 mutex_lock(&sessions_mutex);
4022 list_add_tail(&transport->node, &lttng_counter_transport_list);
4023 mutex_unlock(&sessions_mutex);
4024 }
4025 EXPORT_SYMBOL_GPL(lttng_counter_transport_register);
4026
4027 void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
4028 {
4029 mutex_lock(&sessions_mutex);
4030 list_del(&transport->node);
4031 mutex_unlock(&sessions_mutex);
4032 }
4033 EXPORT_SYMBOL_GPL(lttng_counter_transport_unregister);
4034
4035 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
4036
4037 enum cpuhp_state lttng_hp_prepare;
4038 enum cpuhp_state lttng_hp_online;
4039
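/*
 * Multi-instance CPU hotplug callbacks: each registered lttng_cpuhp_node
 * dispatches on its component type to the ring-buffer frontend/backend,
 * the buffer iterator or the perf-counter context. Components that do not
 * care about a given transition simply return 0.
 */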
4040 static int lttng_hotplug_prepare(unsigned int cpu, struct hlist_node *node)
4041 {
4042 struct lttng_cpuhp_node *lttng_node;
4043
4044 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4045 switch (lttng_node->component) {
4046 case LTTNG_RING_BUFFER_FRONTEND:
4047 return 0;
4048 case LTTNG_RING_BUFFER_BACKEND:
4049 return lttng_cpuhp_rb_backend_prepare(cpu, lttng_node);
4050 case LTTNG_RING_BUFFER_ITER:
4051 return 0;
4052 case LTTNG_CONTEXT_PERF_COUNTERS:
4053 return 0;
4054 default:
4055 return -EINVAL;
4056 }
4057 }
4058
4059 static int lttng_hotplug_dead(unsigned int cpu, struct hlist_node *node)
4060 {
4061 struct lttng_cpuhp_node *lttng_node;
4062
4063 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4064 switch (lttng_node->component) {
4065 case LTTNG_RING_BUFFER_FRONTEND:
4066 return lttng_cpuhp_rb_frontend_dead(cpu, lttng_node);
4067 case LTTNG_RING_BUFFER_BACKEND:
4068 return 0;
4069 case LTTNG_RING_BUFFER_ITER:
4070 return 0;
4071 case LTTNG_CONTEXT_PERF_COUNTERS:
4072 return lttng_cpuhp_perf_counter_dead(cpu, lttng_node);
4073 default:
4074 return -EINVAL;
4075 }
4076 }
4077
4078 static int lttng_hotplug_online(unsigned int cpu, struct hlist_node *node)
4079 {
4080 struct lttng_cpuhp_node *lttng_node;
4081
4082 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4083 switch (lttng_node->component) {
4084 case LTTNG_RING_BUFFER_FRONTEND:
4085 return lttng_cpuhp_rb_frontend_online(cpu, lttng_node);
4086 case LTTNG_RING_BUFFER_BACKEND:
4087 return 0;
4088 case LTTNG_RING_BUFFER_ITER:
4089 return lttng_cpuhp_rb_iter_online(cpu, lttng_node);
4090 case LTTNG_CONTEXT_PERF_COUNTERS:
4091 return lttng_cpuhp_perf_counter_online(cpu, lttng_node);
4092 default:
4093 return -EINVAL;
4094 }
4095 }
4096
4097 static int lttng_hotplug_offline(unsigned int cpu, struct hlist_node *node)
4098 {
4099 struct lttng_cpuhp_node *lttng_node;
4100
4101 lttng_node = container_of(node, struct lttng_cpuhp_node, node);
4102 switch (lttng_node->component) {
4103 case LTTNG_RING_BUFFER_FRONTEND:
4104 return lttng_cpuhp_rb_frontend_offline(cpu, lttng_node);
4105 case LTTNG_RING_BUFFER_BACKEND:
4106 return 0;
4107 case LTTNG_RING_BUFFER_ITER:
4108 return 0;
4109 case LTTNG_CONTEXT_PERF_COUNTERS:
4110 return 0;
4111 default:
4112 return -EINVAL;
4113 }
4114 }
4115
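/*
 * Register two dynamic multi-instance CPU hotplug states: a "prepare" state
 * (with a "dead" teardown callback) in the bring-up phase, and an "online"
 * state (with an "offline" teardown callback) in the application phase.
 * The returned state values are handed to the ring buffer so that per-CPU
 * instances can later be attached to them (typically via
 * cpuhp_state_add_instance()).
 */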
4116 static int __init lttng_init_cpu_hotplug(void)
4117 {
4118 int ret;
4119
4120 ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "lttng:prepare",
4121 lttng_hotplug_prepare,
4122 lttng_hotplug_dead);
4123 if (ret < 0) {
4124 return ret;
4125 }
4126 lttng_hp_prepare = ret;
4127 lttng_rb_set_hp_prepare(ret);
4128
4129 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "lttng:online",
4130 lttng_hotplug_online,
4131 lttng_hotplug_offline);
4132 if (ret < 0) {
4133 cpuhp_remove_multi_state(lttng_hp_prepare);
4134 lttng_hp_prepare = 0;
4135 return ret;
4136 }
4137 lttng_hp_online = ret;
4138 lttng_rb_set_hp_online(ret);
4139
4140 return 0;
4141 }
4142
4143 static void __exit lttng_exit_cpu_hotplug(void)
4144 {
4145 lttng_rb_set_hp_online(0);
4146 cpuhp_remove_multi_state(lttng_hp_online);
4147 lttng_rb_set_hp_prepare(0);
4148 cpuhp_remove_multi_state(lttng_hp_prepare);
4149 }
4150
4151 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4152 static int lttng_init_cpu_hotplug(void)
4153 {
4154 return 0;
4155 }
4156 static void lttng_exit_cpu_hotplug(void)
4157 {
4158 }
4159 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4160
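/*
 * Module entry point: set up the wrappers, probes, contexts and tracepoint
 * infrastructure, create the event kmem caches, then bring up the ABI, the
 * logger and CPU hotplug support. Error paths unwind in the reverse order
 * of initialization.
 */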
4161 static int __init lttng_events_init(void)
4162 {
4163 int ret;
4164
4165 ret = wrapper_lttng_fixup_sig(THIS_MODULE);
4166 if (ret)
4167 return ret;
4168 ret = wrapper_get_pfnblock_flags_mask_init();
4169 if (ret)
4170 return ret;
4171 ret = wrapper_get_pageblock_flags_mask_init();
4172 if (ret)
4173 return ret;
4174 ret = lttng_probes_init();
4175 if (ret)
4176 return ret;
4177 ret = lttng_context_init();
4178 if (ret)
4179 return ret;
4180 ret = lttng_tracepoint_init();
4181 if (ret)
4182 goto error_tp;
4183 event_recorder_cache = KMEM_CACHE(lttng_kernel_event_recorder, 0);
4184 if (!event_recorder_cache) {
4185 ret = -ENOMEM;
4186 goto error_kmem_event_recorder;
4187 }
4188 event_recorder_private_cache = KMEM_CACHE(lttng_kernel_event_recorder_private, 0);
4189 if (!event_recorder_private_cache) {
4190 ret = -ENOMEM;
4191 goto error_kmem_event_recorder_private;
4192 }
4193 event_notifier_cache = KMEM_CACHE(lttng_kernel_event_notifier, 0);
4194 if (!event_notifier_cache) {
4195 ret = -ENOMEM;
4196 goto error_kmem_event_notifier;
4197 }
4198 event_notifier_private_cache = KMEM_CACHE(lttng_kernel_event_notifier_private, 0);
4199 if (!event_notifier_private_cache) {
4200 ret = -ENOMEM;
4201 goto error_kmem_event_notifier_private;
4202 }
4203 ret = lttng_abi_init();
4204 if (ret)
4205 goto error_abi;
4206 ret = lttng_logger_init();
4207 if (ret)
4208 goto error_logger;
4209 ret = lttng_init_cpu_hotplug();
4210 if (ret)
4211 goto error_hotplug;
4212 printk(KERN_NOTICE "LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
4213 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4214 __stringify(LTTNG_MODULES_MINOR_VERSION),
4215 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4216 LTTNG_MODULES_EXTRAVERSION,
4217 LTTNG_VERSION_NAME,
4218 #ifdef LTTNG_EXTRA_VERSION_GIT
4219 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4220 #else
4221 "",
4222 #endif
4223 #ifdef LTTNG_EXTRA_VERSION_NAME
4224 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4225 #else
4226 "");
4227 #endif
4228 #ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
4229 printk(KERN_NOTICE "LTTng: Experimental bitwise enum enabled.\n");
4230 #endif /* CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM */
4231 return 0;
4232
4233 error_hotplug:
4234 lttng_logger_exit();
4235 error_logger:
4236 lttng_abi_exit();
4237 error_abi:
4238 kmem_cache_destroy(event_notifier_private_cache);
4239 error_kmem_event_notifier_private:
4240 kmem_cache_destroy(event_notifier_cache);
4241 error_kmem_event_notifier:
4242 kmem_cache_destroy(event_recorder_private_cache);
4243 error_kmem_event_recorder_private:
4244 kmem_cache_destroy(event_recorder_cache);
4245 error_kmem_event_recorder:
4246 lttng_tracepoint_exit();
4247 error_tp:
4248 lttng_context_exit();
4249 printk(KERN_NOTICE "LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
4250 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4251 __stringify(LTTNG_MODULES_MINOR_VERSION),
4252 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4253 LTTNG_MODULES_EXTRAVERSION,
4254 LTTNG_VERSION_NAME,
4255 #ifdef LTTNG_EXTRA_VERSION_GIT
4256 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4257 #else
4258 "",
4259 #endif
4260 #ifdef LTTNG_EXTRA_VERSION_NAME
4261 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4262 #else
4263 "");
4264 #endif
4265 return ret;
4266 }
4267
4268 module_init(lttng_events_init);
4269
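/*
 * Module exit: tear down CPU hotplug, logger and ABI support, destroy any
 * session still alive, then release the kmem caches and the tracepoint and
 * context state.
 */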
4270 static void __exit lttng_events_exit(void)
4271 {
4272 struct lttng_kernel_session_private *session_priv, *tmpsession_priv;
4273
4274 lttng_exit_cpu_hotplug();
4275 lttng_logger_exit();
4276 lttng_abi_exit();
4277 list_for_each_entry_safe(session_priv, tmpsession_priv, &sessions, list)
4278 lttng_session_destroy(session_priv->pub);
4279 kmem_cache_destroy(event_recorder_cache);
4280 kmem_cache_destroy(event_recorder_private_cache);
4281 kmem_cache_destroy(event_notifier_cache);
4282 kmem_cache_destroy(event_notifier_private_cache);
4283 lttng_tracepoint_exit();
4284 lttng_context_exit();
4285 printk(KERN_NOTICE "LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
4286 __stringify(LTTNG_MODULES_MAJOR_VERSION),
4287 __stringify(LTTNG_MODULES_MINOR_VERSION),
4288 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION),
4289 LTTNG_MODULES_EXTRAVERSION,
4290 LTTNG_VERSION_NAME,
4291 #ifdef LTTNG_EXTRA_VERSION_GIT
4292 LTTNG_EXTRA_VERSION_GIT[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT,
4293 #else
4294 "",
4295 #endif
4296 #ifdef LTTNG_EXTRA_VERSION_NAME
4297 LTTNG_EXTRA_VERSION_NAME[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME);
4298 #else
4299 "");
4300 #endif
4301 }
4302
4303 module_exit(lttng_events_exit);
4304
4305 #include <generated/patches.h>
4306 #ifdef LTTNG_EXTRA_VERSION_GIT
4307 MODULE_INFO(extra_version_git, LTTNG_EXTRA_VERSION_GIT);
4308 #endif
4309 #ifdef LTTNG_EXTRA_VERSION_NAME
4310 MODULE_INFO(extra_version_name, LTTNG_EXTRA_VERSION_NAME);
4311 #endif
4312 MODULE_LICENSE("GPL and additional rights");
4313 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
4314 MODULE_DESCRIPTION("LTTng tracer");
4315 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
4316 __stringify(LTTNG_MODULES_MINOR_VERSION) "."
4317 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
4318 LTTNG_MODULES_EXTRAVERSION);