/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * Holds LTTng per-session event registry.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
/*
 * This page_alloc.h wrapper needs to be included before gfpflags.h because it
 * overrides a function with a define.
 */
#include "wrapper/page_alloc.h"

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/utsname.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/dmi.h>

#include <wrapper/compiler_attributes.h>
#include <wrapper/uuid.h>
#include <wrapper/vmalloc.h>	/* for wrapper_vmalloc_sync_mappings() */
#include <wrapper/random.h>
#include <wrapper/tracepoint.h>
#include <wrapper/list.h>
#include <wrapper/types.h>
#include <lttng/kernel-version.h>
#include <lttng/events.h>
#include <lttng/events-internal.h>
#include <lttng/lttng-bytecode.h>
#include <lttng/tracer.h>
#include <lttng/event-notifier-notification.h>
#include <lttng/abi-old.h>
#include <lttng/endian.h>
#include <lttng/string-utils.h>
#include <lttng/utils.h>
#include <counter/counter.h>
#include <ringbuffer/backend.h>
#include <ringbuffer/frontend.h>

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,16,0) \
	|| LTTNG_RHEL_KERNEL_RANGE(5,14,0,162,0,0, 5,15,0,0,0,0))
#include <linux/stdarg.h>
#else
#include <stdarg.h>
#endif
58 #define METADATA_CACHE_DEFAULT_SIZE 4096
60 static LIST_HEAD(sessions
);
61 static LIST_HEAD(event_notifier_groups
);
62 static LIST_HEAD(lttng_transport_list
);
63 static LIST_HEAD(lttng_counter_transport_list
);
65 * Protect the sessions and metadata caches.
67 static DEFINE_MUTEX(sessions_mutex
);
68 static struct kmem_cache
*event_recorder_cache
;
69 static struct kmem_cache
*event_recorder_private_cache
;
70 static struct kmem_cache
*event_counter_cache
;
71 static struct kmem_cache
*event_counter_private_cache
;
72 static struct kmem_cache
*event_notifier_cache
;
73 static struct kmem_cache
*event_notifier_private_cache
;
75 static void lttng_session_lazy_sync_event_enablers(struct lttng_kernel_session
*session
);
76 static void lttng_session_sync_event_enablers(struct lttng_kernel_session
*session
);
77 static void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group
*event_notifier_group
);
78 static void lttng_event_enabler_sync(struct lttng_event_enabler_common
*event_enabler
);
80 static void _lttng_event_destroy(struct lttng_kernel_event_common
*event
);
81 static void _lttng_channel_destroy(struct lttng_kernel_channel_common
*chan
);
82 static void _lttng_event_unregister(struct lttng_kernel_event_common
*event
);
84 int _lttng_event_recorder_metadata_statedump(struct lttng_kernel_event_common
*event
);
86 int _lttng_session_metadata_statedump(struct lttng_kernel_session
*session
);
88 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream
*stream
);
90 int _lttng_type_statedump(struct lttng_kernel_session
*session
,
91 const struct lttng_kernel_type_common
*type
,
92 enum lttng_kernel_string_encoding parent_encoding
,
95 int _lttng_field_statedump(struct lttng_kernel_session
*session
,
96 const struct lttng_kernel_event_field
*field
,
97 size_t nesting
, const char **prev_field_name_p
);
99 void synchronize_trace(void)
101 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,1,0) || \
102 LTTNG_RHEL_KERNEL_RANGE(4,18,0,193,0,0, 4,19,0,0,0,0))
108 #ifdef CONFIG_PREEMPT_RT_FULL
113 void lttng_lock_sessions(void)
115 mutex_lock(&sessions_mutex
);
118 void lttng_unlock_sessions(void)
120 mutex_unlock(&sessions_mutex
);
123 static struct lttng_transport
*lttng_transport_find(const char *name
)
125 struct lttng_transport
*transport
;
127 list_for_each_entry(transport
, <tng_transport_list
, node
) {
128 if (!strcmp(transport
->name
, name
))
135 * Called with sessions lock held.
137 int lttng_session_active(void)
139 struct lttng_kernel_session_private
*iter
;
141 list_for_each_entry(iter
, &sessions
, node
) {
142 if (iter
->pub
->active
)
148 struct lttng_kernel_session
*lttng_session_create(void)
150 struct lttng_kernel_session
*session
;
151 struct lttng_kernel_session_private
*session_priv
;
152 struct lttng_metadata_cache
*metadata_cache
;
155 mutex_lock(&sessions_mutex
);
156 session
= lttng_kvzalloc(sizeof(*session
), GFP_KERNEL
);
159 session_priv
= lttng_kvzalloc(sizeof(*session_priv
), GFP_KERNEL
);
161 goto err_free_session
;
162 session
->priv
= session_priv
;
163 session_priv
->pub
= session
;
165 INIT_LIST_HEAD(&session_priv
->chan_head
);
166 INIT_LIST_HEAD(&session_priv
->events_head
);
167 lttng_guid_gen(&session_priv
->uuid
);
169 metadata_cache
= kzalloc(sizeof(struct lttng_metadata_cache
),
172 goto err_free_session_private
;
173 metadata_cache
->data
= vzalloc(METADATA_CACHE_DEFAULT_SIZE
);
174 if (!metadata_cache
->data
)
176 metadata_cache
->cache_alloc
= METADATA_CACHE_DEFAULT_SIZE
;
177 kref_init(&metadata_cache
->refcount
);
178 mutex_init(&metadata_cache
->lock
);
179 session_priv
->metadata_cache
= metadata_cache
;
180 INIT_LIST_HEAD(&metadata_cache
->metadata_stream
);
181 memcpy(&metadata_cache
->uuid
, &session_priv
->uuid
,
182 sizeof(metadata_cache
->uuid
));
183 INIT_LIST_HEAD(&session_priv
->enablers_head
);
184 for (i
= 0; i
< LTTNG_EVENT_HT_SIZE
; i
++)
185 INIT_HLIST_HEAD(&session_priv
->events_name_ht
.table
[i
]);
186 for (i
= 0; i
< LTTNG_EVENT_HT_SIZE
; i
++)
187 INIT_HLIST_HEAD(&session_priv
->events_key_ht
.table
[i
]);
188 list_add(&session_priv
->node
, &sessions
);
190 if (lttng_id_tracker_init(&session
->pid_tracker
, session
, TRACKER_PID
))
191 goto tracker_alloc_error
;
192 if (lttng_id_tracker_init(&session
->vpid_tracker
, session
, TRACKER_VPID
))
193 goto tracker_alloc_error
;
194 if (lttng_id_tracker_init(&session
->uid_tracker
, session
, TRACKER_UID
))
195 goto tracker_alloc_error
;
196 if (lttng_id_tracker_init(&session
->vuid_tracker
, session
, TRACKER_VUID
))
197 goto tracker_alloc_error
;
198 if (lttng_id_tracker_init(&session
->gid_tracker
, session
, TRACKER_GID
))
199 goto tracker_alloc_error
;
200 if (lttng_id_tracker_init(&session
->vgid_tracker
, session
, TRACKER_VGID
))
201 goto tracker_alloc_error
;
203 mutex_unlock(&sessions_mutex
);
208 lttng_id_tracker_fini(&session
->pid_tracker
);
209 lttng_id_tracker_fini(&session
->vpid_tracker
);
210 lttng_id_tracker_fini(&session
->uid_tracker
);
211 lttng_id_tracker_fini(&session
->vuid_tracker
);
212 lttng_id_tracker_fini(&session
->gid_tracker
);
213 lttng_id_tracker_fini(&session
->vgid_tracker
);
215 kfree(metadata_cache
);
216 err_free_session_private
:
217 lttng_kvfree(session_priv
);
219 lttng_kvfree(session
);
221 mutex_unlock(&sessions_mutex
);
226 struct lttng_counter_transport
*lttng_counter_transport_find(const char *name
)
228 struct lttng_counter_transport
*transport
;
230 list_for_each_entry(transport
, <tng_counter_transport_list
, node
) {
231 if (!strcmp(transport
->name
, name
))
237 struct lttng_kernel_channel_counter
*lttng_kernel_counter_create(
238 const char *counter_transport_name
,
239 size_t number_dimensions
,
240 const struct lttng_kernel_counter_dimension
*dimensions
,
241 int64_t global_sum_step
,
244 struct lttng_counter_transport
*counter_transport
= NULL
;
245 struct lttng_kernel_channel_counter
*counter
= NULL
;
247 counter_transport
= lttng_counter_transport_find(counter_transport_name
);
248 if (!counter_transport
) {
249 printk(KERN_WARNING
"LTTng: counter transport %s not found.\n",
250 counter_transport_name
);
253 if (!try_module_get(counter_transport
->owner
)) {
254 printk(KERN_WARNING
"LTTng: Can't lock counter transport module.\n");
258 counter
= counter_transport
->ops
.priv
->counter_create(number_dimensions
, dimensions
,
263 /* Create event notifier error counter. */
264 counter
->ops
= &counter_transport
->ops
;
265 counter
->priv
->parent
.coalesce_hits
= coalesce_hits
;
266 counter
->priv
->transport
= counter_transport
;
267 mutex_init(&counter
->priv
->map
.lock
);
272 if (counter_transport
)
273 module_put(counter_transport
->owner
);
279 void lttng_kernel_counter_destroy(struct lttng_kernel_channel_counter
*counter
)
281 struct lttng_counter_transport
*counter_transport
= counter
->priv
->transport
;
283 lttng_kvfree(counter
->priv
->map
.descriptors
);
284 counter
->ops
->priv
->counter_destroy(counter
);
285 module_put(counter_transport
->owner
);
288 struct lttng_event_notifier_group
*lttng_event_notifier_group_create(void)
290 struct lttng_transport
*transport
= NULL
;
291 struct lttng_event_notifier_group
*event_notifier_group
;
292 const char *transport_name
= "relay-event-notifier";
293 size_t subbuf_size
= 4096; //TODO
294 size_t num_subbuf
= 16; //TODO
295 unsigned int switch_timer_interval
= 0;
296 unsigned int read_timer_interval
= 0;
299 mutex_lock(&sessions_mutex
);
301 transport
= lttng_transport_find(transport_name
);
303 printk(KERN_WARNING
"LTTng: transport %s not found\n",
307 if (!try_module_get(transport
->owner
)) {
308 printk(KERN_WARNING
"LTTng: Can't lock transport %s module.\n",
313 event_notifier_group
= lttng_kvzalloc(sizeof(struct lttng_event_notifier_group
),
315 if (!event_notifier_group
)
319 * Initialize the ring buffer used to store event notifier
322 event_notifier_group
->ops
= &transport
->ops
;
323 event_notifier_group
->chan
= transport
->ops
.priv
->channel_create(
324 transport_name
, event_notifier_group
, NULL
,
325 subbuf_size
, num_subbuf
, switch_timer_interval
,
326 read_timer_interval
);
327 if (!event_notifier_group
->chan
)
330 event_notifier_group
->transport
= transport
;
332 INIT_LIST_HEAD(&event_notifier_group
->enablers_head
);
333 INIT_LIST_HEAD(&event_notifier_group
->event_notifiers_head
);
334 for (i
= 0; i
< LTTNG_EVENT_HT_SIZE
; i
++)
335 INIT_HLIST_HEAD(&event_notifier_group
->events_name_ht
.table
[i
]);
337 list_add(&event_notifier_group
->node
, &event_notifier_groups
);
339 mutex_unlock(&sessions_mutex
);
341 return event_notifier_group
;
344 lttng_kvfree(event_notifier_group
);
347 module_put(transport
->owner
);
349 mutex_unlock(&sessions_mutex
);
353 void metadata_cache_destroy(struct kref
*kref
)
355 struct lttng_metadata_cache
*cache
=
356 container_of(kref
, struct lttng_metadata_cache
, refcount
);
361 void lttng_session_destroy(struct lttng_kernel_session
*session
)
363 struct lttng_kernel_channel_common_private
*chan_priv
, *tmpchan_priv
;
364 struct lttng_kernel_event_recorder_private
*event_recorder_priv
, *tmpevent_recorder_priv
;
365 struct lttng_metadata_stream
*metadata_stream
;
366 struct lttng_event_enabler_common
*event_enabler
, *tmp_event_enabler
;
369 mutex_lock(&sessions_mutex
);
370 WRITE_ONCE(session
->active
, 0);
371 list_for_each_entry(chan_priv
, &session
->priv
->chan_head
, node
) {
372 ret
= lttng_syscalls_unregister_syscall_table(&chan_priv
->syscall_table
);
375 list_for_each_entry(event_recorder_priv
, &session
->priv
->events_head
, parent
.parent
.node
)
376 _lttng_event_unregister(&event_recorder_priv
->pub
->parent
);
377 synchronize_trace(); /* Wait for in-flight events to complete */
378 list_for_each_entry(chan_priv
, &session
->priv
->chan_head
, node
) {
379 ret
= lttng_syscalls_destroy_syscall_table(&chan_priv
->syscall_table
);
382 list_for_each_entry_safe(event_enabler
, tmp_event_enabler
, &session
->priv
->enablers_head
, node
)
383 lttng_event_enabler_destroy(event_enabler
);
384 list_for_each_entry_safe(event_recorder_priv
, tmpevent_recorder_priv
, &session
->priv
->events_head
, parent
.parent
.node
)
385 _lttng_event_destroy(&event_recorder_priv
->pub
->parent
);
386 list_for_each_entry_safe(chan_priv
, tmpchan_priv
, &session
->priv
->chan_head
, node
) {
387 _lttng_channel_destroy(chan_priv
->pub
);
389 mutex_lock(&session
->priv
->metadata_cache
->lock
);
390 list_for_each_entry(metadata_stream
, &session
->priv
->metadata_cache
->metadata_stream
, node
)
391 _lttng_metadata_channel_hangup(metadata_stream
);
392 mutex_unlock(&session
->priv
->metadata_cache
->lock
);
393 lttng_id_tracker_fini(&session
->pid_tracker
);
394 lttng_id_tracker_fini(&session
->vpid_tracker
);
395 lttng_id_tracker_fini(&session
->uid_tracker
);
396 lttng_id_tracker_fini(&session
->vuid_tracker
);
397 lttng_id_tracker_fini(&session
->gid_tracker
);
398 lttng_id_tracker_fini(&session
->vgid_tracker
);
399 kref_put(&session
->priv
->metadata_cache
->refcount
, metadata_cache_destroy
);
400 list_del(&session
->priv
->node
);
401 mutex_unlock(&sessions_mutex
);
402 lttng_kvfree(session
->priv
);
403 lttng_kvfree(session
);
406 void lttng_event_notifier_group_destroy(
407 struct lttng_event_notifier_group
*event_notifier_group
)
409 struct lttng_event_enabler_common
*event_enabler
, *tmp_event_enabler
;
410 struct lttng_kernel_event_notifier_private
*event_notifier_priv
, *tmpevent_notifier_priv
;
413 if (!event_notifier_group
)
416 mutex_lock(&sessions_mutex
);
418 ret
= lttng_syscalls_unregister_syscall_table(&event_notifier_group
->syscall_table
);
421 list_for_each_entry_safe(event_notifier_priv
, tmpevent_notifier_priv
,
422 &event_notifier_group
->event_notifiers_head
, parent
.node
)
423 _lttng_event_unregister(&event_notifier_priv
->pub
->parent
);
425 /* Wait for in-flight event notifier to complete */
428 irq_work_sync(&event_notifier_group
->wakeup_pending
);
430 ret
= lttng_syscalls_destroy_syscall_table(&event_notifier_group
->syscall_table
);
433 list_for_each_entry_safe(event_enabler
, tmp_event_enabler
,
434 &event_notifier_group
->enablers_head
, node
)
435 lttng_event_enabler_destroy(event_enabler
);
437 list_for_each_entry_safe(event_notifier_priv
, tmpevent_notifier_priv
,
438 &event_notifier_group
->event_notifiers_head
, parent
.node
)
439 _lttng_event_destroy(&event_notifier_priv
->pub
->parent
);
441 if (event_notifier_group
->error_counter
)
442 lttng_kernel_counter_destroy(event_notifier_group
->error_counter
);
444 event_notifier_group
->ops
->priv
->channel_destroy(event_notifier_group
->chan
);
445 module_put(event_notifier_group
->transport
->owner
);
446 list_del(&event_notifier_group
->node
);
448 mutex_unlock(&sessions_mutex
);
449 lttng_kvfree(event_notifier_group
);
452 int lttng_session_statedump(struct lttng_kernel_session
*session
)
456 mutex_lock(&sessions_mutex
);
457 ret
= lttng_statedump_start(session
);
458 mutex_unlock(&sessions_mutex
);
462 int lttng_session_enable(struct lttng_kernel_session
*session
)
465 struct lttng_kernel_channel_common_private
*chan_priv
;
467 mutex_lock(&sessions_mutex
);
468 if (session
->active
) {
473 /* Set transient enabler state to "enabled" */
474 session
->priv
->tstate
= 1;
476 /* We need to sync enablers with session before activation. */
477 lttng_session_sync_event_enablers(session
);
480 * Snapshot the number of events per channel to know the type of header
483 list_for_each_entry(chan_priv
, &session
->priv
->chan_head
, node
) {
484 struct lttng_kernel_channel_buffer_private
*chan_buf_priv
;
486 if (chan_priv
->pub
->type
!= LTTNG_KERNEL_CHANNEL_TYPE_BUFFER
)
488 chan_buf_priv
= container_of(chan_priv
, struct lttng_kernel_channel_buffer_private
, parent
);
489 if (chan_buf_priv
->header_type
)
490 continue; /* don't change it if session stop/restart */
491 if (chan_buf_priv
->free_event_id
< 31)
492 chan_buf_priv
->header_type
= 1; /* compact */
494 chan_buf_priv
->header_type
= 2; /* large */
497 /* Clear each stream's quiescent state. */
498 list_for_each_entry(chan_priv
, &session
->priv
->chan_head
, node
) {
499 struct lttng_kernel_channel_buffer_private
*chan_buf_priv
;
501 if (chan_priv
->pub
->type
!= LTTNG_KERNEL_CHANNEL_TYPE_BUFFER
)
503 chan_buf_priv
= container_of(chan_priv
, struct lttng_kernel_channel_buffer_private
, parent
);
504 if (chan_buf_priv
->channel_type
!= METADATA_CHANNEL
)
505 lib_ring_buffer_clear_quiescent_channel(chan_buf_priv
->rb_chan
);
508 WRITE_ONCE(session
->active
, 1);
509 WRITE_ONCE(session
->priv
->been_active
, 1);
510 ret
= _lttng_session_metadata_statedump(session
);
512 WRITE_ONCE(session
->active
, 0);
515 ret
= lttng_statedump_start(session
);
517 WRITE_ONCE(session
->active
, 0);
519 mutex_unlock(&sessions_mutex
);
523 int lttng_session_disable(struct lttng_kernel_session
*session
)
526 struct lttng_kernel_channel_common_private
*chan_priv
;
528 mutex_lock(&sessions_mutex
);
529 if (!session
->active
) {
533 WRITE_ONCE(session
->active
, 0);
535 /* Set transient enabler state to "disabled" */
536 session
->priv
->tstate
= 0;
537 lttng_session_sync_event_enablers(session
);
539 /* Set each stream's quiescent state. */
540 list_for_each_entry(chan_priv
, &session
->priv
->chan_head
, node
) {
541 struct lttng_kernel_channel_buffer_private
*chan_buf_priv
;
543 if (chan_priv
->pub
->type
!= LTTNG_KERNEL_CHANNEL_TYPE_BUFFER
)
545 chan_buf_priv
= container_of(chan_priv
, struct lttng_kernel_channel_buffer_private
, parent
);
546 if (chan_buf_priv
->channel_type
!= METADATA_CHANNEL
)
547 lib_ring_buffer_set_quiescent_channel(chan_buf_priv
->rb_chan
);
550 mutex_unlock(&sessions_mutex
);
554 int lttng_session_metadata_regenerate(struct lttng_kernel_session
*session
)
557 struct lttng_kernel_channel_common_private
*chan_priv
;
558 struct lttng_kernel_event_recorder_private
*event_recorder_priv
;
559 struct lttng_metadata_cache
*cache
= session
->priv
->metadata_cache
;
560 struct lttng_metadata_stream
*stream
;
562 mutex_lock(&sessions_mutex
);
563 if (!session
->active
) {
568 mutex_lock(&cache
->lock
);
569 memset(cache
->data
, 0, cache
->cache_alloc
);
570 cache
->metadata_written
= 0;
572 list_for_each_entry(stream
, &session
->priv
->metadata_cache
->metadata_stream
, node
) {
573 stream
->metadata_out
= 0;
574 stream
->metadata_in
= 0;
576 mutex_unlock(&cache
->lock
);
578 session
->priv
->metadata_dumped
= 0;
579 list_for_each_entry(chan_priv
, &session
->priv
->chan_head
, node
) {
580 struct lttng_kernel_channel_buffer_private
*chan_buf_priv
;
582 if (chan_priv
->pub
->type
!= LTTNG_KERNEL_CHANNEL_TYPE_BUFFER
)
584 chan_buf_priv
= container_of(chan_priv
, struct lttng_kernel_channel_buffer_private
, parent
);
585 chan_buf_priv
->metadata_dumped
= 0;
588 list_for_each_entry(event_recorder_priv
, &session
->priv
->events_head
, parent
.parent
.node
) {
589 event_recorder_priv
->metadata_dumped
= 0;
592 ret
= _lttng_session_metadata_statedump(session
);
595 mutex_unlock(&sessions_mutex
);
600 bool is_channel_buffer_metadata(struct lttng_kernel_channel_common
*channel
)
602 struct lttng_kernel_channel_buffer
*chan_buf
;
604 if (channel
->type
!= LTTNG_KERNEL_CHANNEL_TYPE_BUFFER
)
606 chan_buf
= container_of(channel
, struct lttng_kernel_channel_buffer
, parent
);
607 if (chan_buf
->priv
->channel_type
== METADATA_CHANNEL
)
612 int lttng_channel_enable(struct lttng_kernel_channel_common
*channel
)
616 mutex_lock(&sessions_mutex
);
617 if (is_channel_buffer_metadata(channel
)) {
621 if (channel
->enabled
) {
625 /* Set transient enabler state to "enabled" */
626 channel
->priv
->tstate
= 1;
627 lttng_session_sync_event_enablers(channel
->session
);
628 /* Set atomically the state to "enabled" */
629 WRITE_ONCE(channel
->enabled
, 1);
631 mutex_unlock(&sessions_mutex
);
635 int lttng_channel_disable(struct lttng_kernel_channel_common
*channel
)
639 mutex_lock(&sessions_mutex
);
640 if (is_channel_buffer_metadata(channel
)) {
644 if (!channel
->enabled
) {
648 /* Set atomically the state to "disabled" */
649 WRITE_ONCE(channel
->enabled
, 0);
650 /* Set transient enabler state to "enabled" */
651 channel
->priv
->tstate
= 0;
652 lttng_session_sync_event_enablers(channel
->session
);
654 mutex_unlock(&sessions_mutex
);
658 int lttng_event_enable(struct lttng_kernel_event_common
*event
)
662 mutex_lock(&sessions_mutex
);
663 switch (event
->type
) {
664 case LTTNG_KERNEL_EVENT_TYPE_RECORDER
:
666 struct lttng_kernel_event_recorder
*event_recorder
=
667 container_of(event
, struct lttng_kernel_event_recorder
, parent
);
669 if (event_recorder
->chan
->priv
->channel_type
== METADATA_CHANNEL
) {
675 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER
:
677 case LTTNG_KERNEL_EVENT_TYPE_COUNTER
:
683 if (event
->enabled
) {
687 switch (event
->priv
->instrumentation
) {
688 case LTTNG_KERNEL_ABI_TRACEPOINT
:
690 case LTTNG_KERNEL_ABI_SYSCALL
:
692 case LTTNG_KERNEL_ABI_KPROBE
:
694 case LTTNG_KERNEL_ABI_KRETPROBE
:
698 case LTTNG_KERNEL_ABI_UPROBE
:
699 WRITE_ONCE(event
->enabled
, 1);
702 case LTTNG_KERNEL_ABI_FUNCTION
:
704 case LTTNG_KERNEL_ABI_NOOP
:
711 mutex_unlock(&sessions_mutex
);
715 int lttng_event_disable(struct lttng_kernel_event_common
*event
)
719 mutex_lock(&sessions_mutex
);
720 switch (event
->type
) {
721 case LTTNG_KERNEL_EVENT_TYPE_RECORDER
:
723 struct lttng_kernel_event_recorder
*event_recorder
=
724 container_of(event
, struct lttng_kernel_event_recorder
, parent
);
726 if (event_recorder
->chan
->priv
->channel_type
== METADATA_CHANNEL
) {
732 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER
:
734 case LTTNG_KERNEL_EVENT_TYPE_COUNTER
:
740 if (!event
->enabled
) {
744 switch (event
->priv
->instrumentation
) {
745 case LTTNG_KERNEL_ABI_TRACEPOINT
:
747 case LTTNG_KERNEL_ABI_SYSCALL
:
749 case LTTNG_KERNEL_ABI_KPROBE
:
751 case LTTNG_KERNEL_ABI_KRETPROBE
:
755 case LTTNG_KERNEL_ABI_UPROBE
:
756 WRITE_ONCE(event
->enabled
, 0);
759 case LTTNG_KERNEL_ABI_FUNCTION
:
761 case LTTNG_KERNEL_ABI_NOOP
:
768 mutex_unlock(&sessions_mutex
);
772 struct lttng_kernel_channel_buffer
*lttng_channel_buffer_create(struct lttng_kernel_session
*session
,
773 const char *transport_name
,
775 size_t subbuf_size
, size_t num_subbuf
,
776 unsigned int switch_timer_interval
,
777 unsigned int read_timer_interval
,
778 enum channel_type channel_type
)
780 struct lttng_kernel_channel_buffer
*chan
;
781 struct lttng_transport
*transport
= NULL
;
783 mutex_lock(&sessions_mutex
);
784 if (session
->priv
->been_active
&& channel_type
!= METADATA_CHANNEL
)
785 goto active
; /* Refuse to add channel to active session */
786 transport
= lttng_transport_find(transport_name
);
788 printk(KERN_WARNING
"LTTng: transport %s not found\n",
792 if (!try_module_get(transport
->owner
)) {
793 printk(KERN_WARNING
"LTTng: Can't lock transport module.\n");
796 chan
= lttng_kernel_alloc_channel_buffer();
799 chan
->parent
.session
= session
;
800 chan
->priv
->id
= session
->priv
->free_chan_id
++;
801 chan
->ops
= &transport
->ops
;
803 * Note: the channel creation op already writes into the packet
804 * headers. Therefore the "chan" information used as input
805 * should be already accessible.
807 chan
->priv
->rb_chan
= transport
->ops
.priv
->channel_create(transport_name
,
808 chan
, buf_addr
, subbuf_size
, num_subbuf
,
809 switch_timer_interval
, read_timer_interval
);
810 if (!chan
->priv
->rb_chan
)
812 chan
->priv
->parent
.tstate
= 1;
813 chan
->parent
.enabled
= 1;
814 chan
->priv
->transport
= transport
;
815 chan
->priv
->channel_type
= channel_type
;
816 list_add(&chan
->priv
->parent
.node
, &session
->priv
->chan_head
);
817 mutex_unlock(&sessions_mutex
);
821 lttng_kernel_free_channel_common(&chan
->parent
);
824 module_put(transport
->owner
);
827 mutex_unlock(&sessions_mutex
);
832 * Only used internally at session destruction for per-cpu channels, and
833 * when metadata channel is released.
834 * Needs to be called with sessions mutex held.
837 void lttng_kernel_buffer_destroy(struct lttng_kernel_channel_buffer
*chan
)
839 chan
->ops
->priv
->channel_destroy(chan
->priv
->rb_chan
);
840 module_put(chan
->priv
->transport
->owner
);
841 lttng_kernel_destroy_context(chan
->priv
->ctx
);
847 void _lttng_channel_destroy(struct lttng_kernel_channel_common
*chan
)
849 list_del(&chan
->priv
->node
);
851 switch (chan
->type
) {
852 case LTTNG_KERNEL_CHANNEL_TYPE_BUFFER
:
854 struct lttng_kernel_channel_buffer
*chan_buf
=
855 container_of(chan
, struct lttng_kernel_channel_buffer
, parent
);
856 lttng_kernel_buffer_destroy(chan_buf
);
859 case LTTNG_KERNEL_CHANNEL_TYPE_COUNTER
:
861 struct lttng_kernel_channel_counter
*chan_counter
=
862 container_of(chan
, struct lttng_kernel_channel_counter
, parent
);
863 lttng_kernel_counter_destroy(chan_counter
);
871 void lttng_metadata_channel_buffer_destroy(struct lttng_kernel_channel_buffer
*chan
)
873 BUG_ON(chan
->priv
->channel_type
!= METADATA_CHANNEL
);
875 /* Protect the metadata cache with the sessions_mutex. */
876 mutex_lock(&sessions_mutex
);
877 _lttng_channel_destroy(&chan
->parent
);
878 mutex_unlock(&sessions_mutex
);
880 EXPORT_SYMBOL_GPL(lttng_metadata_channel_buffer_destroy
);
883 void _lttng_metadata_channel_hangup(struct lttng_metadata_stream
*stream
)
885 stream
->finalized
= 1;
886 wake_up_interruptible(&stream
->read_wait
);
890 bool lttng_kernel_event_id_available(struct lttng_event_enabler_common
*event_enabler
)
892 struct lttng_kernel_abi_event
*event_param
= &event_enabler
->event_param
;
893 enum lttng_kernel_abi_instrumentation itype
= event_param
->instrumentation
;
895 switch (event_enabler
->enabler_type
) {
896 case LTTNG_EVENT_ENABLER_TYPE_RECORDER
:
898 struct lttng_event_recorder_enabler
*event_recorder_enabler
=
899 container_of(event_enabler
, struct lttng_event_recorder_enabler
, parent
.parent
);
900 struct lttng_kernel_channel_buffer
*chan
= event_recorder_enabler
->chan
;
903 case LTTNG_KERNEL_ABI_TRACEPOINT
:
905 case LTTNG_KERNEL_ABI_KPROBE
:
907 case LTTNG_KERNEL_ABI_SYSCALL
:
909 case LTTNG_KERNEL_ABI_UPROBE
:
910 if (chan
->priv
->free_event_id
== -1U)
913 case LTTNG_KERNEL_ABI_KRETPROBE
:
914 /* kretprobes require 2 event IDs. */
915 if (chan
->priv
->free_event_id
>= -2U)
923 case LTTNG_EVENT_ENABLER_TYPE_COUNTER
:
925 struct lttng_event_counter_enabler
*event_counter_enabler
=
926 container_of(event_enabler
, struct lttng_event_counter_enabler
, parent
.parent
);
927 struct lttng_kernel_channel_counter
*chan
= event_counter_enabler
->chan
;
928 size_t nr_dimensions
, max_nr_elem
;
930 if (lttng_kernel_counter_get_nr_dimensions(chan
, &nr_dimensions
))
932 WARN_ON_ONCE(nr_dimensions
!= 1);
933 if (nr_dimensions
!= 1)
935 if (lttng_kernel_counter_get_max_nr_elem(chan
, &max_nr_elem
))
938 case LTTNG_KERNEL_ABI_TRACEPOINT
:
940 case LTTNG_KERNEL_ABI_KPROBE
:
942 case LTTNG_KERNEL_ABI_SYSCALL
:
944 case LTTNG_KERNEL_ABI_UPROBE
:
945 if (chan
->priv
->free_index
>= max_nr_elem
)
948 case LTTNG_KERNEL_ABI_KRETPROBE
:
949 /* kretprobes require 2 event IDs. */
950 if (chan
->priv
->free_index
+ 1 >= max_nr_elem
)
958 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER
:
967 struct lttng_kernel_event_common
*lttng_kernel_event_alloc(struct lttng_event_enabler_common
*event_enabler
,
968 struct hlist_head
*key_head
,
969 const char *key_string
)
971 struct lttng_kernel_abi_event
*event_param
= &event_enabler
->event_param
;
972 enum lttng_kernel_abi_instrumentation itype
= event_param
->instrumentation
;
974 switch (event_enabler
->enabler_type
) {
975 case LTTNG_EVENT_ENABLER_TYPE_RECORDER
:
977 struct lttng_event_recorder_enabler
*event_recorder_enabler
=
978 container_of(event_enabler
, struct lttng_event_recorder_enabler
, parent
.parent
);
979 struct lttng_kernel_event_recorder
*event_recorder
;
980 struct lttng_kernel_event_recorder_private
*event_recorder_priv
;
981 struct lttng_kernel_channel_buffer
*chan
= event_recorder_enabler
->chan
;
983 WARN_ON_ONCE(key_head
); /* not implemented. */
984 event_recorder
= kmem_cache_zalloc(event_recorder_cache
, GFP_KERNEL
);
987 event_recorder_priv
= kmem_cache_zalloc(event_recorder_private_cache
, GFP_KERNEL
);
988 if (!event_recorder_priv
) {
989 kmem_cache_free(event_recorder_private_cache
, event_recorder
);
992 event_recorder_priv
->pub
= event_recorder
;
993 event_recorder_priv
->parent
.parent
.pub
= &event_recorder
->parent
;
994 event_recorder
->priv
= event_recorder_priv
;
995 event_recorder
->parent
.priv
= &event_recorder_priv
->parent
.parent
;
997 event_recorder
->parent
.type
= LTTNG_KERNEL_EVENT_TYPE_RECORDER
;
998 event_recorder
->parent
.run_filter
= lttng_kernel_interpret_event_filter
;
999 event_recorder
->priv
->parent
.parent
.instrumentation
= itype
;
1000 INIT_LIST_HEAD(&event_recorder
->priv
->parent
.parent
.filter_bytecode_runtime_head
);
1001 INIT_LIST_HEAD(&event_recorder
->priv
->parent
.parent
.enablers_ref_head
);
1003 event_recorder
->chan
= chan
;
1004 event_recorder
->priv
->parent
.chan
= &chan
->parent
;
1005 event_recorder
->priv
->parent
.id
= chan
->priv
->free_event_id
++;
1006 return &event_recorder
->parent
;
1008 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER
:
1010 struct lttng_event_notifier_enabler
*event_notifier_enabler
=
1011 container_of(event_enabler
, struct lttng_event_notifier_enabler
, parent
);
1012 struct lttng_kernel_event_notifier
*event_notifier
;
1013 struct lttng_kernel_event_notifier_private
*event_notifier_priv
;
1015 WARN_ON_ONCE(key_head
); /* not implemented. */
1016 event_notifier
= kmem_cache_zalloc(event_notifier_cache
, GFP_KERNEL
);
1017 if (!event_notifier
)
1019 event_notifier_priv
= kmem_cache_zalloc(event_notifier_private_cache
, GFP_KERNEL
);
1020 if (!event_notifier_priv
) {
1021 kmem_cache_free(event_notifier_private_cache
, event_notifier
);
1024 event_notifier_priv
->pub
= event_notifier
;
1025 event_notifier_priv
->parent
.pub
= &event_notifier
->parent
;
1026 event_notifier
->priv
= event_notifier_priv
;
1027 event_notifier
->parent
.priv
= &event_notifier_priv
->parent
;
1029 event_notifier
->parent
.type
= LTTNG_KERNEL_EVENT_TYPE_NOTIFIER
;
1030 event_notifier
->parent
.run_filter
= lttng_kernel_interpret_event_filter
;
1031 event_notifier
->priv
->parent
.instrumentation
= itype
;
1032 event_notifier
->priv
->parent
.user_token
= event_enabler
->user_token
;
1033 INIT_LIST_HEAD(&event_notifier
->priv
->parent
.filter_bytecode_runtime_head
);
1034 INIT_LIST_HEAD(&event_notifier
->priv
->parent
.enablers_ref_head
);
1036 event_notifier
->priv
->group
= event_notifier_enabler
->group
;
1037 event_notifier
->priv
->error_counter_index
= event_notifier_enabler
->error_counter_index
;
1038 event_notifier
->priv
->num_captures
= 0;
1039 event_notifier
->notification_send
= lttng_event_notifier_notification_send
;
1040 INIT_LIST_HEAD(&event_notifier
->priv
->capture_bytecode_runtime_head
);
1041 return &event_notifier
->parent
;
1043 case LTTNG_EVENT_ENABLER_TYPE_COUNTER
:
1045 struct lttng_event_counter_enabler
*event_counter_enabler
=
1046 container_of(event_enabler
, struct lttng_event_counter_enabler
, parent
.parent
);
1047 struct lttng_kernel_event_counter
*event_counter
;
1048 struct lttng_kernel_event_counter_private
*event_counter_priv
;
1049 struct lttng_kernel_channel_counter
*chan
= event_counter_enabler
->chan
;
1050 bool key_found
= false;
1052 event_counter
= kmem_cache_zalloc(event_counter_cache
, GFP_KERNEL
);
1055 event_counter_priv
= kmem_cache_zalloc(event_counter_private_cache
, GFP_KERNEL
);
1056 if (!event_counter_priv
) {
1057 kmem_cache_free(event_counter_private_cache
, event_counter
);
1060 event_counter_priv
->pub
= event_counter
;
1061 event_counter_priv
->parent
.parent
.pub
= &event_counter
->parent
;
1062 event_counter
->priv
= event_counter_priv
;
1063 event_counter
->parent
.priv
= &event_counter_priv
->parent
.parent
;
1065 event_counter
->parent
.type
= LTTNG_KERNEL_EVENT_TYPE_COUNTER
;
1066 event_counter
->parent
.run_filter
= lttng_kernel_interpret_event_filter
;
1067 event_counter
->priv
->parent
.parent
.instrumentation
= itype
;
1068 INIT_LIST_HEAD(&event_counter
->priv
->parent
.parent
.filter_bytecode_runtime_head
);
1069 INIT_LIST_HEAD(&event_counter
->priv
->parent
.parent
.enablers_ref_head
);
1071 event_counter
->chan
= chan
;
1072 event_counter
->priv
->parent
.chan
= &chan
->parent
;
1073 if (!chan
->priv
->parent
.coalesce_hits
)
1074 event_counter
->priv
->parent
.parent
.user_token
= event_counter_enabler
->parent
.parent
.user_token
;
1075 strcpy(event_counter_priv
->key
, key_string
);
1076 event_counter_priv
->action
= event_counter_enabler
->action
;
1078 struct lttng_kernel_event_counter_private
*event_counter_priv_iter
;
1080 lttng_hlist_for_each_entry(event_counter_priv_iter
, key_head
, hlist_key_node
) {
1081 if (!strcmp(key_string
, event_counter_priv_iter
->key
)) {
1082 /* Same key, use same id. */
1084 event_counter
->priv
->parent
.id
= event_counter_priv_iter
->parent
.id
;
1090 event_counter
->priv
->parent
.id
= chan
->priv
->free_index
++;
1091 return &event_counter
->parent
;
/*
 * Release both the public and the private part of an event object to
 * their per-type kmem caches. Counterpart of the event allocation path;
 * frees memory only, performs no unregistration.
 */
void lttng_kernel_event_free(struct lttng_kernel_event_common *event)
{
	switch (event->type) {
	case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
	{
		struct lttng_kernel_event_recorder *event_recorder =
			container_of(event, struct lttng_kernel_event_recorder, parent);

		kmem_cache_free(event_recorder_private_cache, event_recorder->priv);
		kmem_cache_free(event_recorder_cache, event_recorder);
		break;
	}
	case LTTNG_KERNEL_EVENT_TYPE_COUNTER:
	{
		struct lttng_kernel_event_counter *event_counter =
			container_of(event, struct lttng_kernel_event_counter, parent);

		kmem_cache_free(event_counter_private_cache, event_counter->priv);
		kmem_cache_free(event_counter_cache, event_counter);
		break;
	}
	case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
	{
		struct lttng_kernel_event_notifier *event_notifier =
			container_of(event, struct lttng_kernel_event_notifier, parent);

		kmem_cache_free(event_notifier_private_cache, event_notifier->priv);
		kmem_cache_free(event_notifier_cache, event_notifier);
		break;
	}
	default:
		/* NOTE(review): default arm elided by extraction; WARN is the file's convention. */
		WARN_ON_ONCE(1);
	}
}
/*
 * Clear the error-counter bucket associated with an event notifier.
 * No-op (success) for recorder and counter events, which have no error
 * counter. Returns 0 on success, negative error on out-of-bound index
 * or counter-clear failure.
 */
int lttng_kernel_event_notifier_clear_error_counter(struct lttng_kernel_event_common *event)
{
	switch (event->type) {
	case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
		lttng_fallthrough;
	case LTTNG_KERNEL_EVENT_TYPE_COUNTER:
		return 0;
	case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
	{
		struct lttng_kernel_event_notifier *event_notifier =
			container_of(event, struct lttng_kernel_event_notifier, parent);
		struct lttng_kernel_channel_counter *error_counter;
		struct lttng_event_notifier_group *event_notifier_group = event_notifier->priv->group;
		size_t dimension_index[1];
		int ret;

		/*
		 * Clear the error counter bucket. The sessiond keeps track of which
		 * bucket is currently in use. We trust it. The session lock
		 * synchronizes against concurrent creation of the error
		 * counter.
		 */
		error_counter = event_notifier_group->error_counter;
		/* No error counter configured for this group: nothing to clear. */
		if (!error_counter)
			return 0;
		/*
		 * Check that the index is within the boundary of the counter.
		 */
		if (event_notifier->priv->error_counter_index >= event_notifier_group->error_counter_len) {
			printk(KERN_INFO "LTTng: event_notifier: Error counter index out-of-bound: counter-len=%zu, index=%llu\n",
				event_notifier_group->error_counter_len, event_notifier->priv->error_counter_index);
			return -EINVAL;
		}

		dimension_index[0] = event_notifier->priv->error_counter_index;
		ret = error_counter->ops->priv->counter_clear(error_counter, dimension_index);
		if (ret) {
			printk(KERN_INFO "LTTng: event_notifier: Unable to clear error counter bucket %llu\n",
				event_notifier->priv->error_counter_index);
			return -EINVAL;
		}
		return 0;
	}
	default:
		return -EINVAL;
	}
}
/*
 * Build the counter key string for an event from the enabler's key
 * token list, concatenating string tokens and the event name into
 * key_string (capacity LTTNG_KERNEL_COUNTER_KEY_LEN, assumed
 * zero-initialized by the caller). Returns 0 when there is no key to
 * format, nonzero when a key was produced, negative on error.
 */
int format_event_key(struct lttng_event_enabler_common *event_enabler, char *key_string,
		const char *event_name)
{
	struct lttng_event_counter_enabler *event_counter_enabler;
	const struct lttng_kernel_counter_key_dimension *dim;
	size_t i, left = LTTNG_KERNEL_COUNTER_KEY_LEN;
	const struct lttng_kernel_counter_key *key;

	/* Only counter enablers carry keys. */
	if (event_enabler->enabler_type != LTTNG_EVENT_ENABLER_TYPE_COUNTER)
		return 0;
	event_counter_enabler = container_of(event_enabler, struct lttng_event_counter_enabler, parent.parent);
	key = event_counter_enabler->key;
	if (!key->nr_dimensions)
		return 0;
	/* Currently event keys can only be specified on a single dimension. */
	if (key->nr_dimensions != 1)
		return -EINVAL;
	dim = &key->dimension_array[0];
	for (i = 0; i < dim->nr_key_tokens; i++) {
		const struct lttng_key_token *token = &dim->token_array[i];
		size_t token_len;
		const char *str;

		switch (token->type) {
		case LTTNG_KEY_TOKEN_STRING:
			/* NOTE(review): token-payload access elided by extraction — confirm field name. */
			str = token->arg.string;
			break;
		case LTTNG_KEY_TOKEN_EVENT_NAME:
			str = event_name;
			break;
		default:
			return -EINVAL;
		}
		token_len = strlen(str);
		/* >= keeps room for the terminating NUL. */
		if (token_len >= left)
			return -EINVAL;
		left -= token_len;
		strcat(key_string, str);
	}
	return 1;
}
/*
 * Return true when the event matches the given counter key string.
 * Recorder and notifier events have no key and always match; a counter
 * event matches when the key string is empty or equal to its own key.
 */
bool match_event_key(struct lttng_kernel_event_common *event, const char *key_string)
{
	switch (event->type) {
	case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
		lttng_fallthrough;
	case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
		return true;
	case LTTNG_KERNEL_EVENT_TYPE_COUNTER:
	{
		struct lttng_kernel_event_counter_private *event_counter_priv =
			container_of(event->priv, struct lttng_kernel_event_counter_private, parent.parent);

		if (key_string[0] == '\0')
			return true;
		return !strcmp(key_string, event_counter_priv->key);
	}
	default:
		WARN_ON_ONCE(1);
		return false;
	}
}
/*
 * Return true when a session event matches an enabler token: either the
 * channel coalesces hits (any token matches) or the tokens are equal.
 * NOTE(review): second parameter line elided by extraction; uint64_t
 * inferred from the user_token comparison — confirm against header.
 */
bool match_event_session_token(struct lttng_kernel_event_session_common_private *event_session_priv,
		uint64_t token)
{
	if (event_session_priv->chan->priv->coalesce_hits)
		return true;
	if (event_session_priv->parent.user_token == token)
		return true;
	return false;
}
/*
 * Decide whether an existing event matches an enabler by name/key/token.
 * For session enablers (recorder, counter): event name, channel, token
 * and counter key must all match. For notifier enablers: name and user
 * token must match.
 */
bool lttng_event_enabler_event_name_key_match_event(struct lttng_event_enabler_common *event_enabler,
		const char *event_name, const char *key_string, struct lttng_kernel_event_common *event)
{
	switch (event_enabler->enabler_type) {
	case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
		lttng_fallthrough;
	case LTTNG_EVENT_ENABLER_TYPE_COUNTER:
	{
		struct lttng_event_enabler_session_common *event_session_enabler =
			container_of(event_enabler, struct lttng_event_enabler_session_common, parent);
		struct lttng_kernel_event_session_common_private *event_session_priv =
			container_of(event->priv, struct lttng_kernel_event_session_common_private, parent);
		bool same_event = false, same_channel = false, same_key = false,
			same_token = false;

		WARN_ON_ONCE(!event->priv->desc);
		if (!strncmp(event->priv->desc->event_name, event_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1))
			same_event = true;
		if (event_session_enabler->chan == event_session_priv->chan) {
			same_channel = true;
			/* Token match only considered within the same channel. */
			if (match_event_session_token(event_session_priv, event_enabler->user_token))
				same_token = true;
		}
		if (match_event_key(event, key_string))
			same_key = true;
		return same_event && same_channel && same_key && same_token;
	}
	case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
	{
		/*
		 * Check if event_notifier already exists by checking
		 * if the event_notifier and enabler share the same
		 * description and id.
		 */
		if (!strncmp(event->priv->desc->event_name, event_name, LTTNG_KERNEL_ABI_SYM_NAME_LEN - 1)
				&& event->priv->user_token == event_enabler->user_token)
			return true;
		else
			return false;
	}
	default:
		WARN_ON_ONCE(1);
		return false;
	}
}
/*
 * Append a (user_token, array_index, key) descriptor to a counter
 * channel's map, growing the descriptor table (at least doubling) when
 * full. Serialized by map->lock. Returns 0 on success, negative errno
 * on oversized key or allocation failure.
 * NOTE(review): parameter lines 3-4 and error paths were elided by
 * extraction; index/key parameters and -ENOMEM/-EOVERFLOW handling
 * reconstructed — confirm against upstream.
 */
int lttng_counter_append_descriptor(struct lttng_kernel_channel_counter *counter,
		uint64_t user_token,
		size_t index,
		const char *key)
{
	struct lttng_counter_map *map = &counter->priv->map;
	struct lttng_counter_map_descriptor *last;
	int ret = 0;

	/* Key must fit, including terminating NUL, in the descriptor. */
	if (strlen(key) >= LTTNG_KERNEL_COUNTER_KEY_LEN) {
		WARN_ON_ONCE(1);
		return -EOVERFLOW;
	}
	mutex_lock(&map->lock);
	if (map->nr_descriptors == map->alloc_len) {
		struct lttng_counter_map_descriptor *new_table, *old_table;
		size_t old_len = map->nr_descriptors;
		/* Grow geometrically; old_len + 1 covers the initial 0-length table. */
		size_t new_len = max_t(size_t, old_len + 1, map->alloc_len * 2);

		old_table = map->descriptors;
		new_table = lttng_kvzalloc(sizeof(struct lttng_counter_map_descriptor) * new_len,
				GFP_KERNEL);
		if (!new_table) {
			ret = -ENOMEM;
			goto unlock;
		}

		if (old_table)
			memcpy(new_table, old_table, old_len * sizeof(struct lttng_counter_map_descriptor));

		map->descriptors = new_table;
		map->alloc_len = new_len;
		lttng_kvfree(old_table);
	}
	last = &map->descriptors[map->nr_descriptors++];
	last->user_token = user_token;
	last->array_index = index;
	strcpy(last->key, key);	/* Length checked above. */
unlock:
	mutex_unlock(&map->lock);
	return ret;
}
/*
 * Record a counter event in its channel's descriptor map, labelled by
 * its key when one is set, otherwise by the event name. No-op (success)
 * for non-counter enablers.
 */
int lttng_append_event_to_channel_map(struct lttng_event_enabler_common *event_enabler,
		struct lttng_kernel_event_common *event,
		const char *event_name)
{
	struct lttng_event_counter_enabler *event_counter_enabler;
	struct lttng_kernel_channel_counter *chan_counter;
	struct lttng_kernel_event_counter *event_counter;
	const char *name = "<UNKNOWN>";

	if (event_enabler->enabler_type != LTTNG_EVENT_ENABLER_TYPE_COUNTER)
		return 0;
	event_counter_enabler = container_of(event_enabler, struct lttng_event_counter_enabler, parent.parent);
	event_counter = container_of(event, struct lttng_kernel_event_counter, parent);
	chan_counter = event_counter_enabler->chan;
	if (event_counter->priv->key[0])
		name = event_counter->priv->key;
	else
		name = event_name;	/* NOTE(review): else-branch elided by extraction — confirm. */
	return lttng_counter_append_descriptor(chan_counter, event_enabler->user_token,
			event_counter->priv->parent.id, name);
}
1377 * Supports event creation while tracing session is active.
1378 * Needs to be called with sessions mutex held.
1380 struct lttng_kernel_event_common
*_lttng_kernel_event_create(struct lttng_event_enabler_common
*event_enabler
,
1381 const struct lttng_kernel_event_desc
*event_desc
,
1382 struct lttng_kernel_event_pair
*event_pair
)
1384 char key_string
[LTTNG_KERNEL_COUNTER_KEY_LEN
] = { 0 };
1385 struct lttng_event_ht
*events_name_ht
= lttng_get_events_name_ht_from_enabler(event_enabler
);
1386 struct lttng_event_ht
*events_key_ht
= lttng_get_events_key_ht_from_enabler(event_enabler
);
1387 struct list_head
*event_list_head
= lttng_get_event_list_head_from_enabler(event_enabler
);
1388 struct lttng_kernel_abi_event
*event_param
= &event_enabler
->event_param
;
1389 enum lttng_kernel_abi_instrumentation itype
= event_param
->instrumentation
;
1390 struct lttng_kernel_event_common_private
*event_priv_iter
;
1391 struct lttng_kernel_event_common
*event
;
1392 struct hlist_head
*name_head
, *key_head
= NULL
;
1393 const char *event_name
;
1396 if (event_pair
== NULL
|| event_pair
->check_ids
) {
1397 if (!lttng_kernel_event_id_available(event_enabler
)) {
1404 case LTTNG_KERNEL_ABI_TRACEPOINT
:
1405 event_name
= event_desc
->event_name
;
1408 case LTTNG_KERNEL_ABI_KPROBE
:
1410 case LTTNG_KERNEL_ABI_UPROBE
:
1412 case LTTNG_KERNEL_ABI_SYSCALL
:
1413 event_name
= event_param
->name
;
1416 case LTTNG_KERNEL_ABI_KRETPROBE
:
1417 event_name
= event_pair
->name
;
1420 case LTTNG_KERNEL_ABI_FUNCTION
:
1422 case LTTNG_KERNEL_ABI_NOOP
:
1430 if (format_event_key(event_enabler
, key_string
, event_name
)) {
1435 name_head
= utils_borrow_hash_table_bucket(events_name_ht
->table
, LTTNG_EVENT_HT_SIZE
, event_name
);
1436 lttng_hlist_for_each_entry(event_priv_iter
, name_head
, hlist_name_node
) {
1437 if (lttng_event_enabler_event_name_key_match_event(event_enabler
,
1438 event_name
, key_string
, event_priv_iter
->pub
)) {
1444 if (key_string
[0] != '\0')
1445 key_head
= utils_borrow_hash_table_bucket(events_key_ht
->table
, LTTNG_EVENT_HT_SIZE
, key_string
);
1447 event
= lttng_kernel_event_alloc(event_enabler
, key_head
, key_string
);
1454 case LTTNG_KERNEL_ABI_TRACEPOINT
:
1455 /* Event will be enabled by enabler sync. */
1457 event
->priv
->registered
= 0;
1458 event
->priv
->desc
= lttng_event_desc_get(event_name
);
1459 if (!event
->priv
->desc
) {
1461 goto register_error
;
1463 /* Populate lttng_event structure before event registration. */
1467 case LTTNG_KERNEL_ABI_KPROBE
:
1468 /* Event will be enabled by enabler sync. */
1470 event
->priv
->registered
= 0;
1471 ret
= lttng_kprobes_init_event(event_name
,
1472 event_param
->u
.kprobe
.symbol_name
,
1473 event_param
->u
.kprobe
.offset
,
1474 event_param
->u
.kprobe
.addr
,
1478 goto register_error
;
1481 * Populate lttng_event structure before event
1485 ret
= try_module_get(event
->priv
->desc
->owner
);
1489 case LTTNG_KERNEL_ABI_KRETPROBE
:
1491 /* Event will be enabled by enabler sync. */
1493 event
->priv
->registered
= 0;
1494 ret
= lttng_kretprobes_init_event(event_name
,
1495 event_pair
->entryexit
,
1496 event
, event_pair
->krp
);
1499 goto register_error
;
1501 ret
= try_module_get(event
->priv
->desc
->owner
);
1506 case LTTNG_KERNEL_ABI_SYSCALL
:
1507 /* Event will be enabled by enabler sync. */
1509 event
->priv
->registered
= 0;
1510 event
->priv
->desc
= event_desc
;
1511 switch (event_param
->u
.syscall
.entryexit
) {
1512 case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT
:
1514 goto register_error
;
1515 case LTTNG_KERNEL_ABI_SYSCALL_ENTRY
:
1516 event
->priv
->u
.syscall
.entryexit
= LTTNG_SYSCALL_ENTRY
;
1518 case LTTNG_KERNEL_ABI_SYSCALL_EXIT
:
1519 event
->priv
->u
.syscall
.entryexit
= LTTNG_SYSCALL_EXIT
;
1522 switch (event_param
->u
.syscall
.abi
) {
1523 case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL
:
1525 goto register_error
;
1526 case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE
:
1527 event
->priv
->u
.syscall
.abi
= LTTNG_SYSCALL_ABI_NATIVE
;
1529 case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT
:
1530 event
->priv
->u
.syscall
.abi
= LTTNG_SYSCALL_ABI_COMPAT
;
1533 if (!event
->priv
->desc
) {
1535 goto register_error
;
1539 case LTTNG_KERNEL_ABI_UPROBE
:
1541 * Needs to be explicitly enabled after creation, since
1542 * we may want to apply filters.
1545 event
->priv
->registered
= 1;
1548 * Populate lttng_event structure before event
1553 ret
= lttng_uprobes_register_event(event_param
->name
,
1554 event_param
->u
.uprobe
.fd
,
1557 goto register_error
;
1558 ret
= try_module_get(event
->priv
->desc
->owner
);
1560 ret
= lttng_append_event_to_channel_map(event_enabler
, event
, event_name
);
1567 goto register_error
;
1570 ret
= _lttng_event_recorder_metadata_statedump(event
);
1571 WARN_ON_ONCE(ret
> 0);
1573 goto statedump_error
;
1576 ret
= lttng_kernel_event_notifier_clear_error_counter(event
);
1578 goto register_error
;
1581 hlist_add_head(&event
->priv
->hlist_name_node
, name_head
);
1583 struct lttng_kernel_event_counter_private
*event_counter_priv
=
1584 container_of(event
->priv
, struct lttng_kernel_event_counter_private
, parent
.parent
);
1585 hlist_add_head(&event_counter_priv
->hlist_key_node
, key_head
);
1587 list_add(&event
->priv
->node
, event_list_head
);
1592 /* If a statedump error occurs, events will not be readable. */
1594 lttng_kernel_event_free(event
);
1599 return ERR_PTR(ret
);
/*
 * Public wrapper for _lttng_kernel_event_create(): takes the global
 * sessions_mutex around event creation. Returns the new event or an
 * ERR_PTR on failure.
 */
struct lttng_kernel_event_common *lttng_kernel_event_create(struct lttng_event_enabler_common *event_enabler,
		const struct lttng_kernel_event_desc *event_desc,
		struct lttng_kernel_event_pair *event_pair)
{
	struct lttng_kernel_event_common *event;

	mutex_lock(&sessions_mutex);
	event = _lttng_kernel_event_create(event_enabler, event_desc, event_pair);
	mutex_unlock(&sessions_mutex);
	return event;
}
/*
 * Read one counter cell (identified by dim_indexes) for a given CPU.
 * Thin dispatch to the counter backend's counter_read operation.
 */
int lttng_kernel_counter_read(struct lttng_kernel_channel_counter *counter,
		const size_t *dim_indexes, int32_t cpu,
		int64_t *val, bool *overflow, bool *underflow)
{
	return counter->ops->priv->counter_read(counter, dim_indexes,
			cpu, val, overflow, underflow);
}
/*
 * Aggregate one counter cell across all CPUs. Thin dispatch to the
 * counter backend's counter_aggregate operation.
 */
int lttng_kernel_counter_aggregate(struct lttng_kernel_channel_counter *counter,
		const size_t *dim_indexes, int64_t *val,
		bool *overflow, bool *underflow)
{
	return counter->ops->priv->counter_aggregate(counter, dim_indexes,
			val, overflow, underflow);
}
/* Clear one counter cell. Thin dispatch to the backend's counter_clear. */
int lttng_kernel_counter_clear(struct lttng_kernel_channel_counter *counter,
		const size_t *dim_indexes)
{
	return counter->ops->priv->counter_clear(counter, dim_indexes);
}
/* Query the number of dimensions of a counter. Backend dispatch. */
int lttng_kernel_counter_get_nr_dimensions(struct lttng_kernel_channel_counter *counter,
		size_t *nr_dimensions)
{
	return counter->ops->priv->counter_get_nr_dimensions(counter, nr_dimensions);
}
/* Query the maximum number of elements per dimension. Backend dispatch. */
int lttng_kernel_counter_get_max_nr_elem(struct lttng_kernel_channel_counter *counter,
		size_t *max_nr_elem)
{
	return counter->ops->priv->counter_get_max_nr_elem(counter, max_nr_elem);
}
/* Used for tracepoints, system calls, and kprobe. */
/*
 * Register the event with its instrumentation backend and mark it
 * registered on success. Must not be called on an already-registered
 * event (WARNs).
 * NOTE(review): per-case break/ret lines were elided by extraction;
 * success/failure plumbing reconstructed — confirm against upstream.
 */
void register_event(struct lttng_kernel_event_common *event)
{
	const struct lttng_kernel_event_desc *desc;
	int ret = -EINVAL;

	WARN_ON_ONCE(event->priv->registered);

	desc = event->priv->desc;
	switch (event->priv->instrumentation) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:
		ret = lttng_tracepoint_probe_register(desc->event_kname,
				desc->tp_class->probe_callback,
				event);
		break;
	case LTTNG_KERNEL_ABI_SYSCALL:
		ret = lttng_syscall_filter_enable_event(event);
		break;
	case LTTNG_KERNEL_ABI_KPROBE:
		ret = lttng_kprobes_register_event(event);
		break;
	case LTTNG_KERNEL_ABI_KRETPROBE:
		ret = lttng_kretprobes_register_event(event);
		break;
	case LTTNG_KERNEL_ABI_UPROBE:
		/* Uprobes are registered at creation time, not here. */
		ret = 0;
		break;
	case LTTNG_KERNEL_ABI_FUNCTION:
		lttng_fallthrough;
	case LTTNG_KERNEL_ABI_NOOP:
		lttng_fallthrough;
	default:
		WARN_ON_ONCE(1);
	}
	if (!ret)
		event->priv->registered = 1;
}
/*
 * Unregister the event from its instrumentation backend and clear its
 * registered flag on success. Must only be called on a registered
 * event (WARNs otherwise).
 * NOTE(review): several ret-assignment/break lines elided by
 * extraction; reconstructed — confirm against upstream.
 */
void unregister_event(struct lttng_kernel_event_common *event)
{
	struct lttng_kernel_event_common_private *event_priv = event->priv;
	const struct lttng_kernel_event_desc *desc;
	int ret = -EINVAL;

	WARN_ON_ONCE(!event->priv->registered);

	desc = event_priv->desc;
	switch (event_priv->instrumentation) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:
		ret = lttng_tracepoint_probe_unregister(event_priv->desc->event_kname,
				event_priv->desc->tp_class->probe_callback,
				event);
		break;
	case LTTNG_KERNEL_ABI_KPROBE:
		lttng_kprobes_unregister_event(event);
		ret = 0;
		break;
	case LTTNG_KERNEL_ABI_KRETPROBE:
		lttng_kretprobes_unregister_event(event);
		ret = 0;
		break;
	case LTTNG_KERNEL_ABI_SYSCALL:
		ret = lttng_syscall_filter_disable_event(event);
		break;
	case LTTNG_KERNEL_ABI_NOOP:
		switch (event->type) {
		case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
			lttng_fallthrough;
		case LTTNG_KERNEL_EVENT_TYPE_COUNTER:
			ret = 0;
			break;
		case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
			ret = -EINVAL;
			break;
		}
		break;
	case LTTNG_KERNEL_ABI_UPROBE:
		lttng_uprobes_unregister_event(event);
		ret = 0;
		break;
	case LTTNG_KERNEL_ABI_FUNCTION:
		lttng_fallthrough;
	default:
		WARN_ON_ONCE(1);
	}
	if (!ret)
		event_priv->registered = 0;
}
/* Unregister the event only if it is currently registered. */
void _lttng_event_unregister(struct lttng_kernel_event_common *event)
{
	if (event->priv->registered)
		unregister_event(event);
}
1760 * Only used internally at session destruction.
/*
 * Tear down an event: free its filter runtime and enabler refs, unlink
 * it from the event list and hash tables, release per-instrumentation
 * resources (descriptor ref / probe private data / module ref), then
 * free the event object itself. Caller holds the sessions mutex.
 * NOTE(review): break/default lines elided by extraction;
 * reconstructed — confirm against upstream.
 */
void _lttng_event_destroy(struct lttng_kernel_event_common *event)
{
	struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;

	lttng_free_event_filter_runtime(event);
	/* Free event enabler refs */
	list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
				 &event->priv->enablers_ref_head, node)
		kfree(enabler_ref);

	/* Remove from event list. */
	list_del(&event->priv->node);
	/* Remove from event hash table. */
	hlist_del(&event->priv->hlist_name_node);

	switch (event->type) {
	case LTTNG_KERNEL_EVENT_TYPE_COUNTER:
	{
		struct lttng_kernel_event_counter_private *event_counter_priv =
			container_of(event->priv, struct lttng_kernel_event_counter_private, parent.parent);

		/* Only keyed counter events sit in the key hash table. */
		if (event_counter_priv->key[0] != '\0')
			hlist_del(&event_counter_priv->hlist_key_node);
		break;
	}
	case LTTNG_KERNEL_EVENT_TYPE_RECORDER:
		lttng_fallthrough;
	case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER:
		break;
	default:
		WARN_ON_ONCE(1);
	}

	switch (event->priv->instrumentation) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:
		lttng_event_desc_put(event->priv->desc);
		break;
	case LTTNG_KERNEL_ABI_KPROBE:
		module_put(event->priv->desc->owner);
		lttng_kprobes_destroy_event_private(event);
		break;
	case LTTNG_KERNEL_ABI_KRETPROBE:
		module_put(event->priv->desc->owner);
		lttng_kretprobes_destroy_event_private(event);
		break;
	case LTTNG_KERNEL_ABI_SYSCALL:
		/* Syscall descriptors are static; nothing to release. */
		break;
	case LTTNG_KERNEL_ABI_UPROBE:
		module_put(event->priv->desc->owner);
		lttng_uprobes_destroy_event_private(event);
		break;
	case LTTNG_KERNEL_ABI_FUNCTION:
		lttng_fallthrough;
	case LTTNG_KERNEL_ABI_NOOP:
		lttng_fallthrough;
	default:
		WARN_ON_ONCE(1);
	}
	lttng_kernel_event_free(event);
}
/*
 * Map a tracker type to the corresponding per-session id tracker.
 * Returns NULL on unknown tracker type.
 * NOTE(review): case labels elided by extraction; reconstructed from
 * the returned fields — confirm enum names.
 */
struct lttng_kernel_id_tracker *get_tracker(struct lttng_kernel_session *session,
		enum tracker_type tracker_type)
{
	switch (tracker_type) {
	case TRACKER_PID:
		return &session->pid_tracker;
	case TRACKER_VPID:
		return &session->vpid_tracker;
	case TRACKER_UID:
		return &session->uid_tracker;
	case TRACKER_VUID:
		return &session->vuid_tracker;
	case TRACKER_GID:
		return &session->gid_tracker;
	case TRACKER_VGID:
		return &session->vgid_tracker;
	default:
		WARN_ON_ONCE(1);
		return NULL;
	}
}
/*
 * Start tracking an id in the given tracker. id == -1 means "track
 * all": the tracker is destroyed so everything matches. Takes the
 * sessions mutex.
 */
int lttng_session_track_id(struct lttng_kernel_session *session,
		enum tracker_type tracker_type, int id)
{
	struct lttng_kernel_id_tracker *tracker;
	int ret;

	tracker = get_tracker(session, tracker_type);
	if (!tracker)
		return -EINVAL;
	if (id < -1)
		return -EINVAL;
	mutex_lock(&sessions_mutex);
	if (id == -1) {
		/* track all ids: destroy tracker. */
		lttng_id_tracker_destroy(tracker, true);
		ret = 0;
	} else {
		ret = lttng_id_tracker_add(tracker, id);
	}
	mutex_unlock(&sessions_mutex);
	return ret;
}
/*
 * Stop tracking an id in the given tracker. id == -1 means "untrack
 * all": the tracker is replaced by an empty set so nothing matches.
 * Takes the sessions mutex.
 */
int lttng_session_untrack_id(struct lttng_kernel_session *session,
		enum tracker_type tracker_type, int id)
{
	struct lttng_kernel_id_tracker *tracker;
	int ret;

	tracker = get_tracker(session, tracker_type);
	if (!tracker)
		return -EINVAL;
	if (id < -1)
		return -EINVAL;
	mutex_lock(&sessions_mutex);
	if (id == -1) {
		/* untrack all ids: replace by empty tracker. */
		ret = lttng_id_tracker_empty_set(tracker);
	} else {
		ret = lttng_id_tracker_del(tracker, id);
	}
	mutex_unlock(&sessions_mutex);
	return ret;
}
/*
 * seq_file start operation for the id tracker listing. Takes the
 * sessions mutex (released in id_list_stop) and returns the hash node
 * at position *pos, the tracker pointer itself as a sentinel for a
 * disabled tracker, or NULL at end of list.
 * NOTE(review): loop/branch structure partially elided by extraction;
 * reconstructed — confirm against upstream.
 */
void *id_list_start(struct seq_file *m, loff_t *pos)
{
	struct lttng_kernel_id_tracker *id_tracker = m->private;
	struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	mutex_lock(&sessions_mutex);
	if (id_tracker_p) {
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *pos)
					return e;
			}
		}
	} else {
		/* ID tracker disabled. */
		if (iter >= *pos && iter == 0) {
			return id_tracker_p;	/* empty tracker */
		}
		iter++;
	}
	/* End of list */
	return NULL;
}
/* Called with sessions_mutex held. */
/*
 * seq_file next operation: advance *ppos and return the next hash node,
 * the empty-tracker sentinel, or NULL at end of list.
 * NOTE(review): *ppos increment line elided by extraction;
 * reconstructed — confirm against upstream.
 */
void *id_list_next(struct seq_file *m, void *p, loff_t *ppos)
{
	struct lttng_kernel_id_tracker *id_tracker = m->private;
	struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
	struct lttng_id_hash_node *e;
	int iter = 0, i;

	(*ppos)++;
	if (id_tracker_p) {
		for (i = 0; i < LTTNG_ID_TABLE_SIZE; i++) {
			struct hlist_head *head = &id_tracker_p->id_hash[i];

			lttng_hlist_for_each_entry(e, head, hlist) {
				if (iter++ >= *ppos)
					return e;
			}
		}
	} else {
		/* ID tracker disabled. */
		if (iter >= *ppos && iter == 0)
			return p;	/* empty tracker */
		iter++;
	}
	/* End of list */
	return NULL;
}
/* seq_file stop operation: releases the mutex taken in id_list_start. */
void id_list_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&sessions_mutex);
}
/*
 * seq_file show operation: print one tracked id in the tracker-type
 * specific format. The tracker pointer sentinel (disabled tracker) is
 * printed as id -1.
 * NOTE(review): case labels elided by extraction; reconstructed from
 * the printed field names — confirm enum names.
 */
int id_list_show(struct seq_file *m, void *p)
{
	struct lttng_kernel_id_tracker *id_tracker = m->private;
	struct lttng_kernel_id_tracker_rcu *id_tracker_p = id_tracker->p;
	int id;

	if (p == id_tracker_p) {
		/* Tracker disabled. */
		id = -1;
	} else {
		const struct lttng_id_hash_node *e = p;

		id = lttng_id_tracker_get_node_id(e);
	}
	switch (id_tracker->priv->tracker_type) {
	case TRACKER_PID:
		seq_printf(m,	"process { pid = %d; };\n", id);
		break;
	case TRACKER_VPID:
		seq_printf(m,	"process { vpid = %d; };\n", id);
		break;
	case TRACKER_UID:
		seq_printf(m,	"user { uid = %d; };\n", id);
		break;
	case TRACKER_VUID:
		seq_printf(m,	"user { vuid = %d; };\n", id);
		break;
	case TRACKER_GID:
		seq_printf(m,	"group { gid = %d; };\n", id);
		break;
	case TRACKER_VGID:
		seq_printf(m,	"group { vgid = %d; };\n", id);
		break;
	default:
		seq_printf(m,	"UNKNOWN { field = %d };\n", id);
		break;
	}
	return 0;
}
/* seq_file operations for the id tracker listing (start takes the
 * sessions mutex, stop releases it). */
const struct seq_operations lttng_tracker_ids_list_seq_ops = {
	.start = id_list_start,
	.next = id_list_next,
	.stop = id_list_stop,
	.show = id_list_show,
};
/* Open the id tracker listing as a seq_file. */
int lttng_tracker_ids_list_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &lttng_tracker_ids_list_seq_ops);
}
/*
 * Release the id tracker listing: drop the seq_file, then drop the
 * session file reference taken in lttng_session_list_tracker_ids().
 * NOTE(review): the "if (!ret)" guard line was elided by extraction;
 * reconstructed — confirm against upstream.
 */
int lttng_tracker_ids_list_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct lttng_kernel_id_tracker *id_tracker = m->private;
	int ret;

	WARN_ON_ONCE(!id_tracker);
	ret = seq_release(inode, file);
	if (!ret)
		fput(id_tracker->priv->session->priv->file);
	return ret;
}
/* File operations backing the anonymous id tracker listing fd. */
const struct file_operations lttng_tracker_ids_list_fops = {
	.owner = THIS_MODULE,
	.open = lttng_tracker_ids_list_open,
	/* NOTE(review): .read line elided by extraction; seq_read is the
	 * standard pairing with seq_lseek/seq_release — confirm. */
	.read = seq_read,
	.llseek = seq_lseek,
	.release = lttng_tracker_ids_list_release,
};
/*
 * Create an anonymous fd through which user space can read the tracked
 * id list of a session as a seq_file. Pins the session file (released
 * by lttng_tracker_ids_list_release). Returns the fd or a negative
 * errno.
 * NOTE(review): goto labels and some error assignments were elided by
 * extraction; cleanup ordering reconstructed — confirm against
 * upstream.
 */
int lttng_session_list_tracker_ids(struct lttng_kernel_session *session,
		enum tracker_type tracker_type)
{
	struct file *tracker_ids_list_file;
	struct seq_file *m;
	int file_fd, ret;

	file_fd = get_unused_fd_flags(0);
	if (file_fd < 0) {
		ret = file_fd;
		goto fd_error;
	}

	tracker_ids_list_file = anon_inode_getfile("[lttng_tracker_ids_list]",
					  &lttng_tracker_ids_list_fops,
					  NULL, O_RDWR);
	if (IS_ERR(tracker_ids_list_file)) {
		ret = PTR_ERR(tracker_ids_list_file);
		goto file_error;
	}
	/* Pin the session file; fails if the refcount would overflow. */
	if (!atomic_long_add_unless(&session->priv->file->f_count, 1, LONG_MAX)) {
		ret = -EOVERFLOW;
		goto refcount_error;
	}
	ret = lttng_tracker_ids_list_fops.open(NULL, tracker_ids_list_file);
	if (ret < 0)
		goto open_error;
	m = tracker_ids_list_file->private_data;

	m->private = get_tracker(session, tracker_type);
	BUG_ON(!m->private);
	fd_install(file_fd, tracker_ids_list_file);

	return file_fd;

open_error:
	atomic_long_dec(&session->priv->file->f_count);
refcount_error:
	fput(tracker_ids_list_file);
file_error:
	put_unused_fd(file_fd);
fd_error:
	return ret;
}
2084 * Enabler management.
/*
 * Return 1 when desc_name matches the star-glob pattern, 0 otherwise.
 * Both strings are NUL-terminated (LTTNG_SIZE_MAX length bound).
 */
int lttng_match_enabler_star_glob(const char *desc_name,
		const char *pattern)
{
	if (!strutils_star_glob_match(pattern, LTTNG_SIZE_MAX,
			desc_name, LTTNG_SIZE_MAX))
		return 0;
	else
		return 1;
}
/*
 * Exact-name enabler match: return 1 when desc_name equals name,
 * 0 otherwise.
 */
int lttng_match_enabler_name(const char *desc_name,
		const char *name)
{
	return strcmp(desc_name, name) == 0 ? 1 : 0;
}
/*
 * Core matching between an event descriptor and an enabler, dispatched
 * per instrumentation type. For syscalls, the "compat_" prefix and
 * "syscall_entry_"/"syscall_exit_" prefixes are stripped and checked
 * against the enabler's abi/entryexit constraints. For kretprobes, the
 * trailing "_entry"/"_exit" suffix is split off and checked likewise.
 * Returns 1 on match, 0 on no-match, negative errno on invalid input.
 * NOTE(review): many flag-setting and default branches were elided by
 * extraction; compat/entry bookkeeping reconstructed — confirm against
 * upstream before relying on edge-case behavior.
 */
int lttng_desc_match_enabler_check(const struct lttng_kernel_event_desc *desc,
		struct lttng_event_enabler_common *enabler)
{
	const char *desc_name, *enabler_name;
	bool compat = false, entry = false;

	enabler_name = enabler->event_param.name;
	switch (enabler->event_param.instrumentation) {
	case LTTNG_KERNEL_ABI_TRACEPOINT:
		desc_name = desc->event_name;
		switch (enabler->format_type) {
		case LTTNG_ENABLER_FORMAT_STAR_GLOB:
			return lttng_match_enabler_star_glob(desc_name, enabler_name);
		case LTTNG_ENABLER_FORMAT_NAME:
			return lttng_match_enabler_name(desc_name, enabler_name);
		default:
			return -EINVAL;
		}
		break;

	case LTTNG_KERNEL_ABI_SYSCALL:
		desc_name = desc->event_name;
		if (!strncmp(desc_name, "compat_", strlen("compat_"))) {
			desc_name += strlen("compat_");
			compat = true;
		}
		if (!strncmp(desc_name, "syscall_exit_",
				strlen("syscall_exit_"))) {
			desc_name += strlen("syscall_exit_");
		} else if (!strncmp(desc_name, "syscall_entry_",
				strlen("syscall_entry_"))) {
			desc_name += strlen("syscall_entry_");
			entry = true;
		} else {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		switch (enabler->event_param.u.syscall.entryexit) {
		case LTTNG_KERNEL_ABI_SYSCALL_ENTRYEXIT:
			break;
		case LTTNG_KERNEL_ABI_SYSCALL_ENTRY:
			if (!entry)
				return 0;
			break;
		case LTTNG_KERNEL_ABI_SYSCALL_EXIT:
			if (entry)
				return 0;
			break;
		default:
			return -EINVAL;
		}
		switch (enabler->event_param.u.syscall.abi) {
		case LTTNG_KERNEL_ABI_SYSCALL_ABI_ALL:
			break;
		case LTTNG_KERNEL_ABI_SYSCALL_ABI_NATIVE:
			if (compat)
				return 0;
			break;
		case LTTNG_KERNEL_ABI_SYSCALL_ABI_COMPAT:
			if (!compat)
				return 0;
			break;
		default:
			return -EINVAL;
		}
		switch (enabler->event_param.u.syscall.match) {
		case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NAME:
			switch (enabler->format_type) {
			case LTTNG_ENABLER_FORMAT_STAR_GLOB:
				return lttng_match_enabler_star_glob(desc_name, enabler_name);
			case LTTNG_ENABLER_FORMAT_NAME:
				return lttng_match_enabler_name(desc_name, enabler_name);
			default:
				return -EINVAL;
			}
			break;
		case LTTNG_KERNEL_ABI_SYSCALL_MATCH_NR:
			return -EINVAL;	/* Not implemented. */
		default:
			return -EINVAL;
		}
		break;

	case LTTNG_KERNEL_ABI_KPROBE:
		desc_name = desc->event_name;
		switch (enabler->format_type) {
		case LTTNG_ENABLER_FORMAT_STAR_GLOB:
			/* Kprobe names are matched exactly, never by glob. */
			return -EINVAL;
		case LTTNG_ENABLER_FORMAT_NAME:
			return lttng_match_enabler_name(desc_name, enabler_name);
		default:
			return -EINVAL;
		}
		break;

	case LTTNG_KERNEL_ABI_KRETPROBE:
	{
		char base_name[LTTNG_KERNEL_ABI_SYM_NAME_LEN];
		size_t base_name_len;	/* includes \0 */
		char *last_separator, *entryexit;

		desc_name = desc->event_name;
		/* NOTE(review): assumes desc_name always contains '_' (names end
		 * in _entry/_exit); strrchr NULL return is not handled. */
		last_separator = strrchr(desc_name, '_');
		base_name_len = last_separator - desc_name + 1;
		memcpy(base_name, desc_name, base_name_len);
		base_name[base_name_len - 1] = '\0';	/* Replace '_' by '\0' */
		entryexit = last_separator + 1;
		if (!strcmp(entryexit, "entry")) {
			entry = true;
		} else if (!strcmp(entryexit, "exit")) {
			/* Nothing to do. */
		} else {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}

		switch (enabler->event_param.u.kretprobe.entryexit) {
		case LTTNG_KERNEL_ABI_KRETPROBE_ENTRYEXIT:
			break;
		case LTTNG_KERNEL_ABI_KRETPROBE_ENTRY:
			if (!entry)
				return 0;
			break;
		case LTTNG_KERNEL_ABI_KRETPROBE_EXIT:
			if (entry)
				return 0;
			break;
		default:
			return -EINVAL;
		}

		switch (enabler->format_type) {
		case LTTNG_ENABLER_FORMAT_STAR_GLOB:
			return -EINVAL;
		case LTTNG_ENABLER_FORMAT_NAME:
			return lttng_match_enabler_name(base_name, enabler_name);
		default:
			return -EINVAL;
		}
		break;
	}

	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
}
/*
 * Boolean wrapper around lttng_desc_match_enabler_check(): a negative
 * (error) result is treated as no-match, with a warning.
 */
bool lttng_desc_match_enabler(const struct lttng_kernel_event_desc *desc,
		struct lttng_event_enabler_common *enabler)
{
	int ret;

	ret = lttng_desc_match_enabler_check(desc, enabler);
	if (ret < 0) {
		WARN_ON_ONCE(1);
		return false;
	}
	return ret;
}
/*
 * A session event matches a session enabler when the descriptor
 * matches, both refer to the same channel, and the session tokens
 * match (or the channel coalesces hits).
 */
bool lttng_event_session_enabler_match_event_session(struct lttng_event_enabler_session_common *event_enabler_session,
		struct lttng_kernel_event_session_common_private *event_session_priv)
{
	if (lttng_desc_match_enabler(event_session_priv->parent.desc, &event_enabler_session->parent)
			&& event_session_priv->chan == event_enabler_session->chan
			&& match_event_session_token(event_session_priv, event_enabler_session->parent.user_token))
		return true;
	else
		return false;
}
/*
 * An event notifier matches a notifier enabler when the descriptor
 * matches, both belong to the same notifier group, and the user tokens
 * are equal. Returns 1 on match, 0 otherwise.
 */
int lttng_event_notifier_enabler_match_event_notifier(
		struct lttng_event_notifier_enabler *event_notifier_enabler,
		struct lttng_kernel_event_notifier_private *event_notifier_priv)
{
	int desc_matches = lttng_desc_match_enabler(event_notifier_priv->parent.desc,
		lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));

	if (desc_matches && event_notifier_priv->group == event_notifier_enabler->group &&
			event_notifier_priv->parent.user_token == event_notifier_enabler->parent.user_token)
		return 1;
	else
		return 0;
}
/*
 * Top-level enabler/event match: instrumentation types must agree,
 * then dispatch to the session or notifier specific matcher.
 */
bool lttng_event_enabler_match_event(struct lttng_event_enabler_common *event_enabler,
		struct lttng_kernel_event_common *event)
{
	if (event_enabler->event_param.instrumentation != event->priv->instrumentation)
		return false;

	switch (event_enabler->enabler_type) {
	case LTTNG_EVENT_ENABLER_TYPE_RECORDER:
		lttng_fallthrough;
	case LTTNG_EVENT_ENABLER_TYPE_COUNTER:
	{
		struct lttng_event_enabler_session_common *event_enabler_session =
			container_of(event_enabler, struct lttng_event_enabler_session_common, parent);
		struct lttng_kernel_event_session_common_private *event_session_priv =
			container_of(event->priv, struct lttng_kernel_event_session_common_private, parent);
		return lttng_event_session_enabler_match_event_session(event_enabler_session, event_session_priv);
	}
	case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER:
	{
		struct lttng_event_notifier_enabler *event_notifier_enabler =
			container_of(event_enabler, struct lttng_event_notifier_enabler, parent);
		struct lttng_kernel_event_notifier_private *event_notifier_priv =
			container_of(event->priv, struct lttng_kernel_event_notifier_private, parent);
		return lttng_event_notifier_enabler_match_event_notifier(event_notifier_enabler, event_notifier_priv);
	}
	default:
		WARN_ON_ONCE(1);
		return false;
	}
}
/*
 * Find the reference to a given enabler in an event's enabler-ref
 * list. Returns NULL when the enabler is not referenced.
 */
struct lttng_enabler_ref *lttng_enabler_ref(
		struct list_head *enablers_ref_list,
		struct lttng_event_enabler_common *enabler)
{
	struct lttng_enabler_ref *enabler_ref;

	list_for_each_entry(enabler_ref, enablers_ref_list, node) {
		if (enabler_ref->ref == enabler)
			return enabler_ref;
	}
	return NULL;
}
/*
 * Walk all registered probe descriptors and create an event for every
 * descriptor matching the enabler that does not already exist
 * (-EEXIST from creation is silently skipped).
 */
void lttng_event_enabler_create_tracepoint_events_if_missing(struct lttng_event_enabler_common *event_enabler)
{
	struct lttng_kernel_probe_desc *probe_desc;
	const struct lttng_kernel_event_desc *desc;
	struct list_head *probe_list;
	int i;

	probe_list = lttng_get_probe_list_head();
	/*
	 * For each probe event, if we find that a probe event matches
	 * our enabler, create an associated lttng_event if not
	 * already present.
	 */
	list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			struct lttng_kernel_event_common *event;

			desc = probe_desc->event_desc[i];
			if (!lttng_desc_match_enabler(desc, event_enabler))
				continue;

			/*
			 * We need to create an event for this event probe.
			 */
			event = _lttng_kernel_event_create(event_enabler, desc, NULL);
			if (IS_ERR(event)) {
				/* Skip if already found. */
				if (PTR_ERR(event) == -EEXIST)
					continue;
				printk(KERN_INFO "LTTng: Unable to create event %s\n",
					probe_desc->event_desc[i]->event_name);
			}
		}
	}
}
/* Try to create the event associated with this kprobe enabler. */
/*
 * Best-effort: creation failure other than -EEXIST is only logged;
 * the kprobe parameters come from the enabler's event_param.
 */
void lttng_event_enabler_create_kprobe_event_if_missing(struct lttng_event_enabler_common *event_enabler)
{
	struct lttng_kernel_event_common *event;

	event = _lttng_kernel_event_create(event_enabler, NULL, NULL);
	if (IS_ERR(event)) {
		if (PTR_ERR(event) != -EEXIST) {
			printk(KERN_INFO "LTTng: Unable to create kprobe event %s\n",
				event_enabler->event_param.name);
		}
	}
}
2391 /* Try to create the event associated with this kretprobe enabler. */
2393 void lttng_event_enabler_create_kretprobe_event_if_missing(struct lttng_event_enabler_common
*event_enabler
)
2395 struct lttng_kernel_abi_event
*event_param
= &event_enabler
->event_param
;
2396 struct lttng_kernel_event_pair event_pair
;
2397 struct lttng_kernel_event_common
*event
;
2399 if (strlen(event_param
->name
) + strlen("_entry") >= LTTNG_KERNEL_ABI_SYM_NAME_LEN
) {
2404 memset(&event_pair
, 0, sizeof(event_pair
));
2405 event_pair
.krp
= lttng_kretprobes_create_krp(event_param
->u
.kretprobe
.symbol_name
,
2406 event_param
->u
.kretprobe
.offset
, event_param
->u
.kretprobe
.addr
);
2407 if (!event_pair
.krp
) {
2411 strcpy(event_pair
.name
, event_enabler
->event_param
.name
);
2412 strcat(event_pair
.name
, "_entry");
2413 event_pair
.check_ids
= true;
2414 event_pair
.entryexit
= LTTNG_KRETPROBE_ENTRY
;
2415 event
= _lttng_kernel_event_create(event_enabler
, NULL
, &event_pair
);
2416 if (IS_ERR(event
)) {
2417 if (PTR_ERR(event
) != -EEXIST
) {
2418 printk(KERN_INFO
"LTTng: Unable to create kretprobe event %s\n",
2419 event_enabler
->event_param
.name
);
2423 strcpy(event_pair
.name
, event_enabler
->event_param
.name
);
2424 strcat(event_pair
.name
, "_exit");
2425 event_pair
.check_ids
= false;
2426 event_pair
.entryexit
= LTTNG_KRETPROBE_EXIT
;
2427 event
= _lttng_kernel_event_create(event_enabler
, NULL
, &event_pair
);
2428 if (IS_ERR(event
)) {
2429 if (PTR_ERR(event
) != -EEXIST
) {
2430 printk(KERN_INFO
"LTTng: Unable to create kretprobe event %s\n",
2431 event_enabler
->event_param
.name
);
2435 lttng_kretprobes_put_krp(event_pair
.krp
);
2439 * Create event if it is missing and present in the list of tracepoint probes.
2440 * Should be called with sessions mutex held.
2443 void lttng_event_enabler_create_events_if_missing(struct lttng_event_enabler_common
*event_enabler
)
2447 switch (event_enabler
->event_param
.instrumentation
) {
2448 case LTTNG_KERNEL_ABI_TRACEPOINT
:
2449 lttng_event_enabler_create_tracepoint_events_if_missing(event_enabler
);
2452 case LTTNG_KERNEL_ABI_SYSCALL
:
2453 ret
= lttng_event_enabler_create_syscall_events_if_missing(event_enabler
);
2457 case LTTNG_KERNEL_ABI_KPROBE
:
2458 lttng_event_enabler_create_kprobe_event_if_missing(event_enabler
);
2461 case LTTNG_KERNEL_ABI_KRETPROBE
:
2462 lttng_event_enabler_create_kretprobe_event_if_missing(event_enabler
);
2472 void lttng_event_enabler_init_event_filter(struct lttng_event_enabler_common
*event_enabler
,
2473 struct lttng_kernel_event_common
*event
)
2475 /* Link filter bytecodes if not linked yet. */
2476 lttng_enabler_link_bytecode(event
->priv
->desc
, lttng_static_ctx
,
2477 &event
->priv
->filter_bytecode_runtime_head
, &event_enabler
->filter_bytecode_head
);
2481 void lttng_event_enabler_init_event_capture(struct lttng_event_enabler_common
*event_enabler
,
2482 struct lttng_kernel_event_common
*event
)
2484 switch (event_enabler
->enabler_type
) {
2485 case LTTNG_EVENT_ENABLER_TYPE_RECORDER
:
2487 case LTTNG_EVENT_ENABLER_TYPE_COUNTER
:
2489 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER
:
2491 struct lttng_event_notifier_enabler
*event_notifier_enabler
=
2492 container_of(event_enabler
, struct lttng_event_notifier_enabler
, parent
);
2493 struct lttng_kernel_event_notifier
*event_notifier
=
2494 container_of(event
, struct lttng_kernel_event_notifier
, parent
);
2496 /* Link capture bytecodes if not linked yet. */
2497 lttng_enabler_link_bytecode(event
->priv
->desc
,
2498 lttng_static_ctx
, &event_notifier
->priv
->capture_bytecode_runtime_head
,
2499 &event_notifier_enabler
->capture_bytecode_head
);
2500 event_notifier
->priv
->num_captures
= event_notifier_enabler
->num_captures
;
2509 * Create events associated with an event_enabler (if not already present),
2510 * and add backward reference from the event to the enabler.
2511 * Should be called with sessions mutex held.
2514 int lttng_event_enabler_ref_events(struct lttng_event_enabler_common
*event_enabler
)
2516 struct list_head
*event_list_head
= lttng_get_event_list_head_from_enabler(event_enabler
);
2517 struct lttng_kernel_event_common_private
*event_priv
;
2520 * Only try to create events for enablers that are enabled, the user
2521 * might still be attaching filter or exclusion to the event enabler.
2523 if (!event_enabler
->enabled
)
2526 lttng_syscall_table_set_wildcard_all(event_enabler
);
2528 /* First ensure that probe events are created for this enabler. */
2529 lttng_event_enabler_create_events_if_missing(event_enabler
);
2531 /* Link the created event with its associated enabler. */
2532 list_for_each_entry(event_priv
, event_list_head
, node
) {
2533 struct lttng_kernel_event_common
*event
= event_priv
->pub
;
2534 struct lttng_enabler_ref
*enabler_ref
;
2536 if (!lttng_event_enabler_match_event(event_enabler
, event
))
2539 enabler_ref
= lttng_enabler_ref(&event_priv
->enablers_ref_head
, event_enabler
);
2544 * If no backward ref, create it.
2545 * Add backward ref from event_notifier to enabler.
2547 enabler_ref
= kzalloc(sizeof(*enabler_ref
), GFP_KERNEL
);
2551 enabler_ref
->ref
= event_enabler
;
2552 list_add(&enabler_ref
->node
, &event_priv
->enablers_ref_head
);
2554 ret
= lttng_append_event_to_channel_map(event_enabler
, event
,
2555 event
->priv
->desc
->event_name
);
2559 lttng_event_enabler_init_event_filter(event_enabler
, event
);
2560 lttng_event_enabler_init_event_capture(event_enabler
, event
);
2567 * Called at module load: connect the probe on all enablers matching
2569 * Called with sessions lock held.
2571 int lttng_fix_pending_events(void)
2573 struct lttng_kernel_session_private
*session_priv
;
2575 list_for_each_entry(session_priv
, &sessions
, node
)
2576 lttng_session_lazy_sync_event_enablers(session_priv
->pub
);
2580 static bool lttng_event_notifier_group_has_active_event_notifiers(
2581 struct lttng_event_notifier_group
*event_notifier_group
)
2583 struct lttng_event_enabler_common
*event_enabler
;
2585 list_for_each_entry(event_enabler
, &event_notifier_group
->enablers_head
, node
) {
2586 if (event_enabler
->enabled
)
2592 bool lttng_event_notifier_active(void)
2594 struct lttng_event_notifier_group
*event_notifier_group
;
2596 list_for_each_entry(event_notifier_group
, &event_notifier_groups
, node
) {
2597 if (lttng_event_notifier_group_has_active_event_notifiers(event_notifier_group
))
2603 int lttng_fix_pending_event_notifiers(void)
2605 struct lttng_event_notifier_group
*event_notifier_group
;
2607 list_for_each_entry(event_notifier_group
, &event_notifier_groups
, node
)
2608 lttng_event_notifier_group_sync_enablers(event_notifier_group
);
2612 struct lttng_event_recorder_enabler
*lttng_event_recorder_enabler_create(
2613 enum lttng_enabler_format_type format_type
,
2614 struct lttng_kernel_abi_event
*event_param
,
2615 struct lttng_kernel_channel_buffer
*chan
)
2617 struct lttng_event_recorder_enabler
*event_enabler
;
2619 event_enabler
= kzalloc(sizeof(*event_enabler
), GFP_KERNEL
);
2622 event_enabler
->parent
.parent
.enabler_type
= LTTNG_EVENT_ENABLER_TYPE_RECORDER
;
2623 event_enabler
->parent
.parent
.format_type
= format_type
;
2624 INIT_LIST_HEAD(&event_enabler
->parent
.parent
.filter_bytecode_head
);
2625 memcpy(&event_enabler
->parent
.parent
.event_param
, event_param
,
2626 sizeof(event_enabler
->parent
.parent
.event_param
));
2627 event_enabler
->chan
= chan
;
2628 event_enabler
->parent
.chan
= &chan
->parent
;
2631 event_enabler
->parent
.parent
.enabled
= 0;
2632 return event_enabler
;
2635 struct lttng_event_counter_enabler
*lttng_event_counter_enabler_create(
2636 enum lttng_enabler_format_type format_type
,
2637 struct lttng_kernel_abi_event
*event_param
,
2638 struct lttng_kernel_counter_key
*counter_key
,
2639 struct lttng_kernel_channel_counter
*chan
)
2641 struct lttng_event_counter_enabler
*event_enabler
;
2643 event_enabler
= kzalloc(sizeof(*event_enabler
), GFP_KERNEL
);
2646 event_enabler
->parent
.parent
.enabler_type
= LTTNG_EVENT_ENABLER_TYPE_COUNTER
;
2647 event_enabler
->parent
.parent
.format_type
= format_type
;
2648 INIT_LIST_HEAD(&event_enabler
->parent
.parent
.filter_bytecode_head
);
2649 memcpy(&event_enabler
->parent
.parent
.event_param
, event_param
,
2650 sizeof(event_enabler
->parent
.parent
.event_param
));
2651 event_enabler
->chan
= chan
;
2652 event_enabler
->parent
.chan
= &chan
->parent
;
2653 if (create_counter_key_from_kernel(&event_enabler
->key
, counter_key
)) {
2654 kfree(event_enabler
);
2659 event_enabler
->parent
.parent
.enabled
= 0;
2660 event_enabler
->parent
.parent
.user_token
= event_param
->token
;
2661 return event_enabler
;
2664 void lttng_event_enabler_session_add(struct lttng_kernel_session
*session
,
2665 struct lttng_event_enabler_session_common
*event_enabler
)
2667 mutex_lock(&sessions_mutex
);
2668 list_add(&event_enabler
->parent
.node
, &session
->priv
->enablers_head
);
2669 event_enabler
->parent
.published
= true;
2670 lttng_session_lazy_sync_event_enablers(session
);
2671 mutex_unlock(&sessions_mutex
);
2674 int lttng_event_enabler_enable(struct lttng_event_enabler_common
*event_enabler
)
2676 mutex_lock(&sessions_mutex
);
2677 event_enabler
->enabled
= 1;
2678 lttng_event_enabler_sync(event_enabler
);
2679 mutex_unlock(&sessions_mutex
);
2683 int lttng_event_enabler_disable(struct lttng_event_enabler_common
*event_enabler
)
2685 mutex_lock(&sessions_mutex
);
2686 event_enabler
->enabled
= 0;
2687 lttng_event_enabler_sync(event_enabler
);
2688 mutex_unlock(&sessions_mutex
);
2693 int lttng_enabler_attach_filter_bytecode(struct lttng_event_enabler_common
*enabler
,
2694 struct lttng_kernel_abi_filter_bytecode __user
*bytecode
)
2696 struct lttng_kernel_bytecode_node
*bytecode_node
;
2697 uint32_t bytecode_len
;
2700 ret
= get_user(bytecode_len
, &bytecode
->len
);
2703 bytecode_node
= lttng_kvzalloc(sizeof(*bytecode_node
) + bytecode_len
,
2707 ret
= copy_from_user(&bytecode_node
->bc
, bytecode
,
2708 sizeof(*bytecode
) + bytecode_len
);
2712 bytecode_node
->type
= LTTNG_KERNEL_BYTECODE_TYPE_FILTER
;
2713 bytecode_node
->enabler
= enabler
;
2714 /* Enforce length based on allocated size */
2715 bytecode_node
->bc
.len
= bytecode_len
;
2716 list_add_tail(&bytecode_node
->node
, &enabler
->filter_bytecode_head
);
2721 lttng_kvfree(bytecode_node
);
2725 int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler_common
*event_enabler
,
2726 struct lttng_kernel_abi_filter_bytecode __user
*bytecode
)
2729 ret
= lttng_enabler_attach_filter_bytecode(event_enabler
, bytecode
);
2732 lttng_event_enabler_sync(event_enabler
);
2739 int lttng_event_add_callsite(struct lttng_kernel_event_common
*event
,
2740 struct lttng_kernel_abi_event_callsite __user
*callsite
)
2743 switch (event
->priv
->instrumentation
) {
2744 case LTTNG_KERNEL_ABI_UPROBE
:
2745 return lttng_uprobes_event_add_callsite(event
, callsite
);
2752 void lttng_enabler_destroy(struct lttng_event_enabler_common
*enabler
)
2754 struct lttng_kernel_bytecode_node
*filter_node
, *tmp_filter_node
;
2756 /* Destroy filter bytecode */
2757 list_for_each_entry_safe(filter_node
, tmp_filter_node
,
2758 &enabler
->filter_bytecode_head
, node
) {
2759 lttng_kvfree(filter_node
);
2763 void lttng_event_enabler_destroy(struct lttng_event_enabler_common
*event_enabler
)
2765 lttng_enabler_destroy(event_enabler
);
2766 if (event_enabler
->published
)
2767 list_del(&event_enabler
->node
);
2769 switch (event_enabler
->enabler_type
) {
2770 case LTTNG_EVENT_ENABLER_TYPE_RECORDER
:
2772 struct lttng_event_recorder_enabler
*event_recorder_enabler
=
2773 container_of(event_enabler
, struct lttng_event_recorder_enabler
, parent
.parent
);
2775 kfree(event_recorder_enabler
);
2778 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER
:
2780 struct lttng_event_notifier_enabler
*event_notifier_enabler
=
2781 container_of(event_enabler
, struct lttng_event_notifier_enabler
, parent
);
2783 kfree(event_notifier_enabler
);
2786 case LTTNG_EVENT_ENABLER_TYPE_COUNTER
:
2788 struct lttng_event_counter_enabler
*event_counter_enabler
=
2789 container_of(event_enabler
, struct lttng_event_counter_enabler
, parent
.parent
);
2791 destroy_counter_key(event_counter_enabler
->key
);
2792 kfree(event_counter_enabler
);
2800 struct lttng_event_notifier_enabler
*lttng_event_notifier_enabler_create(
2801 enum lttng_enabler_format_type format_type
,
2802 struct lttng_kernel_abi_event_notifier
*event_notifier_param
,
2803 struct lttng_event_notifier_group
*event_notifier_group
)
2805 struct lttng_event_notifier_enabler
*event_notifier_enabler
;
2807 event_notifier_enabler
= kzalloc(sizeof(*event_notifier_enabler
), GFP_KERNEL
);
2808 if (!event_notifier_enabler
)
2811 event_notifier_enabler
->parent
.enabler_type
= LTTNG_EVENT_ENABLER_TYPE_NOTIFIER
;
2812 event_notifier_enabler
->parent
.format_type
= format_type
;
2813 INIT_LIST_HEAD(&event_notifier_enabler
->parent
.filter_bytecode_head
);
2814 INIT_LIST_HEAD(&event_notifier_enabler
->capture_bytecode_head
);
2816 event_notifier_enabler
->error_counter_index
= event_notifier_param
->error_counter_index
;
2817 event_notifier_enabler
->num_captures
= 0;
2819 memcpy(&event_notifier_enabler
->parent
.event_param
, &event_notifier_param
->event
,
2820 sizeof(event_notifier_enabler
->parent
.event_param
));
2822 event_notifier_enabler
->parent
.enabled
= 0;
2823 event_notifier_enabler
->parent
.user_token
= event_notifier_param
->event
.token
;
2824 event_notifier_enabler
->group
= event_notifier_group
;
2825 return event_notifier_enabler
;
2828 void lttng_event_notifier_enabler_group_add(struct lttng_event_notifier_group
*event_notifier_group
,
2829 struct lttng_event_notifier_enabler
*event_notifier_enabler
)
2831 mutex_lock(&sessions_mutex
);
2832 list_add(&event_notifier_enabler
->parent
.node
, &event_notifier_enabler
->group
->enablers_head
);
2833 event_notifier_enabler
->parent
.published
= true;
2834 lttng_event_notifier_group_sync_enablers(event_notifier_enabler
->group
);
2835 mutex_unlock(&sessions_mutex
);
2838 int lttng_event_notifier_enabler_attach_capture_bytecode(
2839 struct lttng_event_notifier_enabler
*event_notifier_enabler
,
2840 struct lttng_kernel_abi_capture_bytecode __user
*bytecode
)
2842 struct lttng_kernel_bytecode_node
*bytecode_node
;
2843 struct lttng_event_enabler_common
*enabler
=
2844 lttng_event_notifier_enabler_as_enabler(event_notifier_enabler
);
2845 uint32_t bytecode_len
;
2848 ret
= get_user(bytecode_len
, &bytecode
->len
);
2852 bytecode_node
= lttng_kvzalloc(sizeof(*bytecode_node
) + bytecode_len
,
2857 ret
= copy_from_user(&bytecode_node
->bc
, bytecode
,
2858 sizeof(*bytecode
) + bytecode_len
);
2862 bytecode_node
->type
= LTTNG_KERNEL_BYTECODE_TYPE_CAPTURE
;
2863 bytecode_node
->enabler
= enabler
;
2865 /* Enforce length based on allocated size */
2866 bytecode_node
->bc
.len
= bytecode_len
;
2867 list_add_tail(&bytecode_node
->node
, &event_notifier_enabler
->capture_bytecode_head
);
2869 event_notifier_enabler
->num_captures
++;
2871 lttng_event_notifier_group_sync_enablers(event_notifier_enabler
->group
);
2875 lttng_kvfree(bytecode_node
);
2881 void lttng_event_sync_filter_state(struct lttng_kernel_event_common
*event
)
2883 int has_enablers_without_filter_bytecode
= 0, nr_filters
= 0;
2884 struct lttng_kernel_bytecode_runtime
*runtime
;
2885 struct lttng_enabler_ref
*enabler_ref
;
2887 /* Check if has enablers without bytecode enabled */
2888 list_for_each_entry(enabler_ref
, &event
->priv
->enablers_ref_head
, node
) {
2889 if (enabler_ref
->ref
->enabled
2890 && list_empty(&enabler_ref
->ref
->filter_bytecode_head
)) {
2891 has_enablers_without_filter_bytecode
= 1;
2895 event
->priv
->has_enablers_without_filter_bytecode
= has_enablers_without_filter_bytecode
;
2897 /* Enable filters */
2898 list_for_each_entry(runtime
, &event
->priv
->filter_bytecode_runtime_head
, node
) {
2899 lttng_bytecode_sync_state(runtime
);
2902 WRITE_ONCE(event
->eval_filter
, !(has_enablers_without_filter_bytecode
|| !nr_filters
));
2906 void lttng_event_sync_capture_state(struct lttng_kernel_event_common
*event
)
2908 switch (event
->type
) {
2909 case LTTNG_KERNEL_EVENT_TYPE_RECORDER
:
2911 case LTTNG_KERNEL_EVENT_TYPE_COUNTER
:
2913 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER
:
2915 struct lttng_kernel_event_notifier
*event_notifier
=
2916 container_of(event
, struct lttng_kernel_event_notifier
, parent
);
2917 struct lttng_kernel_bytecode_runtime
*runtime
;
2918 int nr_captures
= 0;
2920 /* Enable captures */
2921 list_for_each_entry(runtime
, &event_notifier
->priv
->capture_bytecode_runtime_head
, node
) {
2922 lttng_bytecode_sync_state(runtime
);
2925 WRITE_ONCE(event_notifier
->eval_capture
, !!nr_captures
);
2934 bool lttng_get_event_enabled_state(struct lttng_kernel_event_common
*event
)
2936 struct lttng_enabler_ref
*enabler_ref
;
2937 bool enabled
= false;
2939 switch (event
->priv
->instrumentation
) {
2940 case LTTNG_KERNEL_ABI_TRACEPOINT
:
2942 case LTTNG_KERNEL_ABI_SYSCALL
:
2944 case LTTNG_KERNEL_ABI_KPROBE
:
2946 case LTTNG_KERNEL_ABI_KRETPROBE
:
2948 list_for_each_entry(enabler_ref
, &event
->priv
->enablers_ref_head
, node
) {
2949 if (enabler_ref
->ref
->enabled
) {
2960 switch (event
->type
) {
2961 case LTTNG_KERNEL_EVENT_TYPE_RECORDER
:
2963 case LTTNG_KERNEL_EVENT_TYPE_COUNTER
:
2965 struct lttng_kernel_event_session_common_private
*event_session_common_priv
=
2966 container_of(event
->priv
, struct lttng_kernel_event_session_common_private
, parent
);
2969 * Enabled state is based on union of enablers, with
2970 * intersection of session and channel transient enable
2973 return enabled
&& event_session_common_priv
->chan
->session
->priv
->tstate
&& event_session_common_priv
->chan
->priv
->tstate
;
2975 case LTTNG_KERNEL_EVENT_TYPE_NOTIFIER
:
2984 bool lttng_event_is_lazy_sync(struct lttng_kernel_event_common
*event
)
2986 switch (event
->priv
->instrumentation
) {
2987 case LTTNG_KERNEL_ABI_TRACEPOINT
:
2989 case LTTNG_KERNEL_ABI_SYSCALL
:
2991 case LTTNG_KERNEL_ABI_KPROBE
:
2993 case LTTNG_KERNEL_ABI_KRETPROBE
:
2997 /* Not handled with lazy sync. */
3003 * Should be called with sessions mutex held.
3006 void lttng_sync_event_list(struct list_head
*event_enabler_list
,
3007 struct list_head
*event_list
)
3009 struct lttng_kernel_event_common_private
*event_priv
;
3010 struct lttng_event_enabler_common
*event_enabler
;
3012 list_for_each_entry(event_enabler
, event_enabler_list
, node
)
3013 lttng_event_enabler_ref_events(event_enabler
);
3016 * For each event, if at least one of its enablers is enabled,
3017 * and its channel and session transient states are enabled, we
3018 * enable the event, else we disable it.
3020 list_for_each_entry(event_priv
, event_list
, node
) {
3021 struct lttng_kernel_event_common
*event
= event_priv
->pub
;
3024 if (!lttng_event_is_lazy_sync(event
))
3027 enabled
= lttng_get_event_enabled_state(event
);
3028 WRITE_ONCE(event
->enabled
, enabled
);
3030 * Sync tracepoint registration with event enabled state.
3033 if (!event_priv
->registered
)
3034 register_event(event
);
3036 if (event_priv
->registered
)
3037 unregister_event(event
);
3040 lttng_event_sync_filter_state(event
);
3041 lttng_event_sync_capture_state(event
);
3046 * lttng_session_sync_event_enablers should be called just before starting a
3050 void lttng_session_sync_event_enablers(struct lttng_kernel_session
*session
)
3052 lttng_sync_event_list(&session
->priv
->enablers_head
, &session
->priv
->events_head
);
3056 * Apply enablers to session events, adding events to session if need
3057 * be. It is required after each modification applied to an active
3058 * session, and right before session "start".
3059 * "lazy" sync means we only sync if required.
3060 * Should be called with sessions mutex held.
3063 void lttng_session_lazy_sync_event_enablers(struct lttng_kernel_session
*session
)
3065 /* We can skip if session is not active */
3066 if (!session
->active
)
3068 lttng_session_sync_event_enablers(session
);
3072 void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group
*event_notifier_group
)
3074 lttng_sync_event_list(&event_notifier_group
->enablers_head
, &event_notifier_group
->event_notifiers_head
);
3078 void lttng_event_enabler_sync(struct lttng_event_enabler_common
*event_enabler
)
3080 switch (event_enabler
->enabler_type
) {
3081 case LTTNG_EVENT_ENABLER_TYPE_RECORDER
:
3083 case LTTNG_EVENT_ENABLER_TYPE_COUNTER
:
3085 struct lttng_event_enabler_session_common
*event_enabler_session
=
3086 container_of(event_enabler
, struct lttng_event_enabler_session_common
, parent
);
3087 lttng_session_lazy_sync_event_enablers(event_enabler_session
->chan
->session
);
3090 case LTTNG_EVENT_ENABLER_TYPE_NOTIFIER
:
3092 struct lttng_event_notifier_enabler
*event_notifier_enabler
=
3093 container_of(event_enabler
, struct lttng_event_notifier_enabler
, parent
);
3094 lttng_event_notifier_group_sync_enablers(event_notifier_enabler
->group
);
3103 * Serialize at most one packet worth of metadata into a metadata
3105 * We grab the metadata cache mutex to get exclusive access to our metadata
3106 * buffer and to the metadata cache. Exclusive access to the metadata buffer
3107 * allows us to do racy operations such as looking for remaining space left in
3108 * packet and write, since mutual exclusion protects us from concurrent writes.
3109 * Mutual exclusion on the metadata cache allow us to read the cache content
3110 * without racing against reallocation of the cache by updates.
3111 * Returns the number of bytes written in the channel, 0 if no data
3112 * was written and a negative value on error.
3114 int lttng_metadata_output_channel(struct lttng_metadata_stream
*stream
,
3115 struct lttng_kernel_ring_buffer_channel
*chan
, bool *coherent
)
3117 struct lttng_kernel_ring_buffer_ctx ctx
;
3119 size_t len
, reserve_len
;
3122 * Ensure we support mutiple get_next / put sequences followed by
3123 * put_next. The metadata cache lock protects reading the metadata
3124 * cache. It can indeed be read concurrently by "get_next_subbuf" and
3125 * "flush" operations on the buffer invoked by different processes.
3126 * Moreover, since the metadata cache memory can be reallocated, we
3127 * need to have exclusive access against updates even though we only
3130 mutex_lock(&stream
->metadata_cache
->lock
);
3131 WARN_ON(stream
->metadata_in
< stream
->metadata_out
);
3132 if (stream
->metadata_in
!= stream
->metadata_out
)
3135 /* Metadata regenerated, change the version. */
3136 if (stream
->metadata_cache
->version
!= stream
->version
)
3137 stream
->version
= stream
->metadata_cache
->version
;
3139 len
= stream
->metadata_cache
->metadata_written
-
3140 stream
->metadata_in
;
3143 reserve_len
= min_t(size_t,
3144 stream
->transport
->ops
.priv
->packet_avail_size(chan
),
3146 lib_ring_buffer_ctx_init(&ctx
, chan
, reserve_len
,
3147 sizeof(char), NULL
);
3149 * If reservation failed, return an error to the caller.
3151 ret
= stream
->transport
->ops
.event_reserve(&ctx
);
3153 printk(KERN_WARNING
"LTTng: Metadata event reservation failed\n");
3154 stream
->coherent
= false;
3157 stream
->transport
->ops
.event_write(&ctx
,
3158 stream
->metadata_cache
->data
+ stream
->metadata_in
,
3160 stream
->transport
->ops
.event_commit(&ctx
);
3161 stream
->metadata_in
+= reserve_len
;
3162 if (reserve_len
< len
)
3163 stream
->coherent
= false;
3165 stream
->coherent
= true;
3170 *coherent
= stream
->coherent
;
3171 mutex_unlock(&stream
->metadata_cache
->lock
);
3176 void lttng_metadata_begin(struct lttng_kernel_session
*session
)
3178 if (atomic_inc_return(&session
->priv
->metadata_cache
->producing
) == 1)
3179 mutex_lock(&session
->priv
->metadata_cache
->lock
);
3183 void lttng_metadata_end(struct lttng_kernel_session
*session
)
3185 WARN_ON_ONCE(!atomic_read(&session
->priv
->metadata_cache
->producing
));
3186 if (atomic_dec_return(&session
->priv
->metadata_cache
->producing
) == 0) {
3187 struct lttng_metadata_stream
*stream
;
3189 list_for_each_entry(stream
, &session
->priv
->metadata_cache
->metadata_stream
, node
)
3190 wake_up_interruptible(&stream
->read_wait
);
3191 mutex_unlock(&session
->priv
->metadata_cache
->lock
);
3196 * Write the metadata to the metadata cache.
3197 * Must be called with sessions_mutex held.
3198 * The metadata cache lock protects us from concurrent read access from
3199 * thread outputting metadata content to ring buffer.
3200 * The content of the printf is printed as a single atomic metadata
3204 int lttng_metadata_printf(struct lttng_kernel_session
*session
,
3205 const char *fmt
, ...)
3211 WARN_ON_ONCE(!LTTNG_READ_ONCE(session
->active
));
3214 str
= kvasprintf(GFP_KERNEL
, fmt
, ap
);
3220 WARN_ON_ONCE(!atomic_read(&session
->priv
->metadata_cache
->producing
));
3221 if (session
->priv
->metadata_cache
->metadata_written
+ len
>
3222 session
->priv
->metadata_cache
->cache_alloc
) {
3223 char *tmp_cache_realloc
;
3224 unsigned int tmp_cache_alloc_size
;
3226 tmp_cache_alloc_size
= max_t(unsigned int,
3227 session
->priv
->metadata_cache
->cache_alloc
+ len
,
3228 session
->priv
->metadata_cache
->cache_alloc
<< 1);
3229 tmp_cache_realloc
= vzalloc(tmp_cache_alloc_size
);
3230 if (!tmp_cache_realloc
)
3232 if (session
->priv
->metadata_cache
->data
) {
3233 memcpy(tmp_cache_realloc
,
3234 session
->priv
->metadata_cache
->data
,
3235 session
->priv
->metadata_cache
->cache_alloc
);
3236 vfree(session
->priv
->metadata_cache
->data
);
3239 session
->priv
->metadata_cache
->cache_alloc
= tmp_cache_alloc_size
;
3240 session
->priv
->metadata_cache
->data
= tmp_cache_realloc
;
3242 memcpy(session
->priv
->metadata_cache
->data
+
3243 session
->priv
->metadata_cache
->metadata_written
,
3245 session
->priv
->metadata_cache
->metadata_written
+= len
;
3256 int print_tabs(struct lttng_kernel_session
*session
, size_t nesting
)
3260 for (i
= 0; i
< nesting
; i
++) {
3263 ret
= lttng_metadata_printf(session
, " ");
3272 int lttng_field_name_statedump(struct lttng_kernel_session
*session
,
3273 const struct lttng_kernel_event_field
*field
,
3276 return lttng_metadata_printf(session
, " _%s;\n", field
->name
);
3280 int _lttng_integer_type_statedump(struct lttng_kernel_session
*session
,
3281 const struct lttng_kernel_type_integer
*type
,
3282 enum lttng_kernel_string_encoding parent_encoding
,
3287 ret
= print_tabs(session
, nesting
);
3290 ret
= lttng_metadata_printf(session
,
3291 "integer { size = %u; align = %u; signed = %u; encoding = %s; base = %u;%s }",
3295 (parent_encoding
== lttng_kernel_string_encoding_none
)
3297 : (parent_encoding
== lttng_kernel_string_encoding_UTF8
)
3301 #if __BYTE_ORDER == __BIG_ENDIAN
3302 type
->reverse_byte_order
? " byte_order = le;" : ""
3304 type
->reverse_byte_order
? " byte_order = be;" : ""
3311 * Must be called with sessions_mutex held.
3314 int _lttng_struct_type_statedump(struct lttng_kernel_session
*session
,
3315 const struct lttng_kernel_type_struct
*type
,
3318 const char *prev_field_name
= NULL
;
3320 uint32_t i
, nr_fields
;
3321 unsigned int alignment
;
3323 ret
= print_tabs(session
, nesting
);
3326 ret
= lttng_metadata_printf(session
,
3330 nr_fields
= type
->nr_fields
;
3331 for (i
= 0; i
< nr_fields
; i
++) {
3332 const struct lttng_kernel_event_field
*iter_field
;
3334 iter_field
= type
->fields
[i
];
3335 ret
= _lttng_field_statedump(session
, iter_field
, nesting
+ 1, &prev_field_name
);
3339 ret
= print_tabs(session
, nesting
);
3342 alignment
= type
->alignment
;
3344 ret
= lttng_metadata_printf(session
,
3348 ret
= lttng_metadata_printf(session
,
3355 * Must be called with sessions_mutex held.
3358 int _lttng_struct_field_statedump(struct lttng_kernel_session
*session
,
3359 const struct lttng_kernel_event_field
*field
,
3364 ret
= _lttng_struct_type_statedump(session
,
3365 lttng_kernel_get_type_struct(field
->type
), nesting
);
3368 return lttng_field_name_statedump(session
, field
, nesting
);
3372 * Must be called with sessions_mutex held.
3375 int _lttng_variant_type_statedump(struct lttng_kernel_session
*session
,
3376 const struct lttng_kernel_type_variant
*type
,
3378 const char *prev_field_name
)
3380 const char *tag_name
;
3382 uint32_t i
, nr_choices
;
3384 tag_name
= type
->tag_name
;
3386 tag_name
= prev_field_name
;
3390 * CTF 1.8 does not allow expressing nonzero variant alignment in a nestable way.
3392 if (type
->alignment
!= 0)
3394 ret
= print_tabs(session
, nesting
);
3397 ret
= lttng_metadata_printf(session
,
3398 "variant <_%s> {\n",
3402 nr_choices
= type
->nr_choices
;
3403 for (i
= 0; i
< nr_choices
; i
++) {
3404 const struct lttng_kernel_event_field
*iter_field
;
3406 iter_field
= type
->choices
[i
];
3407 ret
= _lttng_field_statedump(session
, iter_field
, nesting
+ 1, NULL
);
3411 ret
= print_tabs(session
, nesting
);
3414 ret
= lttng_metadata_printf(session
,
3420 * Must be called with sessions_mutex held.
3423 int _lttng_variant_field_statedump(struct lttng_kernel_session
*session
,
3424 const struct lttng_kernel_event_field
*field
,
3426 const char *prev_field_name
)
3430 ret
= _lttng_variant_type_statedump(session
,
3431 lttng_kernel_get_type_variant(field
->type
), nesting
,
3435 return lttng_field_name_statedump(session
, field
, nesting
);
3439 * Must be called with sessions_mutex held.
3442 int _lttng_array_field_statedump(struct lttng_kernel_session
*session
,
3443 const struct lttng_kernel_event_field
*field
,
3447 const struct lttng_kernel_type_array
*array_type
;
3448 const struct lttng_kernel_type_common
*elem_type
;
3450 array_type
= lttng_kernel_get_type_array(field
->type
);
3451 WARN_ON_ONCE(!array_type
);
3453 if (array_type
->alignment
) {
3454 ret
= print_tabs(session
, nesting
);
3457 ret
= lttng_metadata_printf(session
,
3458 "struct { } align(%u) _%s_padding;\n",
3459 array_type
->alignment
* CHAR_BIT
,
3465 * Nested compound types: Only array of structures and variants are
3466 * currently supported.
3468 elem_type
= array_type
->elem_type
;
3469 switch (elem_type
->type
) {
3470 case lttng_kernel_type_integer
:
3471 case lttng_kernel_type_struct
:
3472 case lttng_kernel_type_variant
:
3473 ret
= _lttng_type_statedump(session
, elem_type
,
3474 array_type
->encoding
, nesting
);
3482 ret
= lttng_metadata_printf(session
,
3485 array_type
->length
);
3490 * Must be called with sessions_mutex held.
3493 int _lttng_sequence_field_statedump(struct lttng_kernel_session
*session
,
3494 const struct lttng_kernel_event_field
*field
,
3496 const char *prev_field_name
)
3499 const char *length_name
;
3500 const struct lttng_kernel_type_sequence
*sequence_type
;
3501 const struct lttng_kernel_type_common
*elem_type
;
3503 sequence_type
= lttng_kernel_get_type_sequence(field
->type
);
3504 WARN_ON_ONCE(!sequence_type
);
3506 length_name
= sequence_type
->length_name
;
3508 length_name
= prev_field_name
;
3512 if (sequence_type
->alignment
) {
3513 ret
= print_tabs(session
, nesting
);
3516 ret
= lttng_metadata_printf(session
,
3517 "struct { } align(%u) _%s_padding;\n",
3518 sequence_type
->alignment
* CHAR_BIT
,
3525 * Nested compound types: Only array of structures and variants are
3526 * currently supported.
3528 elem_type
= sequence_type
->elem_type
;
3529 switch (elem_type
->type
) {
3530 case lttng_kernel_type_integer
:
3531 case lttng_kernel_type_struct
:
3532 case lttng_kernel_type_variant
:
3533 ret
= _lttng_type_statedump(session
, elem_type
,
3534 sequence_type
->encoding
, nesting
);
3542 ret
= lttng_metadata_printf(session
,
3550 * Must be called with sessions_mutex held.
3553 int _lttng_enum_type_statedump(struct lttng_kernel_session
*session
,
3554 const struct lttng_kernel_type_enum
*type
,
3557 const struct lttng_kernel_enum_desc
*enum_desc
;
3558 const struct lttng_kernel_type_common
*container_type
;
3560 unsigned int i
, nr_entries
;
3562 container_type
= type
->container_type
;
3563 if (container_type
->type
!= lttng_kernel_type_integer
) {
3567 enum_desc
= type
->desc
;
3568 nr_entries
= enum_desc
->nr_entries
;
3570 ret
= print_tabs(session
, nesting
);
3573 ret
= lttng_metadata_printf(session
, "enum : ");
3576 ret
= _lttng_integer_type_statedump(session
, lttng_kernel_get_type_integer(container_type
),
3577 lttng_kernel_string_encoding_none
, 0);
3580 ret
= lttng_metadata_printf(session
, " {\n");
3583 /* Dump all entries */
3584 for (i
= 0; i
< nr_entries
; i
++) {
3585 const struct lttng_kernel_enum_entry
*entry
= enum_desc
->entries
[i
];
3588 ret
= print_tabs(session
, nesting
+ 1);
3591 ret
= lttng_metadata_printf(session
,
3595 len
= strlen(entry
->string
);
3596 /* Escape the character '"' */
3597 for (j
= 0; j
< len
; j
++) {
3598 char c
= entry
->string
[j
];
3602 ret
= lttng_metadata_printf(session
,
3606 ret
= lttng_metadata_printf(session
,
3610 ret
= lttng_metadata_printf(session
,
3617 ret
= lttng_metadata_printf(session
, "\"");
3621 if (entry
->options
.is_auto
) {
3622 ret
= lttng_metadata_printf(session
, ",\n");
3626 ret
= lttng_metadata_printf(session
,
3630 if (entry
->start
.signedness
)
3631 ret
= lttng_metadata_printf(session
,
3632 "%lld", (long long) entry
->start
.value
);
3634 ret
= lttng_metadata_printf(session
,
3635 "%llu", entry
->start
.value
);
3638 if (entry
->start
.signedness
== entry
->end
.signedness
&&
3640 == entry
->end
.value
) {
3641 ret
= lttng_metadata_printf(session
,
3644 if (entry
->end
.signedness
) {
3645 ret
= lttng_metadata_printf(session
,
3647 (long long) entry
->end
.value
);
3649 ret
= lttng_metadata_printf(session
,
3658 ret
= print_tabs(session
, nesting
);
3661 ret
= lttng_metadata_printf(session
, "}");
3667 * Must be called with sessions_mutex held.
3670 int _lttng_enum_field_statedump(struct lttng_kernel_session
*session
,
3671 const struct lttng_kernel_event_field
*field
,
3675 const struct lttng_kernel_type_enum
*enum_type
;
3677 enum_type
= lttng_kernel_get_type_enum(field
->type
);
3678 WARN_ON_ONCE(!enum_type
);
3679 ret
= _lttng_enum_type_statedump(session
, enum_type
, nesting
);
3682 return lttng_field_name_statedump(session
, field
, nesting
);
3686 int _lttng_integer_field_statedump(struct lttng_kernel_session
*session
,
3687 const struct lttng_kernel_event_field
*field
,
3692 ret
= _lttng_integer_type_statedump(session
, lttng_kernel_get_type_integer(field
->type
),
3693 lttng_kernel_string_encoding_none
, nesting
);
3696 return lttng_field_name_statedump(session
, field
, nesting
);
3700 int _lttng_string_type_statedump(struct lttng_kernel_session
*session
,
3701 const struct lttng_kernel_type_string
*type
,
3706 /* Default encoding is UTF8 */
3707 ret
= print_tabs(session
, nesting
);
3710 ret
= lttng_metadata_printf(session
,
3712 type
->encoding
== lttng_kernel_string_encoding_ASCII
?
3713 " { encoding = ASCII; }" : "");
3718 int _lttng_string_field_statedump(struct lttng_kernel_session
*session
,
3719 const struct lttng_kernel_event_field
*field
,
3722 const struct lttng_kernel_type_string
*string_type
;
3725 string_type
= lttng_kernel_get_type_string(field
->type
);
3726 WARN_ON_ONCE(!string_type
);
3727 ret
= _lttng_string_type_statedump(session
, string_type
, nesting
);
3730 return lttng_field_name_statedump(session
, field
, nesting
);
3734 * Must be called with sessions_mutex held.
3737 int _lttng_type_statedump(struct lttng_kernel_session
*session
,
3738 const struct lttng_kernel_type_common
*type
,
3739 enum lttng_kernel_string_encoding parent_encoding
,
3744 switch (type
->type
) {
3745 case lttng_kernel_type_integer
:
3746 ret
= _lttng_integer_type_statedump(session
,
3747 lttng_kernel_get_type_integer(type
),
3748 parent_encoding
, nesting
);
3750 case lttng_kernel_type_enum
:
3751 ret
= _lttng_enum_type_statedump(session
,
3752 lttng_kernel_get_type_enum(type
),
3755 case lttng_kernel_type_string
:
3756 ret
= _lttng_string_type_statedump(session
,
3757 lttng_kernel_get_type_string(type
),
3760 case lttng_kernel_type_struct
:
3761 ret
= _lttng_struct_type_statedump(session
,
3762 lttng_kernel_get_type_struct(type
),
3765 case lttng_kernel_type_variant
:
3766 ret
= _lttng_variant_type_statedump(session
,
3767 lttng_kernel_get_type_variant(type
),
3771 /* Nested arrays and sequences are not supported yet. */
3772 case lttng_kernel_type_array
:
3773 case lttng_kernel_type_sequence
:
3782 * Must be called with sessions_mutex held.
3785 int _lttng_field_statedump(struct lttng_kernel_session
*session
,
3786 const struct lttng_kernel_event_field
*field
,
3788 const char **prev_field_name_p
)
3790 const char *prev_field_name
= NULL
;
3793 if (prev_field_name_p
)
3794 prev_field_name
= *prev_field_name_p
;
3795 switch (field
->type
->type
) {
3796 case lttng_kernel_type_integer
:
3797 ret
= _lttng_integer_field_statedump(session
, field
, nesting
);
3799 case lttng_kernel_type_enum
:
3800 ret
= _lttng_enum_field_statedump(session
, field
, nesting
);
3802 case lttng_kernel_type_string
:
3803 ret
= _lttng_string_field_statedump(session
, field
, nesting
);
3805 case lttng_kernel_type_struct
:
3806 ret
= _lttng_struct_field_statedump(session
, field
, nesting
);
3808 case lttng_kernel_type_array
:
3809 ret
= _lttng_array_field_statedump(session
, field
, nesting
);
3811 case lttng_kernel_type_sequence
:
3812 ret
= _lttng_sequence_field_statedump(session
, field
, nesting
, prev_field_name
);
3814 case lttng_kernel_type_variant
:
3815 ret
= _lttng_variant_field_statedump(session
, field
, nesting
, prev_field_name
);
3822 if (prev_field_name_p
)
3823 *prev_field_name_p
= field
->name
;
3828 int _lttng_context_metadata_statedump(struct lttng_kernel_session
*session
,
3829 struct lttng_kernel_ctx
*ctx
)
3831 const char *prev_field_name
= NULL
;
3837 for (i
= 0; i
< ctx
->nr_fields
; i
++) {
3838 const struct lttng_kernel_ctx_field
*field
= &ctx
->fields
[i
];
3840 ret
= _lttng_field_statedump(session
, field
->event_field
, 2, &prev_field_name
);
3848 int _lttng_fields_metadata_statedump(struct lttng_kernel_session
*session
,
3849 struct lttng_kernel_event_recorder
*event_recorder
)
3851 const char *prev_field_name
= NULL
;
3852 const struct lttng_kernel_event_desc
*desc
= event_recorder
->priv
->parent
.parent
.desc
;
3856 for (i
= 0; i
< desc
->tp_class
->nr_fields
; i
++) {
3857 const struct lttng_kernel_event_field
*field
= desc
->tp_class
->fields
[i
];
3859 ret
= _lttng_field_statedump(session
, field
, 2, &prev_field_name
);
3867 * Must be called with sessions_mutex held.
3868 * The entire event metadata is printed as a single atomic metadata
3872 int _lttng_event_recorder_metadata_statedump(struct lttng_kernel_event_common
*event
)
3874 struct lttng_kernel_event_recorder
*event_recorder
;
3875 struct lttng_kernel_channel_buffer
*chan
;
3876 struct lttng_kernel_session
*session
;
3879 if (event
->type
!= LTTNG_KERNEL_EVENT_TYPE_RECORDER
)
3881 event_recorder
= container_of(event
, struct lttng_kernel_event_recorder
, parent
);
3882 chan
= event_recorder
->chan
;
3883 session
= chan
->parent
.session
;
3885 if (event_recorder
->priv
->metadata_dumped
|| !LTTNG_READ_ONCE(session
->active
))
3887 if (chan
->priv
->channel_type
== METADATA_CHANNEL
)
3890 lttng_metadata_begin(session
);
3892 ret
= lttng_metadata_printf(session
,
3896 " stream_id = %u;\n",
3897 event_recorder
->priv
->parent
.parent
.desc
->event_name
,
3898 event_recorder
->priv
->parent
.id
,
3899 event_recorder
->chan
->priv
->id
);
3903 ret
= lttng_metadata_printf(session
,
3904 " fields := struct {\n"
3909 ret
= _lttng_fields_metadata_statedump(session
, event_recorder
);
3914 * LTTng space reservation can only reserve multiples of the
3917 ret
= lttng_metadata_printf(session
,
3923 event_recorder
->priv
->metadata_dumped
= 1;
3925 lttng_metadata_end(session
);
3931 * Must be called with sessions_mutex held.
3932 * The entire channel metadata is printed as a single atomic metadata
3936 int _lttng_channel_metadata_statedump(struct lttng_kernel_session
*session
,
3937 struct lttng_kernel_channel_buffer
*chan
)
3941 if (chan
->priv
->metadata_dumped
|| !LTTNG_READ_ONCE(session
->active
))
3944 if (chan
->priv
->channel_type
== METADATA_CHANNEL
)
3947 lttng_metadata_begin(session
);
3949 WARN_ON_ONCE(!chan
->priv
->header_type
);
3950 ret
= lttng_metadata_printf(session
,
3953 " event.header := %s;\n"
3954 " packet.context := struct packet_context;\n",
3956 chan
->priv
->header_type
== 1 ? "struct event_header_compact" :
3957 "struct event_header_large");
3961 if (chan
->priv
->ctx
) {
3962 ret
= lttng_metadata_printf(session
,
3963 " event.context := struct {\n");
3967 ret
= _lttng_context_metadata_statedump(session
, chan
->priv
->ctx
);
3970 if (chan
->priv
->ctx
) {
3971 ret
= lttng_metadata_printf(session
,
3977 ret
= lttng_metadata_printf(session
,
3980 chan
->priv
->metadata_dumped
= 1;
3982 lttng_metadata_end(session
);
3987 * Must be called with sessions_mutex held.
/*
 * Declare the CTF packet context structure shared by all streams.
 * Must be called with sessions_mutex held.
 *
 * NOTE(review): closing of the struct was reconstructed (dropped by the
 * extraction); field list matches the visible fragments.
 */
static
int _lttng_stream_packet_context_declare(struct lttng_kernel_session *session)
{
	return lttng_metadata_printf(session,
		"struct packet_context {\n"
		"	uint64_clock_monotonic_t timestamp_begin;\n"
		"	uint64_clock_monotonic_t timestamp_end;\n"
		"	uint64_t content_size;\n"
		"	uint64_t packet_size;\n"
		"	uint64_t packet_seq_num;\n"
		"	unsigned long events_discarded;\n"
		"	uint32_t cpu_id;\n"
		"};\n\n"
		);
}
4007 * id: range: 0 - 30.
4008 * id 31 is reserved to indicate an extended header.
4011 * id: range: 0 - 65534.
4012 * id 65535 is reserved to indicate an extended header.
4014 * Must be called with sessions_mutex held.
4017 int _lttng_event_header_declare(struct lttng_kernel_session
*session
)
4019 return lttng_metadata_printf(session
,
4020 "struct event_header_compact {\n"
4021 " enum : uint5_t { compact = 0 ... 30, extended = 31 } id;\n"
4024 " uint27_clock_monotonic_t timestamp;\n"
4028 " uint64_clock_monotonic_t timestamp;\n"
4033 "struct event_header_large {\n"
4034 " enum : uint16_t { compact = 0 ... 65534, extended = 65535 } id;\n"
4037 " uint32_clock_monotonic_t timestamp;\n"
4041 " uint64_clock_monotonic_t timestamp;\n"
4045 lttng_alignof(uint32_t) * CHAR_BIT
,
4046 lttng_alignof(uint16_t) * CHAR_BIT
4051 * Approximation of NTP time of day to clock monotonic correlation,
4052 * taken at start of trace.
4053 * Yes, this is only an approximation. Yes, we can (and will) do better
4054 * in future versions.
4055 * This function may return a negative offset. It may happen if the
4056 * system sets the REALTIME clock to 0 after boot.
4058 * Use 64bit timespec on kernels that have it, this makes 32bit arch
4062 int64_t measure_clock_offset(void)
4064 uint64_t monotonic_avg
, monotonic
[2], realtime
;
4065 uint64_t tcf
= trace_clock_freq();
4067 unsigned long flags
;
4068 struct timespec64 rts
= { 0, 0 };
4070 /* Disable interrupts to increase correlation precision. */
4071 local_irq_save(flags
);
4072 monotonic
[0] = trace_clock_read64();
4073 ktime_get_real_ts64(&rts
);
4074 monotonic
[1] = trace_clock_read64();
4075 local_irq_restore(flags
);
4077 monotonic_avg
= (monotonic
[0] + monotonic
[1]) >> 1;
4078 realtime
= (uint64_t) rts
.tv_sec
* tcf
;
4079 if (tcf
== NSEC_PER_SEC
) {
4080 realtime
+= rts
.tv_nsec
;
4082 uint64_t n
= rts
.tv_nsec
* tcf
;
4084 do_div(n
, NSEC_PER_SEC
);
4087 offset
= (int64_t) realtime
- monotonic_avg
;
4092 int print_escaped_ctf_string(struct lttng_kernel_session
*session
, const char *string
)
4100 while (cur
!= '\0') {
4103 ret
= lttng_metadata_printf(session
, "%s", "\\n");
4107 ret
= lttng_metadata_printf(session
, "%c", '\\');
4110 /* We still print the current char */
4113 ret
= lttng_metadata_printf(session
, "%c", cur
);
/*
 * Emit one 'env'-section entry of the form:  <field> = "<field_value>";
 * with the value escaped for CTF string literals.
 */
static
int print_metadata_escaped_field(struct lttng_kernel_session *session, const char *field,
		const char *field_value)
{
	int ret;

	ret = lttng_metadata_printf(session, "	%s = \"", field);
	if (ret)
		goto error;

	ret = print_escaped_ctf_string(session, field_value);
	if (ret)
		goto error;

	ret = lttng_metadata_printf(session, "\";\n");
error:
	return ret;
}
4147 * Output metadata into this session's metadata buffers.
4148 * Must be called with sessions_mutex held.
4151 int _lttng_session_metadata_statedump(struct lttng_kernel_session
*session
)
4153 unsigned char *uuid_c
= session
->priv
->uuid
.b
;
4154 unsigned char uuid_s
[37], clock_uuid_s
[BOOT_ID_LEN
];
4155 const char *product_uuid
;
4156 struct lttng_kernel_channel_common_private
*chan_priv
;
4157 struct lttng_kernel_event_recorder_private
*event_recorder_priv
;
4160 if (!LTTNG_READ_ONCE(session
->active
))
4163 lttng_metadata_begin(session
);
4165 if (session
->priv
->metadata_dumped
)
4168 snprintf(uuid_s
, sizeof(uuid_s
),
4169 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
4170 uuid_c
[0], uuid_c
[1], uuid_c
[2], uuid_c
[3],
4171 uuid_c
[4], uuid_c
[5], uuid_c
[6], uuid_c
[7],
4172 uuid_c
[8], uuid_c
[9], uuid_c
[10], uuid_c
[11],
4173 uuid_c
[12], uuid_c
[13], uuid_c
[14], uuid_c
[15]);
4175 ret
= lttng_metadata_printf(session
,
4176 "typealias integer { size = 8; align = %u; signed = false; } := uint8_t;\n"
4177 "typealias integer { size = 16; align = %u; signed = false; } := uint16_t;\n"
4178 "typealias integer { size = 32; align = %u; signed = false; } := uint32_t;\n"
4179 "typealias integer { size = 64; align = %u; signed = false; } := uint64_t;\n"
4180 "typealias integer { size = %u; align = %u; signed = false; } := unsigned long;\n"
4181 "typealias integer { size = 5; align = 1; signed = false; } := uint5_t;\n"
4182 "typealias integer { size = 27; align = 1; signed = false; } := uint27_t;\n"
4188 " byte_order = %s;\n"
4189 " packet.header := struct {\n"
4190 " uint32_t magic;\n"
4191 " uint8_t uuid[16];\n"
4192 " uint32_t stream_id;\n"
4193 " uint64_t stream_instance_id;\n"
4196 lttng_alignof(uint8_t) * CHAR_BIT
,
4197 lttng_alignof(uint16_t) * CHAR_BIT
,
4198 lttng_alignof(uint32_t) * CHAR_BIT
,
4199 lttng_alignof(uint64_t) * CHAR_BIT
,
4200 sizeof(unsigned long) * CHAR_BIT
,
4201 lttng_alignof(unsigned long) * CHAR_BIT
,
4205 #if __BYTE_ORDER == __BIG_ENDIAN
4214 ret
= lttng_metadata_printf(session
,
4216 " hostname = \"%s\";\n"
4217 " domain = \"kernel\";\n"
4218 " sysname = \"%s\";\n"
4219 " kernel_release = \"%s\";\n"
4220 " kernel_version = \"%s\";\n"
4221 " tracer_name = \"lttng-modules\";\n"
4222 " tracer_major = %d;\n"
4223 " tracer_minor = %d;\n"
4224 " tracer_patchlevel = %d;\n"
4225 " trace_buffering_scheme = \"global\";\n",
4226 current
->nsproxy
->uts_ns
->name
.nodename
,
4230 LTTNG_MODULES_MAJOR_VERSION
,
4231 LTTNG_MODULES_MINOR_VERSION
,
4232 LTTNG_MODULES_PATCHLEVEL_VERSION
4237 ret
= print_metadata_escaped_field(session
, "trace_name", session
->priv
->name
);
4240 ret
= print_metadata_escaped_field(session
, "trace_creation_datetime",
4241 session
->priv
->creation_time
);
4245 /* Add the product UUID to the 'env' section */
4246 product_uuid
= dmi_get_system_info(DMI_PRODUCT_UUID
);
4248 ret
= lttng_metadata_printf(session
,
4249 " product_uuid = \"%s\";\n",
4256 /* Close the 'env' section */
4257 ret
= lttng_metadata_printf(session
, "};\n\n");
4261 ret
= lttng_metadata_printf(session
,
4263 " name = \"%s\";\n",
4269 if (!trace_clock_uuid(clock_uuid_s
)) {
4270 ret
= lttng_metadata_printf(session
,
4271 " uuid = \"%s\";\n",
4278 ret
= lttng_metadata_printf(session
,
4279 " description = \"%s\";\n"
4280 " freq = %llu; /* Frequency, in Hz */\n"
4281 " /* clock value offset from Epoch is: offset * (1/freq) */\n"
4284 trace_clock_description(),
4285 (unsigned long long) trace_clock_freq(),
4286 (long long) measure_clock_offset()
4291 ret
= lttng_metadata_printf(session
,
4292 "typealias integer {\n"
4293 " size = 27; align = 1; signed = false;\n"
4294 " map = clock.%s.value;\n"
4295 "} := uint27_clock_monotonic_t;\n"
4297 "typealias integer {\n"
4298 " size = 32; align = %u; signed = false;\n"
4299 " map = clock.%s.value;\n"
4300 "} := uint32_clock_monotonic_t;\n"
4302 "typealias integer {\n"
4303 " size = 64; align = %u; signed = false;\n"
4304 " map = clock.%s.value;\n"
4305 "} := uint64_clock_monotonic_t;\n\n",
4307 lttng_alignof(uint32_t) * CHAR_BIT
,
4309 lttng_alignof(uint64_t) * CHAR_BIT
,
4315 ret
= _lttng_stream_packet_context_declare(session
);
4319 ret
= _lttng_event_header_declare(session
);
4324 list_for_each_entry(chan_priv
, &session
->priv
->chan_head
, node
) {
4325 struct lttng_kernel_channel_buffer_private
*chan_buf_priv
;
4327 if (chan_priv
->pub
->type
!= LTTNG_KERNEL_CHANNEL_TYPE_BUFFER
)
4329 chan_buf_priv
= container_of(chan_priv
, struct lttng_kernel_channel_buffer_private
, parent
);
4330 ret
= _lttng_channel_metadata_statedump(session
, chan_buf_priv
->pub
);
4335 list_for_each_entry(event_recorder_priv
, &session
->priv
->events_head
, parent
.parent
.node
) {
4336 ret
= _lttng_event_recorder_metadata_statedump(&event_recorder_priv
->pub
->parent
);
4340 session
->priv
->metadata_dumped
= 1;
4342 lttng_metadata_end(session
);
4347 * lttng_transport_register - LTT transport registration
4348 * @transport: transport structure
4350 * Registers a transport which can be used as output to extract the data out of
4351 * LTTng. The module calling this registration function must ensure that no
4352 * trap-inducing code will be executed by the transport functions. E.g.
4353 * vmalloc_sync_mappings() must be called between a vmalloc and the moment the memory
4354 * is made visible to the transport function. This registration acts as a
4355 * vmalloc_sync_mappings. Therefore, only if the module allocates virtual memory
4356 * after its registration must it synchronize the TLBs.
4358 void lttng_transport_register(struct lttng_transport
*transport
)
4361 * Make sure no page fault can be triggered by the module about to be
4362 * registered. We deal with this here so we don't have to call
4363 * vmalloc_sync_mappings() in each module's init.
4365 wrapper_vmalloc_sync_mappings();
4367 mutex_lock(&sessions_mutex
);
4368 list_add_tail(&transport
->node
, <tng_transport_list
);
4369 mutex_unlock(&sessions_mutex
);
4371 EXPORT_SYMBOL_GPL(lttng_transport_register
);
4374 * lttng_transport_unregister - LTT transport unregistration
4375 * @transport: transport structure
4377 void lttng_transport_unregister(struct lttng_transport
*transport
)
4379 mutex_lock(&sessions_mutex
);
4380 list_del(&transport
->node
);
4381 mutex_unlock(&sessions_mutex
);
4383 EXPORT_SYMBOL_GPL(lttng_transport_unregister
);
4385 void lttng_counter_transport_register(struct lttng_counter_transport
*transport
)
4388 * Make sure no page fault can be triggered by the module about to be
4389 * registered. We deal with this here so we don't have to call
4390 * vmalloc_sync_mappings() in each module's init.
4392 wrapper_vmalloc_sync_mappings();
4394 mutex_lock(&sessions_mutex
);
4395 list_add_tail(&transport
->node
, <tng_counter_transport_list
);
4396 mutex_unlock(&sessions_mutex
);
4398 EXPORT_SYMBOL_GPL(lttng_counter_transport_register
);
4400 void lttng_counter_transport_unregister(struct lttng_counter_transport
*transport
)
4402 mutex_lock(&sessions_mutex
);
4403 list_del(&transport
->node
);
4404 mutex_unlock(&sessions_mutex
);
4406 EXPORT_SYMBOL_GPL(lttng_counter_transport_unregister
);
4408 struct lttng_kernel_channel_buffer
*lttng_kernel_alloc_channel_buffer(void)
4410 struct lttng_kernel_channel_buffer
*lttng_chan_buf
;
4411 struct lttng_kernel_channel_common
*lttng_chan_common
;
4412 struct lttng_kernel_channel_buffer_private
*lttng_chan_buf_priv
;
4414 lttng_chan_buf
= kzalloc(sizeof(struct lttng_kernel_channel_buffer
), GFP_KERNEL
);
4415 if (!lttng_chan_buf
)
4417 lttng_chan_buf_priv
= kzalloc(sizeof(struct lttng_kernel_channel_buffer_private
), GFP_KERNEL
);
4418 if (!lttng_chan_buf_priv
)
4420 lttng_chan_common
= <tng_chan_buf
->parent
;
4421 lttng_chan_common
->type
= LTTNG_KERNEL_CHANNEL_TYPE_BUFFER
;
4422 lttng_chan_buf
->priv
= lttng_chan_buf_priv
;
4423 lttng_chan_common
->priv
= <tng_chan_buf_priv
->parent
;
4424 lttng_chan_buf_priv
->pub
= lttng_chan_buf
;
4425 lttng_chan_buf_priv
->parent
.pub
= lttng_chan_common
;
4426 return lttng_chan_buf
;
4429 kfree(lttng_chan_buf
);
4433 EXPORT_SYMBOL_GPL(lttng_kernel_alloc_channel_buffer
);
4435 struct lttng_kernel_channel_counter
*lttng_kernel_alloc_channel_counter(void)
4437 struct lttng_kernel_channel_counter
*lttng_chan_counter
;
4438 struct lttng_kernel_channel_common
*lttng_chan_common
;
4439 struct lttng_kernel_channel_counter_private
*lttng_chan_counter_priv
;
4441 lttng_chan_counter
= kzalloc(sizeof(struct lttng_kernel_channel_counter
), GFP_KERNEL
);
4442 if (!lttng_chan_counter
)
4444 lttng_chan_counter_priv
= kzalloc(sizeof(struct lttng_kernel_channel_counter_private
), GFP_KERNEL
);
4445 if (!lttng_chan_counter_priv
)
4447 lttng_chan_common
= <tng_chan_counter
->parent
;
4448 lttng_chan_common
->type
= LTTNG_KERNEL_CHANNEL_TYPE_COUNTER
;
4449 lttng_chan_counter
->priv
= lttng_chan_counter_priv
;
4450 lttng_chan_common
->priv
= <tng_chan_counter_priv
->parent
;
4451 lttng_chan_counter_priv
->pub
= lttng_chan_counter
;
4452 lttng_chan_counter_priv
->parent
.pub
= lttng_chan_common
;
4453 return lttng_chan_counter
;
4456 kfree(lttng_chan_counter
);
4460 EXPORT_SYMBOL_GPL(lttng_kernel_alloc_channel_counter
);
4462 void lttng_kernel_free_channel_common(struct lttng_kernel_channel_common
*chan
)
4464 switch (chan
->type
) {
4465 case LTTNG_KERNEL_CHANNEL_TYPE_BUFFER
:
4467 struct lttng_kernel_channel_buffer
*chan_buf
= container_of(chan
,
4468 struct lttng_kernel_channel_buffer
, parent
);
4470 kfree(chan_buf
->priv
);
4474 case LTTNG_KERNEL_CHANNEL_TYPE_COUNTER
:
4476 struct lttng_kernel_channel_counter
*chan_counter
= container_of(chan
,
4477 struct lttng_kernel_channel_counter
, parent
);
4479 kfree(chan_counter
->priv
);
4480 kfree(chan_counter
);
4487 EXPORT_SYMBOL_GPL(lttng_kernel_free_channel_common
);
4489 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
4491 enum cpuhp_state lttng_hp_prepare
;
4492 enum cpuhp_state lttng_hp_online
;
4494 static int lttng_hotplug_prepare(unsigned int cpu
, struct hlist_node
*node
)
4496 struct lttng_cpuhp_node
*lttng_node
;
4498 lttng_node
= container_of(node
, struct lttng_cpuhp_node
, node
);
4499 switch (lttng_node
->component
) {
4500 case LTTNG_RING_BUFFER_FRONTEND
:
4502 case LTTNG_RING_BUFFER_BACKEND
:
4503 return lttng_cpuhp_rb_backend_prepare(cpu
, lttng_node
);
4504 case LTTNG_RING_BUFFER_ITER
:
4506 case LTTNG_CONTEXT_PERF_COUNTERS
:
4513 static int lttng_hotplug_dead(unsigned int cpu
, struct hlist_node
*node
)
4515 struct lttng_cpuhp_node
*lttng_node
;
4517 lttng_node
= container_of(node
, struct lttng_cpuhp_node
, node
);
4518 switch (lttng_node
->component
) {
4519 case LTTNG_RING_BUFFER_FRONTEND
:
4520 return lttng_cpuhp_rb_frontend_dead(cpu
, lttng_node
);
4521 case LTTNG_RING_BUFFER_BACKEND
:
4523 case LTTNG_RING_BUFFER_ITER
:
4525 case LTTNG_CONTEXT_PERF_COUNTERS
:
4526 return lttng_cpuhp_perf_counter_dead(cpu
, lttng_node
);
4532 static int lttng_hotplug_online(unsigned int cpu
, struct hlist_node
*node
)
4534 struct lttng_cpuhp_node
*lttng_node
;
4536 lttng_node
= container_of(node
, struct lttng_cpuhp_node
, node
);
4537 switch (lttng_node
->component
) {
4538 case LTTNG_RING_BUFFER_FRONTEND
:
4539 return lttng_cpuhp_rb_frontend_online(cpu
, lttng_node
);
4540 case LTTNG_RING_BUFFER_BACKEND
:
4542 case LTTNG_RING_BUFFER_ITER
:
4543 return lttng_cpuhp_rb_iter_online(cpu
, lttng_node
);
4544 case LTTNG_CONTEXT_PERF_COUNTERS
:
4545 return lttng_cpuhp_perf_counter_online(cpu
, lttng_node
);
4551 static int lttng_hotplug_offline(unsigned int cpu
, struct hlist_node
*node
)
4553 struct lttng_cpuhp_node
*lttng_node
;
4555 lttng_node
= container_of(node
, struct lttng_cpuhp_node
, node
);
4556 switch (lttng_node
->component
) {
4557 case LTTNG_RING_BUFFER_FRONTEND
:
4558 return lttng_cpuhp_rb_frontend_offline(cpu
, lttng_node
);
4559 case LTTNG_RING_BUFFER_BACKEND
:
4561 case LTTNG_RING_BUFFER_ITER
:
4563 case LTTNG_CONTEXT_PERF_COUNTERS
:
4570 static int __init
lttng_init_cpu_hotplug(void)
4574 ret
= cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN
, "lttng:prepare",
4575 lttng_hotplug_prepare
,
4576 lttng_hotplug_dead
);
4580 lttng_hp_prepare
= ret
;
4581 lttng_rb_set_hp_prepare(ret
);
4583 ret
= cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN
, "lttng:online",
4584 lttng_hotplug_online
,
4585 lttng_hotplug_offline
);
4587 cpuhp_remove_multi_state(lttng_hp_prepare
);
4588 lttng_hp_prepare
= 0;
4591 lttng_hp_online
= ret
;
4592 lttng_rb_set_hp_online(ret
);
4597 static void __exit
lttng_exit_cpu_hotplug(void)
4599 lttng_rb_set_hp_online(0);
4600 cpuhp_remove_multi_state(lttng_hp_online
);
4601 lttng_rb_set_hp_prepare(0);
4602 cpuhp_remove_multi_state(lttng_hp_prepare
);
4605 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
/* Pre-4.10 kernels: no multi-instance hotplug states; stubs only. */
static int lttng_init_cpu_hotplug(void)
{
	return 0;
}
static void lttng_exit_cpu_hotplug(void)
{
}
4613 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
4615 static int __init
lttng_events_init(void)
4619 ret
= wrapper_get_pfnblock_flags_mask_init();
4622 ret
= lttng_probes_init();
4625 ret
= lttng_context_init();
4628 ret
= lttng_tracepoint_init();
4631 event_recorder_cache
= KMEM_CACHE(lttng_kernel_event_recorder
, 0);
4632 if (!event_recorder_cache
) {
4634 goto error_kmem_event_recorder
;
4636 event_recorder_private_cache
= KMEM_CACHE(lttng_kernel_event_recorder_private
, 0);
4637 if (!event_recorder_private_cache
) {
4639 goto error_kmem_event_recorder_private
;
4641 event_counter_cache
= KMEM_CACHE(lttng_kernel_event_counter
, 0);
4642 if (!event_counter_cache
) {
4644 goto error_kmem_event_counter
;
4646 event_counter_private_cache
= KMEM_CACHE(lttng_kernel_event_counter_private
, 0);
4647 if (!event_counter_private_cache
) {
4649 goto error_kmem_event_counter_private
;
4651 event_notifier_cache
= KMEM_CACHE(lttng_kernel_event_notifier
, 0);
4652 if (!event_notifier_cache
) {
4654 goto error_kmem_event_notifier
;
4656 event_notifier_private_cache
= KMEM_CACHE(lttng_kernel_event_notifier_private
, 0);
4657 if (!event_notifier_private_cache
) {
4659 goto error_kmem_event_notifier_private
;
4661 ret
= lttng_abi_init();
4664 ret
= lttng_logger_init();
4667 ret
= lttng_init_cpu_hotplug();
4670 printk(KERN_NOTICE
"LTTng: Loaded modules v%s.%s.%s%s (%s)%s%s\n",
4671 __stringify(LTTNG_MODULES_MAJOR_VERSION
),
4672 __stringify(LTTNG_MODULES_MINOR_VERSION
),
4673 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION
),
4674 LTTNG_MODULES_EXTRAVERSION
,
4676 #ifdef LTTNG_EXTRA_VERSION_GIT
4677 LTTNG_EXTRA_VERSION_GIT
[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT
,
4681 #ifdef LTTNG_EXTRA_VERSION_NAME
4682 LTTNG_EXTRA_VERSION_NAME
[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME
);
4686 #ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
4687 printk(KERN_NOTICE
"LTTng: Experimental bitwise enum enabled.\n");
4688 #endif /* CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM */
4692 lttng_logger_exit();
4696 kmem_cache_destroy(event_notifier_private_cache
);
4697 error_kmem_event_notifier_private
:
4698 kmem_cache_destroy(event_notifier_cache
);
4699 error_kmem_event_notifier
:
4700 kmem_cache_destroy(event_counter_private_cache
);
4701 error_kmem_event_counter_private
:
4702 kmem_cache_destroy(event_counter_cache
);
4703 error_kmem_event_counter
:
4704 kmem_cache_destroy(event_recorder_private_cache
);
4705 error_kmem_event_recorder_private
:
4706 kmem_cache_destroy(event_recorder_cache
);
4707 error_kmem_event_recorder
:
4708 lttng_tracepoint_exit();
4710 lttng_context_exit();
4711 printk(KERN_NOTICE
"LTTng: Failed to load modules v%s.%s.%s%s (%s)%s%s\n",
4712 __stringify(LTTNG_MODULES_MAJOR_VERSION
),
4713 __stringify(LTTNG_MODULES_MINOR_VERSION
),
4714 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION
),
4715 LTTNG_MODULES_EXTRAVERSION
,
4717 #ifdef LTTNG_EXTRA_VERSION_GIT
4718 LTTNG_EXTRA_VERSION_GIT
[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT
,
4722 #ifdef LTTNG_EXTRA_VERSION_NAME
4723 LTTNG_EXTRA_VERSION_NAME
[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME
);
4730 module_init(lttng_events_init
);
4732 static void __exit
lttng_events_exit(void)
4734 struct lttng_kernel_session_private
*session_priv
, *tmpsession_priv
;
4736 lttng_exit_cpu_hotplug();
4737 lttng_logger_exit();
4739 list_for_each_entry_safe(session_priv
, tmpsession_priv
, &sessions
, node
)
4740 lttng_session_destroy(session_priv
->pub
);
4741 kmem_cache_destroy(event_recorder_cache
);
4742 kmem_cache_destroy(event_recorder_private_cache
);
4743 kmem_cache_destroy(event_counter_cache
);
4744 kmem_cache_destroy(event_counter_private_cache
);
4745 kmem_cache_destroy(event_notifier_cache
);
4746 kmem_cache_destroy(event_notifier_private_cache
);
4747 lttng_tracepoint_exit();
4748 lttng_context_exit();
4749 printk(KERN_NOTICE
"LTTng: Unloaded modules v%s.%s.%s%s (%s)%s%s\n",
4750 __stringify(LTTNG_MODULES_MAJOR_VERSION
),
4751 __stringify(LTTNG_MODULES_MINOR_VERSION
),
4752 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION
),
4753 LTTNG_MODULES_EXTRAVERSION
,
4755 #ifdef LTTNG_EXTRA_VERSION_GIT
4756 LTTNG_EXTRA_VERSION_GIT
[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_GIT
,
4760 #ifdef LTTNG_EXTRA_VERSION_NAME
4761 LTTNG_EXTRA_VERSION_NAME
[0] == '\0' ? "" : " - " LTTNG_EXTRA_VERSION_NAME
);
4767 module_exit(lttng_events_exit
);
4769 #include <generated/patches.h>
4770 #ifdef LTTNG_EXTRA_VERSION_GIT
4771 MODULE_INFO(extra_version_git
, LTTNG_EXTRA_VERSION_GIT
);
4773 #ifdef LTTNG_EXTRA_VERSION_NAME
4774 MODULE_INFO(extra_version_name
, LTTNG_EXTRA_VERSION_NAME
);
4776 MODULE_LICENSE("GPL and additional rights");
4777 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
4778 MODULE_DESCRIPTION("LTTng tracer");
4779 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION
) "."
4780 __stringify(LTTNG_MODULES_MINOR_VERSION
) "."
4781 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION
)
4782 LTTNG_MODULES_EXTRAVERSION
);