2 * Copyright (C) 2011 EfficiOS Inc.
3 * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
5 * SPDX-License-Identifier: GPL-2.0-only
19 #include <sys/types.h>
21 #include <urcu/compiler.h>
24 #include <common/bytecode/bytecode.h>
25 #include <common/compat/errno.h>
26 #include <common/common.h>
27 #include <common/hashtable/utils.h>
28 #include <lttng/event-rule/event-rule.h>
29 #include <lttng/event-rule/event-rule-internal.h>
30 #include <lttng/event-rule/user-tracepoint.h>
31 #include <lttng/condition/condition.h>
32 #include <lttng/condition/event-rule-matches-internal.h>
33 #include <lttng/condition/event-rule-matches.h>
34 #include <lttng/trigger/trigger-internal.h>
35 #include <common/sessiond-comm/sessiond-comm.h>
37 #include "buffer-registry.h"
38 #include "condition-internal.h"
40 #include "health-sessiond.h"
42 #include "ust-consumer.h"
43 #include "lttng-ust-ctl.h"
44 #include "lttng-ust-error.h"
47 #include "lttng-sessiond.h"
48 #include "notification-thread-commands.h"
51 #include "event-notifier-error-accounting.h"
52 #include "ust-field-utils.h"
/*
 * Global application hash tables. Indexed respectively by PID, by command
 * socket fd, and by notify socket fd. Protected by RCU.
 */
struct lttng_ht *ust_app_ht;
struct lttng_ht *ust_app_ht_by_sock;
struct lttng_ht *ust_app_ht_by_notify_sock;

/* Forward declaration: flush all buffers of an app session. */
static
int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Next available session ID. Access under next_session_id_lock. */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Return the incremented value of next_channel_key.
 *
 * Keys start at 1; 0 is never returned and can be used as an
 * "unset" sentinel by callers.
 */
static uint64_t get_next_channel_key(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_channel_key_lock);
	ret = ++_next_channel_key;
	pthread_mutex_unlock(&next_channel_key_lock);
	return ret;
}

/*
 * Return the atomically incremented value of next_session_id.
 */
static uint64_t get_next_session_id(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_session_id_lock);
	ret = ++_next_session_id;
	pthread_mutex_unlock(&next_session_id_lock);
	return ret;
}
95 static void copy_channel_attr_to_ustctl(
96 struct lttng_ust_ctl_consumer_channel_attr
*attr
,
97 struct lttng_ust_abi_channel_attr
*uattr
)
99 /* Copy event attributes since the layout is different. */
100 attr
->subbuf_size
= uattr
->subbuf_size
;
101 attr
->num_subbuf
= uattr
->num_subbuf
;
102 attr
->overwrite
= uattr
->overwrite
;
103 attr
->switch_timer_interval
= uattr
->switch_timer_interval
;
104 attr
->read_timer_interval
= uattr
->read_timer_interval
;
105 attr
->output
= uattr
->output
;
106 attr
->blocking_timeout
= uattr
->u
.s
.blocking_timeout
;
110 * Match function for the hash table lookup.
112 * It matches an ust app event based on three attributes which are the event
113 * name, the filter bytecode and the loglevel.
115 static int ht_match_ust_app_event(struct cds_lfht_node
*node
, const void *_key
)
117 struct ust_app_event
*event
;
118 const struct ust_app_ht_key
*key
;
123 event
= caa_container_of(node
, struct ust_app_event
, node
.node
);
126 /* Match the 4 elements of the key: name, filter, loglevel, exclusions */
129 if (strncmp(event
->attr
.name
, key
->name
, sizeof(event
->attr
.name
)) != 0) {
133 /* Event loglevel. */
134 if (!loglevels_match(event
->attr
.loglevel_type
, event
->attr
.loglevel
,
135 key
->loglevel_type
, key
->loglevel_value
,
136 LTTNG_UST_ABI_LOGLEVEL_ALL
)) {
140 /* One of the filters is NULL, fail. */
141 if ((key
->filter
&& !event
->filter
) || (!key
->filter
&& event
->filter
)) {
145 if (key
->filter
&& event
->filter
) {
146 /* Both filters exists, check length followed by the bytecode. */
147 if (event
->filter
->len
!= key
->filter
->len
||
148 memcmp(event
->filter
->data
, key
->filter
->data
,
149 event
->filter
->len
) != 0) {
154 /* One of the exclusions is NULL, fail. */
155 if ((key
->exclusion
&& !event
->exclusion
) || (!key
->exclusion
&& event
->exclusion
)) {
159 if (key
->exclusion
&& event
->exclusion
) {
160 /* Both exclusions exists, check count followed by the names. */
161 if (event
->exclusion
->count
!= key
->exclusion
->count
||
162 memcmp(event
->exclusion
->names
, key
->exclusion
->names
,
163 event
->exclusion
->count
* LTTNG_UST_ABI_SYM_NAME_LEN
) != 0) {
177 * Unique add of an ust app event in the given ht. This uses the custom
178 * ht_match_ust_app_event match function and the event name as hash.
180 static void add_unique_ust_app_event(struct ust_app_channel
*ua_chan
,
181 struct ust_app_event
*event
)
183 struct cds_lfht_node
*node_ptr
;
184 struct ust_app_ht_key key
;
188 assert(ua_chan
->events
);
191 ht
= ua_chan
->events
;
192 key
.name
= event
->attr
.name
;
193 key
.filter
= event
->filter
;
194 key
.loglevel_type
= (enum lttng_ust_abi_loglevel_type
)
195 event
->attr
.loglevel_type
;
196 key
.loglevel_value
= event
->attr
.loglevel
;
197 key
.exclusion
= event
->exclusion
;
199 node_ptr
= cds_lfht_add_unique(ht
->ht
,
200 ht
->hash_fct(event
->node
.key
, lttng_ht_seed
),
201 ht_match_ust_app_event
, &key
, &event
->node
.node
);
202 assert(node_ptr
== &event
->node
.node
);
206 * Close the notify socket from the given RCU head object. This MUST be called
207 * through a call_rcu().
209 static void close_notify_sock_rcu(struct rcu_head
*head
)
212 struct ust_app_notify_sock_obj
*obj
=
213 caa_container_of(head
, struct ust_app_notify_sock_obj
, head
);
215 /* Must have a valid fd here. */
216 assert(obj
->fd
>= 0);
218 ret
= close(obj
->fd
);
220 ERR("close notify sock %d RCU", obj
->fd
);
222 lttng_fd_put(LTTNG_FD_APPS
, 1);
228 * Return the session registry according to the buffer type of the given
231 * A registry per UID object MUST exists before calling this function or else
232 * it assert() if not found. RCU read side lock must be acquired.
234 static struct ust_registry_session
*get_session_registry(
235 struct ust_app_session
*ua_sess
)
237 struct ust_registry_session
*registry
= NULL
;
241 switch (ua_sess
->buffer_type
) {
242 case LTTNG_BUFFER_PER_PID
:
244 struct buffer_reg_pid
*reg_pid
= buffer_reg_pid_find(ua_sess
->id
);
248 registry
= reg_pid
->registry
->reg
.ust
;
251 case LTTNG_BUFFER_PER_UID
:
253 struct buffer_reg_uid
*reg_uid
= buffer_reg_uid_find(
254 ua_sess
->tracing_id
, ua_sess
->bits_per_long
,
255 lttng_credentials_get_uid(&ua_sess
->real_credentials
));
259 registry
= reg_uid
->registry
->reg
.ust
;
271 * Delete ust context safely. RCU read lock must be held before calling
275 void delete_ust_app_ctx(int sock
, struct ust_app_ctx
*ua_ctx
,
283 pthread_mutex_lock(&app
->sock_lock
);
284 ret
= lttng_ust_ctl_release_object(sock
, ua_ctx
->obj
);
285 pthread_mutex_unlock(&app
->sock_lock
);
287 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
288 DBG3("UST app release ctx failed. Application is dead: pid = %d, sock = %d",
289 app
->pid
, app
->sock
);
290 } else if (ret
== -EAGAIN
) {
291 WARN("UST app release ctx failed. Communication time out: pid = %d, sock = %d",
292 app
->pid
, app
->sock
);
294 ERR("UST app release ctx obj handle %d failed with ret %d: pid = %d, sock = %d",
295 ua_ctx
->obj
->handle
, ret
,
296 app
->pid
, app
->sock
);
305 * Delete ust app event safely. RCU read lock must be held before calling
309 void delete_ust_app_event(int sock
, struct ust_app_event
*ua_event
,
316 free(ua_event
->filter
);
317 if (ua_event
->exclusion
!= NULL
)
318 free(ua_event
->exclusion
);
319 if (ua_event
->obj
!= NULL
) {
320 pthread_mutex_lock(&app
->sock_lock
);
321 ret
= lttng_ust_ctl_release_object(sock
, ua_event
->obj
);
322 pthread_mutex_unlock(&app
->sock_lock
);
324 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
325 DBG3("UST app release event failed. Application is dead: pid = %d, sock = %d",
326 app
->pid
, app
->sock
);
327 } else if (ret
== -EAGAIN
) {
328 WARN("UST app release event failed. Communication time out: pid = %d, sock = %d",
329 app
->pid
, app
->sock
);
331 ERR("UST app release event obj failed with ret %d: pid = %d, sock = %d",
332 ret
, app
->pid
, app
->sock
);
341 * Delayed reclaim of a ust_app_event_notifier_rule object. This MUST be called
342 * through a call_rcu().
345 void free_ust_app_event_notifier_rule_rcu(struct rcu_head
*head
)
347 struct ust_app_event_notifier_rule
*obj
= caa_container_of(
348 head
, struct ust_app_event_notifier_rule
, rcu_head
);
354 * Delete ust app event notifier rule safely.
356 static void delete_ust_app_event_notifier_rule(int sock
,
357 struct ust_app_event_notifier_rule
*ua_event_notifier_rule
,
362 assert(ua_event_notifier_rule
);
364 if (ua_event_notifier_rule
->exclusion
!= NULL
) {
365 free(ua_event_notifier_rule
->exclusion
);
368 if (ua_event_notifier_rule
->obj
!= NULL
) {
369 pthread_mutex_lock(&app
->sock_lock
);
370 ret
= lttng_ust_ctl_release_object(sock
, ua_event_notifier_rule
->obj
);
371 pthread_mutex_unlock(&app
->sock_lock
);
373 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
374 DBG3("UST app release event notifier failed. Application is dead: pid = %d, sock = %d",
375 app
->pid
, app
->sock
);
376 } else if (ret
== -EAGAIN
) {
377 WARN("UST app release event notifier failed. Communication time out: pid = %d, sock = %d",
378 app
->pid
, app
->sock
);
380 ERR("UST app release event notifier failed with ret %d: pid = %d, sock = %d",
381 ret
, app
->pid
, app
->sock
);
385 free(ua_event_notifier_rule
->obj
);
388 lttng_trigger_put(ua_event_notifier_rule
->trigger
);
389 call_rcu(&ua_event_notifier_rule
->rcu_head
,
390 free_ust_app_event_notifier_rule_rcu
);
394 * Release ust data object of the given stream.
396 * Return 0 on success or else a negative value.
398 static int release_ust_app_stream(int sock
, struct ust_app_stream
*stream
,
406 pthread_mutex_lock(&app
->sock_lock
);
407 ret
= lttng_ust_ctl_release_object(sock
, stream
->obj
);
408 pthread_mutex_unlock(&app
->sock_lock
);
410 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
411 DBG3("UST app release stream failed. Application is dead: pid = %d, sock = %d",
412 app
->pid
, app
->sock
);
413 } else if (ret
== -EAGAIN
) {
414 WARN("UST app release stream failed. Communication time out: pid = %d, sock = %d",
415 app
->pid
, app
->sock
);
417 ERR("UST app release stream obj failed with ret %d: pid = %d, sock = %d",
418 ret
, app
->pid
, app
->sock
);
421 lttng_fd_put(LTTNG_FD_APPS
, 2);
/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	assert(stream);

	/* Best effort: release errors are already logged by the callee. */
	(void) release_ust_app_stream(sock, stream, app);
	free(stream);
}
443 * We need to execute ht_destroy outside of RCU read-side critical
444 * section and outside of call_rcu thread, so we postpone its execution
445 * using ht_cleanup_push. It is simpler than to change the semantic of
446 * the many callers of delete_ust_app_session().
449 void delete_ust_app_channel_rcu(struct rcu_head
*head
)
451 struct ust_app_channel
*ua_chan
=
452 caa_container_of(head
, struct ust_app_channel
, rcu_head
);
454 ht_cleanup_push(ua_chan
->ctx
);
455 ht_cleanup_push(ua_chan
->events
);
460 * Extract the lost packet or discarded events counter when the channel is
461 * being deleted and store the value in the parent channel so we can
462 * access it from lttng list and at stop/destroy.
464 * The session list lock must be held by the caller.
467 void save_per_pid_lost_discarded_counters(struct ust_app_channel
*ua_chan
)
469 uint64_t discarded
= 0, lost
= 0;
470 struct ltt_session
*session
;
471 struct ltt_ust_channel
*uchan
;
473 if (ua_chan
->attr
.type
!= LTTNG_UST_ABI_CHAN_PER_CPU
) {
478 session
= session_find_by_id(ua_chan
->session
->tracing_id
);
479 if (!session
|| !session
->ust_session
) {
481 * Not finding the session is not an error because there are
482 * multiple ways the channels can be torn down.
484 * 1) The session daemon can initiate the destruction of the
485 * ust app session after receiving a destroy command or
486 * during its shutdown/teardown.
487 * 2) The application, since we are in per-pid tracing, is
488 * unregistering and tearing down its ust app session.
490 * Both paths are protected by the session list lock which
491 * ensures that the accounting of lost packets and discarded
492 * events is done exactly once. The session is then unpublished
493 * from the session list, resulting in this condition.
498 if (ua_chan
->attr
.overwrite
) {
499 consumer_get_lost_packets(ua_chan
->session
->tracing_id
,
500 ua_chan
->key
, session
->ust_session
->consumer
,
503 consumer_get_discarded_events(ua_chan
->session
->tracing_id
,
504 ua_chan
->key
, session
->ust_session
->consumer
,
507 uchan
= trace_ust_find_channel_by_name(
508 session
->ust_session
->domain_global
.channels
,
511 ERR("Missing UST channel to store discarded counters");
515 uchan
->per_pid_closed_app_discarded
+= discarded
;
516 uchan
->per_pid_closed_app_lost
+= lost
;
521 session_put(session
);
526 * Delete ust app channel safely. RCU read lock must be held before calling
529 * The session list lock must be held by the caller.
532 void delete_ust_app_channel(int sock
, struct ust_app_channel
*ua_chan
,
536 struct lttng_ht_iter iter
;
537 struct ust_app_event
*ua_event
;
538 struct ust_app_ctx
*ua_ctx
;
539 struct ust_app_stream
*stream
, *stmp
;
540 struct ust_registry_session
*registry
;
544 DBG3("UST app deleting channel %s", ua_chan
->name
);
547 cds_list_for_each_entry_safe(stream
, stmp
, &ua_chan
->streams
.head
, list
) {
548 cds_list_del(&stream
->list
);
549 delete_ust_app_stream(sock
, stream
, app
);
553 cds_lfht_for_each_entry(ua_chan
->ctx
->ht
, &iter
.iter
, ua_ctx
, node
.node
) {
554 cds_list_del(&ua_ctx
->list
);
555 ret
= lttng_ht_del(ua_chan
->ctx
, &iter
);
557 delete_ust_app_ctx(sock
, ua_ctx
, app
);
561 cds_lfht_for_each_entry(ua_chan
->events
->ht
, &iter
.iter
, ua_event
,
563 ret
= lttng_ht_del(ua_chan
->events
, &iter
);
565 delete_ust_app_event(sock
, ua_event
, app
);
568 if (ua_chan
->session
->buffer_type
== LTTNG_BUFFER_PER_PID
) {
569 /* Wipe and free registry from session registry. */
570 registry
= get_session_registry(ua_chan
->session
);
572 ust_registry_channel_del_free(registry
, ua_chan
->key
,
576 * A negative socket can be used by the caller when
577 * cleaning-up a ua_chan in an error path. Skip the
578 * accounting in this case.
581 save_per_pid_lost_discarded_counters(ua_chan
);
585 if (ua_chan
->obj
!= NULL
) {
586 /* Remove channel from application UST object descriptor. */
587 iter
.iter
.node
= &ua_chan
->ust_objd_node
.node
;
588 ret
= lttng_ht_del(app
->ust_objd
, &iter
);
590 pthread_mutex_lock(&app
->sock_lock
);
591 ret
= lttng_ust_ctl_release_object(sock
, ua_chan
->obj
);
592 pthread_mutex_unlock(&app
->sock_lock
);
594 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
595 DBG3("UST app channel %s release failed. Application is dead: pid = %d, sock = %d",
596 ua_chan
->name
, app
->pid
,
598 } else if (ret
== -EAGAIN
) {
599 WARN("UST app channel %s release failed. Communication time out: pid = %d, sock = %d",
600 ua_chan
->name
, app
->pid
,
603 ERR("UST app channel %s release failed with ret %d: pid = %d, sock = %d",
604 ua_chan
->name
, ret
, app
->pid
,
608 lttng_fd_put(LTTNG_FD_APPS
, 1);
611 call_rcu(&ua_chan
->rcu_head
, delete_ust_app_channel_rcu
);
614 int ust_app_register_done(struct ust_app
*app
)
618 pthread_mutex_lock(&app
->sock_lock
);
619 ret
= lttng_ust_ctl_register_done(app
->sock
);
620 pthread_mutex_unlock(&app
->sock_lock
);
624 int ust_app_release_object(struct ust_app
*app
, struct lttng_ust_abi_object_data
*data
)
629 pthread_mutex_lock(&app
->sock_lock
);
634 ret
= lttng_ust_ctl_release_object(sock
, data
);
636 pthread_mutex_unlock(&app
->sock_lock
);
642 * Push metadata to consumer socket.
644 * RCU read-side lock must be held to guarantee existance of socket.
645 * Must be called with the ust app session lock held.
646 * Must be called with the registry lock held.
648 * On success, return the len of metadata pushed or else a negative value.
649 * Returning a -EPIPE return value means we could not send the metadata,
650 * but it can be caused by recoverable errors (e.g. the application has
651 * terminated concurrently).
653 ssize_t
ust_app_push_metadata(struct ust_registry_session
*registry
,
654 struct consumer_socket
*socket
, int send_zero_data
)
657 char *metadata_str
= NULL
;
658 size_t len
, offset
, new_metadata_len_sent
;
660 uint64_t metadata_key
, metadata_version
;
665 metadata_key
= registry
->metadata_key
;
668 * Means that no metadata was assigned to the session. This can
669 * happens if no start has been done previously.
675 offset
= registry
->metadata_len_sent
;
676 len
= registry
->metadata_len
- registry
->metadata_len_sent
;
677 new_metadata_len_sent
= registry
->metadata_len
;
678 metadata_version
= registry
->metadata_version
;
680 DBG3("No metadata to push for metadata key %" PRIu64
,
681 registry
->metadata_key
);
683 if (send_zero_data
) {
684 DBG("No metadata to push");
690 /* Allocate only what we have to send. */
691 metadata_str
= zmalloc(len
);
693 PERROR("zmalloc ust app metadata string");
697 /* Copy what we haven't sent out. */
698 memcpy(metadata_str
, registry
->metadata
+ offset
, len
);
701 pthread_mutex_unlock(®istry
->lock
);
703 * We need to unlock the registry while we push metadata to
704 * break a circular dependency between the consumerd metadata
705 * lock and the sessiond registry lock. Indeed, pushing metadata
706 * to the consumerd awaits that it gets pushed all the way to
707 * relayd, but doing so requires grabbing the metadata lock. If
708 * a concurrent metadata request is being performed by
709 * consumerd, this can try to grab the registry lock on the
710 * sessiond while holding the metadata lock on the consumer
711 * daemon. Those push and pull schemes are performed on two
712 * different bidirectionnal communication sockets.
714 ret
= consumer_push_metadata(socket
, metadata_key
,
715 metadata_str
, len
, offset
, metadata_version
);
716 pthread_mutex_lock(®istry
->lock
);
719 * There is an acceptable race here between the registry
720 * metadata key assignment and the creation on the
721 * consumer. The session daemon can concurrently push
722 * metadata for this registry while being created on the
723 * consumer since the metadata key of the registry is
724 * assigned *before* it is setup to avoid the consumer
725 * to ask for metadata that could possibly be not found
726 * in the session daemon.
728 * The metadata will get pushed either by the session
729 * being stopped or the consumer requesting metadata if
730 * that race is triggered.
732 if (ret
== -LTTCOMM_CONSUMERD_CHANNEL_FAIL
) {
735 ERR("Error pushing metadata to consumer");
741 * Metadata may have been concurrently pushed, since
742 * we're not holding the registry lock while pushing to
743 * consumer. This is handled by the fact that we send
744 * the metadata content, size, and the offset at which
745 * that metadata belongs. This may arrive out of order
746 * on the consumer side, and the consumer is able to
747 * deal with overlapping fragments. The consumer
748 * supports overlapping fragments, which must be
749 * contiguous starting from offset 0. We keep the
750 * largest metadata_len_sent value of the concurrent
753 registry
->metadata_len_sent
=
754 max_t(size_t, registry
->metadata_len_sent
,
755 new_metadata_len_sent
);
764 * On error, flag the registry that the metadata is
765 * closed. We were unable to push anything and this
766 * means that either the consumer is not responding or
767 * the metadata cache has been destroyed on the
770 registry
->metadata_closed
= 1;
778 * For a given application and session, push metadata to consumer.
779 * Either sock or consumer is required : if sock is NULL, the default
780 * socket to send the metadata is retrieved from consumer, if sock
781 * is not NULL we use it to send the metadata.
782 * RCU read-side lock must be held while calling this function,
783 * therefore ensuring existance of registry. It also ensures existance
784 * of socket throughout this function.
786 * Return 0 on success else a negative error.
787 * Returning a -EPIPE return value means we could not send the metadata,
788 * but it can be caused by recoverable errors (e.g. the application has
789 * terminated concurrently).
791 static int push_metadata(struct ust_registry_session
*registry
,
792 struct consumer_output
*consumer
)
796 struct consumer_socket
*socket
;
801 pthread_mutex_lock(®istry
->lock
);
802 if (registry
->metadata_closed
) {
807 /* Get consumer socket to use to push the metadata.*/
808 socket
= consumer_find_socket_by_bitness(registry
->bits_per_long
,
815 ret
= ust_app_push_metadata(registry
, socket
, 0);
820 pthread_mutex_unlock(®istry
->lock
);
824 pthread_mutex_unlock(®istry
->lock
);
829 * Send to the consumer a close metadata command for the given session. Once
830 * done, the metadata channel is deleted and the session metadata pointer is
831 * nullified. The session lock MUST be held unless the application is
832 * in the destroy path.
834 * Do not hold the registry lock while communicating with the consumerd, because
835 * doing so causes inter-process deadlocks between consumerd and sessiond with
836 * the metadata request notification.
838 * Return 0 on success else a negative value.
840 static int close_metadata(struct ust_registry_session
*registry
,
841 struct consumer_output
*consumer
)
844 struct consumer_socket
*socket
;
845 uint64_t metadata_key
;
846 bool registry_was_already_closed
;
853 pthread_mutex_lock(®istry
->lock
);
854 metadata_key
= registry
->metadata_key
;
855 registry_was_already_closed
= registry
->metadata_closed
;
856 if (metadata_key
!= 0) {
858 * Metadata closed. Even on error this means that the consumer
859 * is not responding or not found so either way a second close
860 * should NOT be emit for this registry.
862 registry
->metadata_closed
= 1;
864 pthread_mutex_unlock(®istry
->lock
);
866 if (metadata_key
== 0 || registry_was_already_closed
) {
871 /* Get consumer socket to use to push the metadata.*/
872 socket
= consumer_find_socket_by_bitness(registry
->bits_per_long
,
879 ret
= consumer_close_metadata(socket
, metadata_key
);
890 * We need to execute ht_destroy outside of RCU read-side critical
891 * section and outside of call_rcu thread, so we postpone its execution
892 * using ht_cleanup_push. It is simpler than to change the semantic of
893 * the many callers of delete_ust_app_session().
896 void delete_ust_app_session_rcu(struct rcu_head
*head
)
898 struct ust_app_session
*ua_sess
=
899 caa_container_of(head
, struct ust_app_session
, rcu_head
);
901 ht_cleanup_push(ua_sess
->channels
);
906 * Delete ust app session safely. RCU read lock must be held before calling
909 * The session list lock must be held by the caller.
912 void delete_ust_app_session(int sock
, struct ust_app_session
*ua_sess
,
916 struct lttng_ht_iter iter
;
917 struct ust_app_channel
*ua_chan
;
918 struct ust_registry_session
*registry
;
922 pthread_mutex_lock(&ua_sess
->lock
);
924 assert(!ua_sess
->deleted
);
925 ua_sess
->deleted
= true;
927 registry
= get_session_registry(ua_sess
);
928 /* Registry can be null on error path during initialization. */
930 /* Push metadata for application before freeing the application. */
931 (void) push_metadata(registry
, ua_sess
->consumer
);
934 * Don't ask to close metadata for global per UID buffers. Close
935 * metadata only on destroy trace session in this case. Also, the
936 * previous push metadata could have flag the metadata registry to
937 * close so don't send a close command if closed.
939 if (ua_sess
->buffer_type
!= LTTNG_BUFFER_PER_UID
) {
940 /* And ask to close it for this session registry. */
941 (void) close_metadata(registry
, ua_sess
->consumer
);
945 cds_lfht_for_each_entry(ua_sess
->channels
->ht
, &iter
.iter
, ua_chan
,
947 ret
= lttng_ht_del(ua_sess
->channels
, &iter
);
949 delete_ust_app_channel(sock
, ua_chan
, app
);
952 /* In case of per PID, the registry is kept in the session. */
953 if (ua_sess
->buffer_type
== LTTNG_BUFFER_PER_PID
) {
954 struct buffer_reg_pid
*reg_pid
= buffer_reg_pid_find(ua_sess
->id
);
957 * Registry can be null on error path during
960 buffer_reg_pid_remove(reg_pid
);
961 buffer_reg_pid_destroy(reg_pid
);
965 if (ua_sess
->handle
!= -1) {
966 pthread_mutex_lock(&app
->sock_lock
);
967 ret
= lttng_ust_ctl_release_handle(sock
, ua_sess
->handle
);
968 pthread_mutex_unlock(&app
->sock_lock
);
970 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
971 DBG3("UST app release session handle failed. Application is dead: pid = %d, sock = %d",
972 app
->pid
, app
->sock
);
973 } else if (ret
== -EAGAIN
) {
974 WARN("UST app release session handle failed. Communication time out: pid = %d, sock = %d",
975 app
->pid
, app
->sock
);
977 ERR("UST app release session handle failed with ret %d: pid = %d, sock = %d",
978 ret
, app
->pid
, app
->sock
);
982 /* Remove session from application UST object descriptor. */
983 iter
.iter
.node
= &ua_sess
->ust_objd_node
.node
;
984 ret
= lttng_ht_del(app
->ust_sessions_objd
, &iter
);
988 pthread_mutex_unlock(&ua_sess
->lock
);
990 consumer_output_put(ua_sess
->consumer
);
992 call_rcu(&ua_sess
->rcu_head
, delete_ust_app_session_rcu
);
996 * Delete a traceable application structure from the global list. Never call
997 * this function outside of a call_rcu call.
999 * RCU read side lock should _NOT_ be held when calling this function.
1002 void delete_ust_app(struct ust_app
*app
)
1005 struct ust_app_session
*ua_sess
, *tmp_ua_sess
;
1006 struct lttng_ht_iter iter
;
1007 struct ust_app_event_notifier_rule
*event_notifier_rule
;
1008 bool event_notifier_write_fd_is_open
;
1011 * The session list lock must be held during this function to guarantee
1012 * the existence of ua_sess.
1014 session_lock_list();
1015 /* Delete ust app sessions info */
1020 cds_list_for_each_entry_safe(ua_sess
, tmp_ua_sess
, &app
->teardown_head
,
1022 /* Free every object in the session and the session. */
1024 delete_ust_app_session(sock
, ua_sess
, app
);
1028 /* Remove the event notifier rules associated with this app. */
1030 cds_lfht_for_each_entry (app
->token_to_event_notifier_rule_ht
->ht
,
1031 &iter
.iter
, event_notifier_rule
, node
.node
) {
1032 ret
= lttng_ht_del(app
->token_to_event_notifier_rule_ht
, &iter
);
1035 delete_ust_app_event_notifier_rule(
1036 app
->sock
, event_notifier_rule
, app
);
1041 ht_cleanup_push(app
->sessions
);
1042 ht_cleanup_push(app
->ust_sessions_objd
);
1043 ht_cleanup_push(app
->ust_objd
);
1044 ht_cleanup_push(app
->token_to_event_notifier_rule_ht
);
1047 * This could be NULL if the event notifier setup failed (e.g the app
1048 * was killed or the tracer does not support this feature).
1050 if (app
->event_notifier_group
.object
) {
1051 enum lttng_error_code ret_code
;
1052 enum event_notifier_error_accounting_status status
;
1054 const int event_notifier_read_fd
= lttng_pipe_get_readfd(
1055 app
->event_notifier_group
.event_pipe
);
1057 ret_code
= notification_thread_command_remove_tracer_event_source(
1058 the_notification_thread_handle
,
1059 event_notifier_read_fd
);
1060 if (ret_code
!= LTTNG_OK
) {
1061 ERR("Failed to remove application tracer event source from notification thread");
1064 status
= event_notifier_error_accounting_unregister_app(app
);
1065 if (status
!= EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK
) {
1066 ERR("Error unregistering app from event notifier error accounting");
1069 lttng_ust_ctl_release_object(sock
, app
->event_notifier_group
.object
);
1070 free(app
->event_notifier_group
.object
);
1073 event_notifier_write_fd_is_open
= lttng_pipe_is_write_open(
1074 app
->event_notifier_group
.event_pipe
);
1075 lttng_pipe_destroy(app
->event_notifier_group
.event_pipe
);
1077 * Release the file descriptors reserved for the event notifier pipe.
1078 * The app could be destroyed before the write end of the pipe could be
1079 * passed to the application (and closed). In that case, both file
1080 * descriptors must be released.
1082 lttng_fd_put(LTTNG_FD_APPS
, event_notifier_write_fd_is_open
? 2 : 1);
1085 * Wait until we have deleted the application from the sock hash table
1086 * before closing this socket, otherwise an application could re-use the
1087 * socket ID and race with the teardown, using the same hash table entry.
1089 * It's OK to leave the close in call_rcu. We want it to stay unique for
1090 * all RCU readers that could run concurrently with unregister app,
1091 * therefore we _need_ to only close that socket after a grace period. So
1092 * it should stay in this RCU callback.
1094 * This close() is a very important step of the synchronization model so
1095 * every modification to this function must be carefully reviewed.
1101 lttng_fd_put(LTTNG_FD_APPS
, 1);
1103 DBG2("UST app pid %d deleted", app
->pid
);
1105 session_unlock_list();
1109 * URCU intermediate call to delete an UST app.
1112 void delete_ust_app_rcu(struct rcu_head
*head
)
1114 struct lttng_ht_node_ulong
*node
=
1115 caa_container_of(head
, struct lttng_ht_node_ulong
, head
);
1116 struct ust_app
*app
=
1117 caa_container_of(node
, struct ust_app
, pid_n
);
1119 DBG3("Call RCU deleting app PID %d", app
->pid
);
1120 delete_ust_app(app
);
1124 * Delete the session from the application ht and delete the data structure by
1125 * freeing every object inside and releasing them.
1127 * The session list lock must be held by the caller.
1129 static void destroy_app_session(struct ust_app
*app
,
1130 struct ust_app_session
*ua_sess
)
1133 struct lttng_ht_iter iter
;
1138 iter
.iter
.node
= &ua_sess
->node
.node
;
1139 ret
= lttng_ht_del(app
->sessions
, &iter
);
1141 /* Already scheduled for teardown. */
1145 /* Once deleted, free the data structure. */
1146 delete_ust_app_session(app
->sock
, ua_sess
, app
);
1153 * Alloc new UST app session.
1156 struct ust_app_session
*alloc_ust_app_session(void)
1158 struct ust_app_session
*ua_sess
;
1160 /* Init most of the default value by allocating and zeroing */
1161 ua_sess
= zmalloc(sizeof(struct ust_app_session
));
1162 if (ua_sess
== NULL
) {
1167 ua_sess
->handle
= -1;
1168 ua_sess
->channels
= lttng_ht_new(0, LTTNG_HT_TYPE_STRING
);
1169 ua_sess
->metadata_attr
.type
= LTTNG_UST_ABI_CHAN_METADATA
;
1170 pthread_mutex_init(&ua_sess
->lock
, NULL
);
/*
 * Allocate and initialize a new UST app channel named `name`, owned by
 * `ua_sess`, copying the consumer-relevant fields of `attr`.
 *
 * NOTE(review): this excerpt elides some original lines (braces, error
 * paths, return); visible code tokens are preserved verbatim.
 */
struct ust_app_channel *alloc_ust_app_channel(const char *name,
		struct ust_app_session *ua_sess,
		struct lttng_ust_abi_channel_attr *attr)
	struct ust_app_channel *ua_chan;

	/* Init most of the default value by allocating and zeroing */
	ua_chan = zmalloc(sizeof(struct ust_app_channel));
	if (ua_chan == NULL) {
	/* Setup channel name; strncpy may not terminate, so force the NUL. */
	strncpy(ua_chan->name, name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
	ua_chan->enabled = 1;
	/* Zero could be a valid handle; -1 flags "not assigned yet". */
	ua_chan->handle = -1;
	ua_chan->session = ua_sess;
	/* Globally unique key under next_channel_key_lock (see get_next_channel_key). */
	ua_chan->key = get_next_channel_key();
	ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	/* Channel is indexed by name in the session's channel hash table. */
	lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
	CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
	CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);

	/* Copy attributes */
	/* Translate from lttng_ust_channel to lttng_ust_ctl_consumer_channel_attr. */
	ua_chan->attr.subbuf_size = attr->subbuf_size;
	ua_chan->attr.num_subbuf = attr->num_subbuf;
	ua_chan->attr.overwrite = attr->overwrite;
	ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
	ua_chan->attr.read_timer_interval = attr->read_timer_interval;
	ua_chan->attr.output = attr->output;
	ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;

	/* By default, the channel is a per cpu channel. */
	ua_chan->attr.type = LTTNG_UST_ABI_CHAN_PER_CPU;

	DBG3("UST app channel %s allocated", ua_chan->name);
/*
 * Allocate and initialize a UST app stream.
 *
 * Return newly allocated stream pointer or NULL on error.
 *
 * NOTE(review): excerpt elides the error branch body and return; tokens
 * preserved verbatim.
 */
struct ust_app_stream *ust_app_alloc_stream(void)
	struct ust_app_stream *stream = NULL;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
	PERROR("zmalloc ust app stream");

	/* Zero could be a valid value for a handle so flag it to -1. */
	stream->handle = -1;
/*
 * Allocate a new UST app event named `name` and copy the UST ABI event
 * attributes `attr` into it.
 *
 * NOTE(review): excerpt elides error-path lines and return; tokens
 * preserved verbatim.
 */
struct ust_app_event *alloc_ust_app_event(char *name,
		struct lttng_ust_abi_event *attr)
	struct ust_app_event *ua_event;

	/* Init most of the default value by allocating and zeroing */
	ua_event = zmalloc(sizeof(struct ust_app_event));
	if (ua_event == NULL) {
	PERROR("Failed to allocate ust_app_event structure");
	ua_event->enabled = 1;
	/* strncpy may not terminate; force the trailing NUL. */
	strncpy(ua_event->name, name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';
	/* Event is indexed by name in the channel's event hash table. */
	lttng_ht_node_init_str(&ua_event->node, ua_event->name);

	/* Copy attributes */
	memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));

	DBG3("UST app event %s allocated", ua_event->name);
/*
 * Allocate a new UST app event notifier rule from `trigger`.
 *
 * Takes a reference on the trigger (lttng_trigger_get); the reference is
 * released on the error path visible below (error_put_trigger).
 *
 * NOTE(review): excerpt elides some lines (braces, intermediate labels,
 * returns); tokens preserved verbatim.
 */
static struct ust_app_event_notifier_rule *alloc_ust_app_event_notifier_rule(
		struct lttng_trigger *trigger)
	enum lttng_event_rule_generate_exclusions_status
			generate_exclusion_status;
	enum lttng_condition_status cond_status;
	struct ust_app_event_notifier_rule *ua_event_notifier_rule;
	struct lttng_condition *condition = NULL;
	const struct lttng_event_rule *event_rule = NULL;

	ua_event_notifier_rule = zmalloc(sizeof(struct ust_app_event_notifier_rule));
	if (ua_event_notifier_rule == NULL) {
	PERROR("Failed to allocate ust_app_event_notifier_rule structure");

	ua_event_notifier_rule->enabled = 1;
	/* The trigger's tracer token is the hash table key. */
	ua_event_notifier_rule->token = lttng_trigger_get_tracer_token(trigger);
	lttng_ht_node_init_u64(&ua_event_notifier_rule->node,
			ua_event_notifier_rule->token);

	condition = lttng_trigger_get_condition(trigger);
	/* Only "event rule matches" conditions can reach this code path. */
	assert(lttng_condition_get_type(condition) ==
			LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);

	cond_status = lttng_condition_event_rule_matches_get_rule(
			condition, &event_rule);
	assert(cond_status == LTTNG_CONDITION_STATUS_OK);

	ua_event_notifier_rule->error_counter_index =
			lttng_condition_event_rule_matches_get_error_counter_index(condition);
	/* Acquire the event notifier's reference to the trigger. */
	lttng_trigger_get(trigger);

	ua_event_notifier_rule->trigger = trigger;
	ua_event_notifier_rule->filter = lttng_event_rule_get_filter_bytecode(event_rule);
	generate_exclusion_status = lttng_event_rule_generate_exclusions(
			event_rule, &ua_event_notifier_rule->exclusion);
	switch (generate_exclusion_status) {
	case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_OK:
	case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_NONE:
	/* Error occurred. */
	ERR("Failed to generate exclusions from trigger while allocating an event notifier rule");
	goto error_put_trigger;

	DBG3("UST app event notifier rule allocated: token = %" PRIu64,
			ua_event_notifier_rule->token);

	return ua_event_notifier_rule;

	/* error_put_trigger path: drop trigger reference, free the rule. */
	lttng_trigger_put(trigger);
	free(ua_event_notifier_rule);
/*
 * Allocate a new UST app context, deep-copying `uctx`. For application
 * contexts (LTTNG_UST_ABI_CONTEXT_APP_CONTEXT), the provider and context
 * name strings are duplicated so the new object owns them.
 *
 * NOTE(review): excerpt elides error-path lines, closing braces and
 * return; tokens preserved verbatim.
 */
struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
	struct ust_app_ctx *ua_ctx;

	ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
	if (ua_ctx == NULL) {

	CDS_INIT_LIST_HEAD(&ua_ctx->list);

	/* Shallow copy first; string members are re-pointed to copies below. */
	memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));

	if (uctx->ctx == LTTNG_UST_ABI_CONTEXT_APP_CONTEXT) {
	char *provider_name = NULL, *ctx_name = NULL;

	provider_name = strdup(uctx->u.app_ctx.provider_name);
	ctx_name = strdup(uctx->u.app_ctx.ctx_name);
	if (!provider_name || !ctx_name) {
	free(provider_name);

	ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
	ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;

	DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
/*
 * Create a liblttng-ust filter bytecode from given bytecode.
 *
 * Return allocated filter or NULL on error.
 *
 * NOTE(review): excerpt elides the allocation-failure branch and return;
 * tokens preserved verbatim.
 */
static struct lttng_ust_abi_filter_bytecode *create_ust_filter_bytecode_from_bytecode(
		const struct lttng_bytecode *orig_f)
	struct lttng_ust_abi_filter_bytecode *filter = NULL;

	/* Copy filter bytecode: header plus orig_f->len bytes of program. */
	filter = zmalloc(sizeof(*filter) + orig_f->len);
	PERROR("Failed to allocate lttng_ust_filter_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);

	/* The memcpy below is only valid because both structs share a layout. */
	assert(sizeof(struct lttng_bytecode) ==
			sizeof(struct lttng_ust_abi_filter_bytecode));
	memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
/*
 * Create a liblttng-ust capture bytecode from given bytecode.
 *
 * Return allocated capture bytecode or NULL on error.
 *
 * NOTE(review): excerpt elides the allocation-failure branch and return;
 * tokens preserved verbatim.
 */
static struct lttng_ust_abi_capture_bytecode *
create_ust_capture_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
	struct lttng_ust_abi_capture_bytecode *capture = NULL;

	/* Copy capture bytecode: header plus orig_f->len bytes of program. */
	capture = zmalloc(sizeof(*capture) + orig_f->len);
	PERROR("Failed to allocate lttng_ust_abi_capture_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);

	/* The memcpy below is only valid because both structs share a layout. */
	assert(sizeof(struct lttng_bytecode) ==
			sizeof(struct lttng_ust_abi_capture_bytecode));
	memcpy(capture, orig_f, sizeof(*capture) + orig_f->len);
/*
 * Find an ust_app using the sock and return it. RCU read side lock must be
 * held before calling this helper function.
 *
 * NOTE(review): excerpt elides the not-found branch braces/return; tokens
 * preserved verbatim.
 */
struct ust_app *ust_app_find_by_sock(int sock)
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	DBG2("UST app find by sock %d not found", sock);

	/* Recover the enclosing ust_app from its embedded sock_n node. */
	return caa_container_of(node, struct ust_app, sock_n);
/*
 * Find an ust_app using the notify sock and return it. RCU read side lock must
 * be held before calling this helper function.
 *
 * NOTE(review): excerpt elides the lookup call's trailing argument and the
 * not-found branch; tokens preserved verbatim.
 */
static struct ust_app *find_app_by_notify_sock(int sock)
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
	node = lttng_ht_iter_get_node_ulong(&iter);
	DBG2("UST app find by notify sock %d not found", sock);

	/* Recover the enclosing ust_app from its embedded notify_sock_n node. */
	return caa_container_of(node, struct ust_app, notify_sock_n);
/*
 * Lookup for an ust app event based on event name, filter bytecode and the
 * event loglevel.
 *
 * Return an ust_app_event object or NULL on error.
 *
 * NOTE(review): excerpt elides some parameters of the signature
 * (presumably the event name and loglevel value — the body reads `name`
 * and `loglevel_value`; confirm against full source) as well as braces
 * and return lines; tokens preserved verbatim.
 */
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
		const struct lttng_bytecode *filter,
		enum lttng_ust_abi_loglevel_type loglevel_type,
		const struct lttng_event_exclusion *exclusion)
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ust_app_event *event = NULL;
	struct ust_app_ht_key key;

	/* Setup key for event lookup. */
	key.filter = filter;
	key.loglevel_type = loglevel_type;
	key.loglevel_value = loglevel_value;
	/* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
	key.exclusion = exclusion;

	/* Lookup using the event name as hash and a custom match fct. */
	cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
			ht_match_ust_app_event, &key, &iter.iter);
	node = lttng_ht_iter_get_node_str(&iter);

	event = caa_container_of(node, struct ust_app_event, node);
/*
 * Look-up an event notifier rule based on its token id.
 *
 * Must be called with the RCU read lock held.
 * Return an ust_app_event_notifier_rule object or NULL on error.
 *
 * NOTE(review): excerpt elides the not-found branch body and end label;
 * tokens preserved verbatim.
 */
static struct ust_app_event_notifier_rule *find_ust_app_event_notifier_rule(
		struct lttng_ht *ht, uint64_t token)
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct ust_app_event_notifier_rule *event_notifier_rule = NULL;

	lttng_ht_lookup(ht, &token, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	DBG2("UST app event notifier rule token not found: token = %" PRIu64,

	event_notifier_rule = caa_container_of(
			node, struct ust_app_event_notifier_rule, node);

	return event_notifier_rule;
/*
 * Create the channel context on the tracer.
 *
 * Called with UST app session lock held. `app->sock_lock` serializes the
 * ustctl command on the application's socket.
 *
 * NOTE(review): excerpt elides `int ret` declaration, error labels and
 * return; tokens preserved verbatim.
 */
int create_ust_channel_context(struct ust_app_channel *ua_chan,
		struct ust_app_ctx *ua_ctx, struct ust_app *app)
	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_add_context(app->sock, &ua_ctx->ctx,
			ua_chan->obj, &ua_ctx->obj);
	pthread_mutex_unlock(&app->sock_lock);
	/* A dying app (-EPIPE/EXITING) is not an error worth more than a DBG. */
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
	DBG3("UST app create channel context failed. Application is dead: pid = %d, sock = %d",
			app->pid, app->sock);
	} else if (ret == -EAGAIN) {
	WARN("UST app create channel context failed. Communication time out: pid = %d, sock = %d",
			app->pid, app->sock);
	ERR("UST app create channel context failed with ret %d: pid = %d, sock = %d",
			ret, app->pid, app->sock);

	/* On success the tracer assigned a handle to the context object. */
	ua_ctx->handle = ua_ctx->obj->handle;

	DBG2("UST app context handle %d created successfully for channel %s",
			ua_ctx->handle, ua_chan->name);

	health_code_update();
/*
 * Set the filter bytecode on a UST object (event or event notifier) on the
 * tracer side. `app->sock_lock` serializes the ustctl command.
 *
 * NOTE(review): excerpt elides `int ret`, some call arguments, error
 * labels and return; tokens preserved verbatim.
 */
static int set_ust_object_filter(struct ust_app *app,
		const struct lttng_bytecode *bytecode,
		struct lttng_ust_abi_object_data *ust_object)
	struct lttng_ust_abi_filter_bytecode *ust_bytecode = NULL;

	health_code_update();

	/* Convert to the UST ABI representation before sending. */
	ust_bytecode = create_ust_filter_bytecode_from_bytecode(bytecode);
	if (!ust_bytecode) {
	ret = -LTTNG_ERR_NOMEM;
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_set_filter(app->sock, ust_bytecode,
	pthread_mutex_unlock(&app->sock_lock);
	/* A dying app (-EPIPE/EXITING) is expected; only log at DBG level. */
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
	DBG3("UST app set filter failed. Application is dead: pid = %d, sock = %d",
			app->pid, app->sock);
	} else if (ret == -EAGAIN) {
	WARN("UST app set filter failed. Communication time out: pid = %d, sock = %d",
			app->pid, app->sock);
	ERR("UST app set filter failed with ret %d: pid = %d, sock = %d, object = %p",
			ret, app->pid, app->sock, ust_object);

	DBG2("UST filter successfully set: object = %p", ust_object);

	health_code_update();
/*
 * Set a capture bytecode for the passed object.
 * The sequence number enforces the ordering at runtime and on reception of
 * the captured payloads.
 *
 * NOTE(review): excerpt elides `int ret`, some call arguments, error
 * labels and return; tokens preserved verbatim.
 */
static int set_ust_capture(struct ust_app *app,
		const struct lttng_bytecode *bytecode,
		unsigned int capture_seqnum,
		struct lttng_ust_abi_object_data *ust_object)
	struct lttng_ust_abi_capture_bytecode *ust_bytecode = NULL;

	health_code_update();

	ust_bytecode = create_ust_capture_bytecode_from_bytecode(bytecode);
	if (!ust_bytecode) {
	ret = -LTTNG_ERR_NOMEM;

	/* Set the sequence number to ensure the capture of fields is ordered. */
	ust_bytecode->seqnum = capture_seqnum;

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_set_capture(app->sock, ust_bytecode,
	pthread_mutex_unlock(&app->sock_lock);
	/* A dying app (-EPIPE/EXITING) is expected; only log at DBG level. */
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
	DBG3("UST app set capture failed. Application is dead: pid = %d, sock = %d",
			app->pid, app->sock);
	} else if (ret == -EAGAIN) {
	DBG3("UST app set capture failed. Communication timeout: pid = %d, sock = %d",
			app->pid, app->sock);
	ERR("UST app event set capture failed with ret %d: pid = %d, sock = %d",

	DBG2("UST capture successfully set: object = %p", ust_object);

	health_code_update();
/*
 * Convert an lttng_event_exclusion into the UST ABI representation.
 * The allocation holds the header plus one symbol-name slot per excluded
 * name (LTTNG_UST_ABI_SYM_NAME_LEN * exclusion->count bytes).
 *
 * NOTE(review): excerpt elides the allocation-failure branch body and end
 * label; tokens preserved verbatim.
 */
struct lttng_ust_abi_event_exclusion *create_ust_exclusion_from_exclusion(
		const struct lttng_event_exclusion *exclusion)
	struct lttng_ust_abi_event_exclusion *ust_exclusion = NULL;
	size_t exclusion_alloc_size = sizeof(struct lttng_ust_abi_event_exclusion) +
			LTTNG_UST_ABI_SYM_NAME_LEN * exclusion->count;

	ust_exclusion = zmalloc(exclusion_alloc_size);
	if (!ust_exclusion) {

	/* The flat memcpy below relies on both structs sharing a layout. */
	assert(sizeof(struct lttng_event_exclusion) ==
			sizeof(struct lttng_ust_abi_event_exclusion));
	memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
	return ust_exclusion;
/*
 * Set event exclusions on the tracer for a UST object. Requires a
 * non-empty exclusion list (asserted below). `app->sock_lock` serializes
 * the ustctl command.
 *
 * NOTE(review): excerpt elides `int ret`, a call argument, error labels
 * and return; tokens preserved verbatim.
 */
static int set_ust_object_exclusions(struct ust_app *app,
		const struct lttng_event_exclusion *exclusions,
		struct lttng_ust_abi_object_data *ust_object)
	struct lttng_ust_abi_event_exclusion *ust_exclusions = NULL;

	assert(exclusions && exclusions->count > 0);

	health_code_update();

	ust_exclusions = create_ust_exclusion_from_exclusion(
	if (!ust_exclusions) {
	ret = -LTTNG_ERR_NOMEM;

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_set_exclusion(app->sock, ust_exclusions, ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	/* A dying app (-EPIPE/EXITING) is expected; only log at DBG level. */
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
	DBG3("UST app event exclusion failed. Application is dead: pid = %d, sock = %d",
			app->pid, app->sock);
	} else if (ret == -EAGAIN) {
	WARN("UST app event exclusion failed. Communication time out(pid: %d, sock = %d",
			app->pid, app->sock);
	ERR("UST app event exclusions failed with ret %d: pid = %d, sock = %d, object = %p",
			ret, app->pid, app->sock, ust_object);

	DBG2("UST exclusions set successfully for object %p", ust_object);

	health_code_update();
	free(ust_exclusions);
/*
 * Disable the specified UST object (event or notifier) on the tracer.
 * `app->sock_lock` serializes the ustctl command.
 *
 * NOTE(review): excerpt elides `int ret`, error labels, the final DBG2
 * arguments and return; tokens preserved verbatim.
 */
static int disable_ust_object(struct ust_app *app,
		struct lttng_ust_abi_object_data *object)
	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_disable(app->sock, object);
	pthread_mutex_unlock(&app->sock_lock);
	/* A dying app (-EPIPE/EXITING) is expected; only log at DBG level. */
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
	DBG3("UST app disable object failed. Application is dead: pid = %d, sock = %d",
			app->pid, app->sock);
	} else if (ret == -EAGAIN) {
	WARN("UST app disable object failed. Communication time out: pid = %d, sock = %d",
			app->pid, app->sock);
	ERR("UST app disable object failed with ret %d: pid = %d, sock = %d, object = %p",
			ret, app->pid, app->sock, object);

	DBG2("UST app object %p disabled successfully for app: pid = %d",

	health_code_update();
/*
 * Disable the specified channel on the tracer for the UST session.
 * `app->sock_lock` serializes the ustctl command.
 *
 * NOTE(review): excerpt elides `int ret`, error labels and return; tokens
 * preserved verbatim.
 */
static int disable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_disable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	/* A dying app (-EPIPE/EXITING) is expected; only log at DBG level. */
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
	DBG3("UST app disable channel failed. Application is dead: pid = %d, sock = %d",
			app->pid, app->sock);
	} else if (ret == -EAGAIN) {
	WARN("UST app disable channel failed. Communication time out: pid = %d, sock = %d",
			app->pid, app->sock);
	ERR("UST app channel %s disable failed, session handle %d, with ret %d: pid = %d, sock = %d",
			ua_chan->name, ua_sess->handle, ret,
			app->pid, app->sock);

	DBG2("UST app channel %s disabled successfully for app: pid = %d",
			ua_chan->name, app->pid);

	health_code_update();
/*
 * Enable the specified channel on the tracer for the UST session and mark
 * the local shadow state enabled on success. `app->sock_lock` serializes
 * the ustctl command.
 *
 * NOTE(review): excerpt elides `int ret`, error labels and return; tokens
 * preserved verbatim.
 */
static int enable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_enable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	/* A dying app (-EPIPE/EXITING) is expected; only log at DBG level. */
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
	DBG3("UST app channel %s enable failed. Application is dead: pid = %d, sock = %d",
			ua_chan->name, app->pid, app->sock);
	} else if (ret == -EAGAIN) {
	WARN("UST app channel %s enable failed. Communication time out: pid = %d, sock = %d",
			ua_chan->name, app->pid, app->sock);
	ERR("UST app channel %s enable failed, session handle %d, with ret %d: pid = %d, sock = %d",
			ua_chan->name, ua_sess->handle, ret,
			app->pid, app->sock);

	/* Keep the shadow copy in sync with the tracer-side state. */
	ua_chan->enabled = 1;

	DBG2("UST app channel %s enabled successfully for app: pid = %d",
			ua_chan->name, app->pid);

	health_code_update();
/*
 * Enable the specified UST object (event or notifier) on the tracer.
 * `app->sock_lock` serializes the ustctl command.
 *
 * NOTE(review): excerpt elides `int ret`, error labels and return; tokens
 * preserved verbatim.
 */
static int enable_ust_object(
		struct ust_app *app, struct lttng_ust_abi_object_data *ust_object)
	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_enable(app->sock, ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	/* A dying app (-EPIPE/EXITING) is expected; only log at DBG level. */
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
	DBG3("UST app enable object failed. Application is dead: pid = %d, sock = %d",
			app->pid, app->sock);
	} else if (ret == -EAGAIN) {
	WARN("UST app enable object failed. Communication time out: pid = %d, sock = %d",
			app->pid, app->sock);
	ERR("UST app enable object failed with ret %d: pid = %d, sock = %d, object = %p",
			ret, app->pid, app->sock, ust_object);

	DBG2("UST app object %p enabled successfully for app: pid = %d",
			ust_object, app->pid);

	health_code_update();
/*
 * Send channel and stream buffer to application.
 *
 * Return 0 on success. On error, a negative value is returned.
 * Streams are consumed (unlinked and deleted) once handed to the tracer.
 *
 * NOTE(review): excerpt elides `int ret`, some call arguments, gotos,
 * closing braces and return; tokens preserved verbatim.
 */
static int send_channel_pid_to_ust(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
	struct ust_app_stream *stream, *stmp;

	health_code_update();

	DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
	ret = -ENOTCONN;	/* Caused by app exiting. */
	} else if (ret == -EAGAIN) {
	/* Caused by timeout. */
	WARN("Communication with application %d timed out on send_channel for channel \"%s\" of session \"%" PRIu64 "\".",
			app->pid, ua_chan->name, ua_sess->tracing_id);
	/* Treat this the same way as an application that is exiting. */
	} else if (ret < 0) {

	health_code_update();

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
	ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
	ret = -ENOTCONN;	/* Caused by app exiting. */
	} else if (ret == -EAGAIN) {
	/* Caused by timeout. */
	WARN("Communication with application %d timed out on send_stream for stream \"%s\" of channel \"%s\" of session \"%" PRIu64 "\".",
			app->pid, stream->name, ua_chan->name,
			ua_sess->tracing_id);
	/* Treat this the same way as an application that is exiting. */
	} else if (ret < 0) {
	/* We don't need the stream anymore once sent to the tracer. */
	cds_list_del(&stream->list);
	delete_ust_app_stream(-1, stream, app);

	/* Flag the channel that it is sent to the application. */
	ua_chan->is_sent = 1;

	health_code_update();
/*
 * Create the specified event onto the UST tracer for a UST session.
 *
 * Should be called with session mutex held. After creation, the filter and
 * exclusions (if any) are pushed to the tracer, then the event is
 * explicitly enabled when requested, since the tracer creates it disabled.
 *
 * NOTE(review): excerpt elides `int ret`, some call arguments, the switch
 * statement header around the enable error handling, gotos, closing
 * braces and return; tokens preserved verbatim.
 */
int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
	health_code_update();

	/* Create UST event on tracer */
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
	pthread_mutex_unlock(&app->sock_lock);
	/* A dying app (-EPIPE/EXITING) is expected; only log at DBG level. */
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
	DBG3("UST app create event failed. Application is dead: pid = %d, sock = %d",
			app->pid, app->sock);
	} else if (ret == -EAGAIN) {
	WARN("UST app create event failed. Communication time out: pid = %d, sock = %d",
			app->pid, app->sock);
	ERR("UST app create event '%s' failed with ret %d: pid = %d, sock = %d",
			ua_event->attr.name, ret, app->pid,

	/* The tracer assigned a handle to the new event object. */
	ua_event->handle = ua_event->obj->handle;

	DBG2("UST app event %s created successfully for pid:%d object = %p",
			ua_event->attr.name, app->pid, ua_event->obj);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event->filter) {
	ret = set_ust_object_filter(app, ua_event->filter, ua_event->obj);

	/* Set exclusions for the event */
	if (ua_event->exclusion) {
	ret = set_ust_object_exclusions(app, ua_event->exclusion, ua_event->obj);

	/* The tracer creates the event disabled; enable it when requested. */
	if (ua_event->enabled) {
	/*
	 * We now need to explicitly enable the event, since it
	 * is now disabled at creation.
	 */
	ret = enable_ust_object(app, ua_event->obj);
	/*
	 * If we hit an EPERM, something is wrong with our enable call. If
	 * we get an EEXIST, there is a problem on the tracer side since we
	 * just created it.
	 */
	case -LTTNG_UST_ERR_PERM:
	/* Code flow problem */
	case -LTTNG_UST_ERR_EXIST:
	/* It's OK for our use case. */

	health_code_update();
/*
 * Initialize a UST ABI event notifier structure from an event rule:
 * derives the name pattern and loglevel settings, defaulting to
 * LOGLEVEL_ALL for agent domains and unset log level rules.
 *
 * NOTE(review): excerpt elides `else` branches, `break`/`default`
 * statements, abort()/error paths, closing braces and return; tokens
 * preserved verbatim.
 */
static int init_ust_event_notifier_from_event_rule(
		const struct lttng_event_rule *rule,
		struct lttng_ust_abi_event_notifier *event_notifier)
	enum lttng_event_rule_status status;
	enum lttng_ust_abi_loglevel_type ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
	int loglevel = -1, ret = 0;
	const char *pattern;

	memset(event_notifier, 0, sizeof(*event_notifier));

	if (lttng_event_rule_targets_agent_domain(rule)) {
	/*
	 * Special event for agents
	 * The actual meat of the event is in the filter that will be
	 * attached later on.
	 * Set the default values for the agent event.
	 */
	pattern = event_get_default_agent_ust_name(
			lttng_event_rule_get_domain_type(rule));
	ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
	const struct lttng_log_level_rule *log_level_rule;

	/* Non-agent rules must be user tracepoint rules here. */
	assert(lttng_event_rule_get_type(rule) ==
			LTTNG_EVENT_RULE_TYPE_USER_TRACEPOINT);

	status = lttng_event_rule_user_tracepoint_get_name_pattern(rule, &pattern);
	if (status != LTTNG_EVENT_RULE_STATUS_OK) {
	/* At this point, this is a fatal error. */

	status = lttng_event_rule_user_tracepoint_get_log_level_rule(
			rule, &log_level_rule);
	if (status == LTTNG_EVENT_RULE_STATUS_UNSET) {
	ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
	} else if (status == LTTNG_EVENT_RULE_STATUS_OK) {
	enum lttng_log_level_rule_status llr_status;

	/* Map the log level rule type onto the UST ABI loglevel mode. */
	switch (lttng_log_level_rule_get_type(log_level_rule)) {
	case LTTNG_LOG_LEVEL_RULE_TYPE_EXACTLY:
	ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_SINGLE;
	llr_status = lttng_log_level_rule_exactly_get_level(
			log_level_rule, &loglevel);
	case LTTNG_LOG_LEVEL_RULE_TYPE_AT_LEAST_AS_SEVERE_AS:
	ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_RANGE;
	llr_status = lttng_log_level_rule_at_least_as_severe_as_get_level(
			log_level_rule, &loglevel);
	assert(llr_status == LTTNG_LOG_LEVEL_RULE_STATUS_OK);
	/* At this point this is a fatal error. */

	event_notifier->event.instrumentation = LTTNG_UST_ABI_TRACEPOINT;
	ret = lttng_strncpy(event_notifier->event.name, pattern,
			sizeof(event_notifier->event.name));
	ERR("Failed to copy event rule pattern to notifier: pattern = '%s' ",

	event_notifier->event.loglevel_type = ust_loglevel_type;
	event_notifier->event.loglevel = loglevel;
/*
 * Create the specified event notifier against the user space tracer of a
 * given application.
 *
 * Flow: extract the event rule from the trigger's condition, initialize a
 * UST ABI notifier, create it via ustctl, then push filter, exclusions and
 * capture bytecodes, and finally enable the notifier (created disabled).
 *
 * NOTE(review): excerpt elides `int ret`, some call arguments, switch
 * headers, gotos, closing braces and return; tokens preserved verbatim.
 */
static int create_ust_event_notifier(struct ust_app *app,
		struct ust_app_event_notifier_rule *ua_event_notifier_rule)
	enum lttng_condition_status condition_status;
	const struct lttng_condition *condition = NULL;
	struct lttng_ust_abi_event_notifier event_notifier;
	const struct lttng_event_rule *event_rule = NULL;
	unsigned int capture_bytecode_count = 0, i;
	enum lttng_condition_status cond_status;
	enum lttng_event_rule_type event_rule_type;

	health_code_update();
	assert(app->event_notifier_group.object);

	condition = lttng_trigger_get_const_condition(
			ua_event_notifier_rule->trigger);
	assert(lttng_condition_get_type(condition) ==
			LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);

	condition_status = lttng_condition_event_rule_matches_get_rule(
			condition, &event_rule);
	assert(condition_status == LTTNG_CONDITION_STATUS_OK);

	event_rule_type = lttng_event_rule_get_type(event_rule);
	assert(event_rule_type == LTTNG_EVENT_RULE_TYPE_USER_TRACEPOINT ||
			event_rule_type == LTTNG_EVENT_RULE_TYPE_JUL_LOGGING ||
			LTTNG_EVENT_RULE_TYPE_LOG4J_LOGGING ||
			LTTNG_EVENT_RULE_TYPE_PYTHON_LOGGING);

	init_ust_event_notifier_from_event_rule(event_rule, &event_notifier);
	event_notifier.event.token = ua_event_notifier_rule->token;
	event_notifier.error_counter_index = ua_event_notifier_rule->error_counter_index;

	/* Create UST event notifier against the tracer. */
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_create_event_notifier(app->sock, &event_notifier,
			app->event_notifier_group.object,
			&ua_event_notifier_rule->obj);
	pthread_mutex_unlock(&app->sock_lock);
	/* A dying app (-EPIPE/EXITING) is expected; only log at DBG level. */
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
	DBG3("UST app create event notifier failed. Application is dead: pid = %d, sock = %d",
			app->pid, app->sock);
	} else if (ret == -EAGAIN) {
	WARN("UST app create event notifier failed. Communication time out: pid = %d, sock = %d",
			app->pid, app->sock);
	ERR("UST app create event notifier '%s' failed with ret %d: pid = %d, sock = %d",
			event_notifier.event.name, ret, app->pid,

	/* The tracer assigned a handle to the new notifier object. */
	ua_event_notifier_rule->handle = ua_event_notifier_rule->obj->handle;

	DBG2("UST app event notifier %s created successfully: app = '%s': pid = %d), object = %p",
			event_notifier.event.name, app->name, app->pid,
			ua_event_notifier_rule->obj);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event_notifier_rule->filter) {
	ret = set_ust_object_filter(app, ua_event_notifier_rule->filter,
			ua_event_notifier_rule->obj);

	/* Set exclusions for the event. */
	if (ua_event_notifier_rule->exclusion) {
	ret = set_ust_object_exclusions(app,
			ua_event_notifier_rule->exclusion,
			ua_event_notifier_rule->obj);

	/* Set the capture bytecodes. */
	cond_status = lttng_condition_event_rule_matches_get_capture_descriptor_count(
			condition, &capture_bytecode_count);
	assert(cond_status == LTTNG_CONDITION_STATUS_OK);

	for (i = 0; i < capture_bytecode_count; i++) {
	const struct lttng_bytecode *capture_bytecode =
			lttng_condition_event_rule_matches_get_capture_bytecode_at_index(

	/* Sequence number `i` orders the captured payloads. */
	ret = set_ust_capture(app, capture_bytecode, i,
			ua_event_notifier_rule->obj);

	/*
	 * We now need to explicitly enable the event, since it
	 * is disabled at creation.
	 */
	ret = enable_ust_object(app, ua_event_notifier_rule->obj);
	/*
	 * If we hit an EPERM, something is wrong with our enable call.
	 * If we get an EEXIST, there is a problem on the tracer side
	 * since we just created it.
	 */
	case -LTTNG_UST_ERR_PERM:
	/* Code flow problem. */
	case -LTTNG_UST_ERR_EXIST:
	/* It's OK for our use case. */

	ua_event_notifier_rule->enabled = true;

	health_code_update();
/*
 * Copy data between an UST app event and a LTT event: name, enabled flag,
 * UST ABI attributes, filter bytecode and exclusion list.
 *
 * NOTE(review): excerpt elides error-path lines and closing braces;
 * tokens preserved verbatim.
 */
static void shadow_copy_event(struct ust_app_event *ua_event,
		struct ltt_ust_event *uevent)
	size_t exclusion_alloc_size;

	/* strncpy may not terminate; force the trailing NUL. */
	strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';

	ua_event->enabled = uevent->enabled;

	/* Copy event attributes */
	memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));

	/* Copy filter bytecode */
	if (uevent->filter) {
	ua_event->filter = lttng_bytecode_copy(uevent->filter);
	/* Filter might be NULL here in case of ENOMEM. */

	/* Copy exclusion data */
	if (uevent->exclusion) {
	exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
			LTTNG_UST_ABI_SYM_NAME_LEN * uevent->exclusion->count;
	ua_event->exclusion = zmalloc(exclusion_alloc_size);
	if (ua_event->exclusion == NULL) {
	memcpy(ua_event->exclusion, uevent->exclusion,
			exclusion_alloc_size);
/*
 * Copy data between an UST app channel and a LTT channel: name, tracefile
 * settings, consumer attributes (field by field, since the two attribute
 * layouts differ), enabled flag and tracing channel id.
 */
static void shadow_copy_channel(struct ust_app_channel *ua_chan,
		struct ltt_ust_channel *uchan)
	DBG2("UST app shadow copy of channel %s started", ua_chan->name);

	/* strncpy may not terminate; force the trailing NUL. */
	strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->tracefile_size = uchan->tracefile_size;
	ua_chan->tracefile_count = uchan->tracefile_count;

	/* Copy event attributes since the layout is different. */
	ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
	ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
	ua_chan->attr.overwrite = uchan->attr.overwrite;
	ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
	ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
	ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
	ua_chan->attr.output = uchan->attr.output;
	ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;

	/*
	 * Note that the attribute channel type is not set since the channel on the
	 * tracing registry side does not have this information.
	 */

	ua_chan->enabled = uchan->enabled;
	ua_chan->tracing_channel_id = uchan->id;

	DBG3("UST app shadow copy of channel %s done", ua_chan->name);
/*
 * Copy data between a UST app session and a regular LTT session: ids,
 * credentials, buffer type, consumer output (with a reference taken),
 * metadata attributes, and the per-PID/per-UID trace and shm paths.
 *
 * Takes a reference on usess->consumer; the visible
 * consumer_output_put() at the end is the error-path release.
 *
 * NOTE(review): excerpt elides `int ret`, the `datetime` buffer
 * declaration, `break`/`default` statements, error labels, closing braces
 * and return; tokens preserved verbatim.
 */
static void shadow_copy_session(struct ust_app_session *ua_sess,
		struct ltt_ust_session *usess, struct ust_app *app)
	struct tm *timeinfo;
	char tmp_shm_path[PATH_MAX];

	/* Registration time stamps per-PID paths (name-pid-datetime). */
	timeinfo = localtime(&app->registration_time);
	strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);

	DBG2("Shadow copy of session handle %d", ua_sess->handle);

	ua_sess->tracing_id = usess->id;
	ua_sess->id = get_next_session_id();
	LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.uid, app->uid);
	LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.gid, app->gid);
	LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
	LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
	ua_sess->buffer_type = usess->buffer_type;
	ua_sess->bits_per_long = app->bits_per_long;

	/* There is only one consumer object per session possible. */
	consumer_output_get(usess->consumer);
	ua_sess->consumer = usess->consumer;

	ua_sess->output_traces = usess->output_traces;
	ua_sess->live_timer_interval = usess->live_timer_interval;
	copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
			&usess->metadata_attr);

	/* Trace path depends on the buffer ownership model. */
	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
	ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
			DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
	case LTTNG_BUFFER_PER_UID:
	ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
			DEFAULT_UST_TRACE_UID_PATH,
			lttng_credentials_get_uid(&ua_sess->real_credentials),
			app->bits_per_long);
	PERROR("asprintf UST shadow copy session");

	/* strncpy may not terminate; force the trailing NUL. */
	strncpy(ua_sess->root_shm_path, usess->root_shm_path,
			sizeof(ua_sess->root_shm_path));
	ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
	strncpy(ua_sess->shm_path, usess->shm_path,
			sizeof(ua_sess->shm_path));
	ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	if (ua_sess->shm_path[0]) {
	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
	ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
			"/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
			app->name, app->pid, datetime);
	case LTTNG_BUFFER_PER_UID:
	ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
			"/" DEFAULT_UST_TRACE_UID_PATH,
			app->uid, app->bits_per_long);
	PERROR("sprintf UST shadow copy session");

	/* Bounded append of the per-session suffix onto the shm base path. */
	strncat(ua_sess->shm_path, tmp_shm_path,
			sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
	ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';

	consumer_output_put(ua_sess->consumer);
2469 * Lookup sesison wrapper.
2472 void __lookup_session_by_app(const struct ltt_ust_session
*usess
,
2473 struct ust_app
*app
, struct lttng_ht_iter
*iter
)
2475 /* Get right UST app session from app */
2476 lttng_ht_lookup(app
->sessions
, &usess
->id
, iter
);
2480 * Return ust app session from the app session hashtable using the UST session
2483 static struct ust_app_session
*lookup_session_by_app(
2484 const struct ltt_ust_session
*usess
, struct ust_app
*app
)
2486 struct lttng_ht_iter iter
;
2487 struct lttng_ht_node_u64
*node
;
2489 __lookup_session_by_app(usess
, app
, &iter
);
2490 node
= lttng_ht_iter_get_node_u64(&iter
);
2495 return caa_container_of(node
, struct ust_app_session
, node
);
2502 * Setup buffer registry per PID for the given session and application. If none
2503 * is found, a new one is created, added to the global registry and
2504 * initialized. If regp is valid, it's set with the newly created object.
2506 * Return 0 on success or else a negative value.
2508 static int setup_buffer_reg_pid(struct ust_app_session
*ua_sess
,
2509 struct ust_app
*app
, struct buffer_reg_pid
**regp
)
2512 struct buffer_reg_pid
*reg_pid
;
2519 reg_pid
= buffer_reg_pid_find(ua_sess
->id
);
2522 * This is the create channel path meaning that if there is NO
2523 * registry available, we have to create one for this session.
2525 ret
= buffer_reg_pid_create(ua_sess
->id
, ®_pid
,
2526 ua_sess
->root_shm_path
, ua_sess
->shm_path
);
2534 /* Initialize registry. */
2535 ret
= ust_registry_session_init(®_pid
->registry
->reg
.ust
, app
,
2536 app
->bits_per_long
, app
->uint8_t_alignment
,
2537 app
->uint16_t_alignment
, app
->uint32_t_alignment
,
2538 app
->uint64_t_alignment
, app
->long_alignment
,
2539 app
->byte_order
, app
->version
.major
, app
->version
.minor
,
2540 reg_pid
->root_shm_path
, reg_pid
->shm_path
,
2541 lttng_credentials_get_uid(&ua_sess
->effective_credentials
),
2542 lttng_credentials_get_gid(&ua_sess
->effective_credentials
),
2543 ua_sess
->tracing_id
,
2547 * reg_pid->registry->reg.ust is NULL upon error, so we need to
2548 * destroy the buffer registry, because it is always expected
2549 * that if the buffer registry can be found, its ust registry is
2552 buffer_reg_pid_destroy(reg_pid
);
2556 buffer_reg_pid_add(reg_pid
);
2558 DBG3("UST app buffer registry per PID created successfully");
2570 * Setup buffer registry per UID for the given session and application. If none
2571 * is found, a new one is created, added to the global registry and
2572 * initialized. If regp is valid, it's set with the newly created object.
2574 * Return 0 on success or else a negative value.
2576 static int setup_buffer_reg_uid(struct ltt_ust_session
*usess
,
2577 struct ust_app_session
*ua_sess
,
2578 struct ust_app
*app
, struct buffer_reg_uid
**regp
)
2581 struct buffer_reg_uid
*reg_uid
;
2588 reg_uid
= buffer_reg_uid_find(usess
->id
, app
->bits_per_long
, app
->uid
);
2591 * This is the create channel path meaning that if there is NO
2592 * registry available, we have to create one for this session.
2594 ret
= buffer_reg_uid_create(usess
->id
, app
->bits_per_long
, app
->uid
,
2595 LTTNG_DOMAIN_UST
, ®_uid
,
2596 ua_sess
->root_shm_path
, ua_sess
->shm_path
);
2604 /* Initialize registry. */
2605 ret
= ust_registry_session_init(®_uid
->registry
->reg
.ust
, NULL
,
2606 app
->bits_per_long
, app
->uint8_t_alignment
,
2607 app
->uint16_t_alignment
, app
->uint32_t_alignment
,
2608 app
->uint64_t_alignment
, app
->long_alignment
,
2609 app
->byte_order
, app
->version
.major
,
2610 app
->version
.minor
, reg_uid
->root_shm_path
,
2611 reg_uid
->shm_path
, usess
->uid
, usess
->gid
,
2612 ua_sess
->tracing_id
, app
->uid
);
2615 * reg_uid->registry->reg.ust is NULL upon error, so we need to
2616 * destroy the buffer registry, because it is always expected
2617 * that if the buffer registry can be found, its ust registry is
2620 buffer_reg_uid_destroy(reg_uid
, NULL
);
2623 /* Add node to teardown list of the session. */
2624 cds_list_add(®_uid
->lnode
, &usess
->buffer_reg_uid_list
);
2626 buffer_reg_uid_add(reg_uid
);
2628 DBG3("UST app buffer registry per UID created successfully");
2639 * Create a session on the tracer side for the given app.
2641 * On success, ua_sess_ptr is populated with the session pointer or else left
2642 * untouched. If the session was created, is_created is set to 1. On error,
2643 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
2646 * Returns 0 on success or else a negative code which is either -ENOMEM or
2647 * -ENOTCONN which is the default code if the lttng_ust_ctl_create_session fails.
2649 static int find_or_create_ust_app_session(struct ltt_ust_session
*usess
,
2650 struct ust_app
*app
, struct ust_app_session
**ua_sess_ptr
,
2653 int ret
, created
= 0;
2654 struct ust_app_session
*ua_sess
;
2658 assert(ua_sess_ptr
);
2660 health_code_update();
2662 ua_sess
= lookup_session_by_app(usess
, app
);
2663 if (ua_sess
== NULL
) {
2664 DBG2("UST app pid: %d session id %" PRIu64
" not found, creating it",
2665 app
->pid
, usess
->id
);
2666 ua_sess
= alloc_ust_app_session();
2667 if (ua_sess
== NULL
) {
2668 /* Only malloc can failed so something is really wrong */
2672 shadow_copy_session(ua_sess
, usess
, app
);
2676 switch (usess
->buffer_type
) {
2677 case LTTNG_BUFFER_PER_PID
:
2678 /* Init local registry. */
2679 ret
= setup_buffer_reg_pid(ua_sess
, app
, NULL
);
2681 delete_ust_app_session(-1, ua_sess
, app
);
2685 case LTTNG_BUFFER_PER_UID
:
2686 /* Look for a global registry. If none exists, create one. */
2687 ret
= setup_buffer_reg_uid(usess
, ua_sess
, app
, NULL
);
2689 delete_ust_app_session(-1, ua_sess
, app
);
2699 health_code_update();
2701 if (ua_sess
->handle
== -1) {
2702 pthread_mutex_lock(&app
->sock_lock
);
2703 ret
= lttng_ust_ctl_create_session(app
->sock
);
2704 pthread_mutex_unlock(&app
->sock_lock
);
2706 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
2707 DBG("UST app creating session failed. Application is dead: pid = %d, sock = %d",
2708 app
->pid
, app
->sock
);
2710 } else if (ret
== -EAGAIN
) {
2711 DBG("UST app creating session failed. Communication time out: pid = %d, sock = %d",
2712 app
->pid
, app
->sock
);
2715 ERR("UST app creating session failed with ret %d: pid = %d, sock =%d",
2716 ret
, app
->pid
, app
->sock
);
2718 delete_ust_app_session(-1, ua_sess
, app
);
2719 if (ret
!= -ENOMEM
) {
2721 * Tracer is probably gone or got an internal error so let's
2722 * behave like it will soon unregister or not usable.
2729 ua_sess
->handle
= ret
;
2731 /* Add ust app session to app's HT */
2732 lttng_ht_node_init_u64(&ua_sess
->node
,
2733 ua_sess
->tracing_id
);
2734 lttng_ht_add_unique_u64(app
->sessions
, &ua_sess
->node
);
2735 lttng_ht_node_init_ulong(&ua_sess
->ust_objd_node
, ua_sess
->handle
);
2736 lttng_ht_add_unique_ulong(app
->ust_sessions_objd
,
2737 &ua_sess
->ust_objd_node
);
2739 DBG2("UST app session created successfully with handle %d", ret
);
2742 *ua_sess_ptr
= ua_sess
;
2744 *is_created
= created
;
2747 /* Everything went well. */
2751 health_code_update();
2756 * Match function for a hash table lookup of ust_app_ctx.
2758 * It matches an ust app context based on the context type and, in the case
2759 * of perf counters, their name.
2761 static int ht_match_ust_app_ctx(struct cds_lfht_node
*node
, const void *_key
)
2763 struct ust_app_ctx
*ctx
;
2764 const struct lttng_ust_context_attr
*key
;
2769 ctx
= caa_container_of(node
, struct ust_app_ctx
, node
.node
);
2773 if (ctx
->ctx
.ctx
!= key
->ctx
) {
2778 case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER
:
2779 if (strncmp(key
->u
.perf_counter
.name
,
2780 ctx
->ctx
.u
.perf_counter
.name
,
2781 sizeof(key
->u
.perf_counter
.name
))) {
2785 case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT
:
2786 if (strcmp(key
->u
.app_ctx
.provider_name
,
2787 ctx
->ctx
.u
.app_ctx
.provider_name
) ||
2788 strcmp(key
->u
.app_ctx
.ctx_name
,
2789 ctx
->ctx
.u
.app_ctx
.ctx_name
)) {
2805 * Lookup for an ust app context from an lttng_ust_context.
2807 * Must be called while holding RCU read side lock.
2808 * Return an ust_app_ctx object or NULL on error.
2811 struct ust_app_ctx
*find_ust_app_context(struct lttng_ht
*ht
,
2812 struct lttng_ust_context_attr
*uctx
)
2814 struct lttng_ht_iter iter
;
2815 struct lttng_ht_node_ulong
*node
;
2816 struct ust_app_ctx
*app_ctx
= NULL
;
2821 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2822 cds_lfht_lookup(ht
->ht
, ht
->hash_fct((void *) uctx
->ctx
, lttng_ht_seed
),
2823 ht_match_ust_app_ctx
, uctx
, &iter
.iter
);
2824 node
= lttng_ht_iter_get_node_ulong(&iter
);
2829 app_ctx
= caa_container_of(node
, struct ust_app_ctx
, node
);
2836 * Create a context for the channel on the tracer.
2838 * Called with UST app session lock held and a RCU read side lock.
2841 int create_ust_app_channel_context(struct ust_app_channel
*ua_chan
,
2842 struct lttng_ust_context_attr
*uctx
,
2843 struct ust_app
*app
)
2846 struct ust_app_ctx
*ua_ctx
;
2848 DBG2("UST app adding context to channel %s", ua_chan
->name
);
2850 ua_ctx
= find_ust_app_context(ua_chan
->ctx
, uctx
);
2856 ua_ctx
= alloc_ust_app_ctx(uctx
);
2857 if (ua_ctx
== NULL
) {
2863 lttng_ht_node_init_ulong(&ua_ctx
->node
, (unsigned long) ua_ctx
->ctx
.ctx
);
2864 lttng_ht_add_ulong(ua_chan
->ctx
, &ua_ctx
->node
);
2865 cds_list_add_tail(&ua_ctx
->list
, &ua_chan
->ctx_list
);
2867 ret
= create_ust_channel_context(ua_chan
, ua_ctx
, app
);
2877 * Enable on the tracer side a ust app event for the session and channel.
2879 * Called with UST app session lock held.
2882 int enable_ust_app_event(struct ust_app_session
*ua_sess
,
2883 struct ust_app_event
*ua_event
, struct ust_app
*app
)
2887 ret
= enable_ust_object(app
, ua_event
->obj
);
2892 ua_event
->enabled
= 1;
2899 * Disable on the tracer side a ust app event for the session and channel.
2901 static int disable_ust_app_event(struct ust_app_session
*ua_sess
,
2902 struct ust_app_event
*ua_event
, struct ust_app
*app
)
2906 ret
= disable_ust_object(app
, ua_event
->obj
);
2911 ua_event
->enabled
= 0;
2918 * Lookup ust app channel for session and disable it on the tracer side.
2921 int disable_ust_app_channel(struct ust_app_session
*ua_sess
,
2922 struct ust_app_channel
*ua_chan
, struct ust_app
*app
)
2926 ret
= disable_ust_channel(app
, ua_sess
, ua_chan
);
2931 ua_chan
->enabled
= 0;
2938 * Lookup ust app channel for session and enable it on the tracer side. This
2939 * MUST be called with a RCU read side lock acquired.
2941 static int enable_ust_app_channel(struct ust_app_session
*ua_sess
,
2942 struct ltt_ust_channel
*uchan
, struct ust_app
*app
)
2945 struct lttng_ht_iter iter
;
2946 struct lttng_ht_node_str
*ua_chan_node
;
2947 struct ust_app_channel
*ua_chan
;
2949 lttng_ht_lookup(ua_sess
->channels
, (void *)uchan
->name
, &iter
);
2950 ua_chan_node
= lttng_ht_iter_get_node_str(&iter
);
2951 if (ua_chan_node
== NULL
) {
2952 DBG2("Unable to find channel %s in ust session id %" PRIu64
,
2953 uchan
->name
, ua_sess
->tracing_id
);
2957 ua_chan
= caa_container_of(ua_chan_node
, struct ust_app_channel
, node
);
2959 ret
= enable_ust_channel(app
, ua_sess
, ua_chan
);
2969 * Ask the consumer to create a channel and get it if successful.
2971 * Called with UST app session lock held.
2973 * Return 0 on success or else a negative value.
2975 static int do_consumer_create_channel(struct ltt_ust_session
*usess
,
2976 struct ust_app_session
*ua_sess
, struct ust_app_channel
*ua_chan
,
2977 int bitness
, struct ust_registry_session
*registry
,
2978 uint64_t trace_archive_id
)
2981 unsigned int nb_fd
= 0;
2982 struct consumer_socket
*socket
;
2990 health_code_update();
2992 /* Get the right consumer socket for the application. */
2993 socket
= consumer_find_socket_by_bitness(bitness
, usess
->consumer
);
2999 health_code_update();
3001 /* Need one fd for the channel. */
3002 ret
= lttng_fd_get(LTTNG_FD_APPS
, 1);
3004 ERR("Exhausted number of available FD upon create channel");
3009 * Ask consumer to create channel. The consumer will return the number of
3010 * stream we have to expect.
3012 ret
= ust_consumer_ask_channel(ua_sess
, ua_chan
, usess
->consumer
, socket
,
3013 registry
, usess
->current_trace_chunk
);
3019 * Compute the number of fd needed before receiving them. It must be 2 per
3020 * stream (2 being the default value here).
3022 nb_fd
= DEFAULT_UST_STREAM_FD_NUM
* ua_chan
->expected_stream_count
;
3024 /* Reserve the amount of file descriptor we need. */
3025 ret
= lttng_fd_get(LTTNG_FD_APPS
, nb_fd
);
3027 ERR("Exhausted number of available FD upon create channel");
3028 goto error_fd_get_stream
;
3031 health_code_update();
3034 * Now get the channel from the consumer. This call will populate the stream
3035 * list of that channel and set the ust objects.
3037 if (usess
->consumer
->enabled
) {
3038 ret
= ust_consumer_get_channel(socket
, ua_chan
);
3048 lttng_fd_put(LTTNG_FD_APPS
, nb_fd
);
3049 error_fd_get_stream
:
3051 * Initiate a destroy channel on the consumer since we had an error
3052 * handling it on our side. The return value is of no importance since we
3053 * already have a ret value set by the previous error that we need to
3056 (void) ust_consumer_destroy_channel(socket
, ua_chan
);
3058 lttng_fd_put(LTTNG_FD_APPS
, 1);
3060 health_code_update();
3066 * Duplicate the ust data object of the ust app stream and save it in the
3067 * buffer registry stream.
3069 * Return 0 on success or else a negative value.
3071 static int duplicate_stream_object(struct buffer_reg_stream
*reg_stream
,
3072 struct ust_app_stream
*stream
)
3079 /* Duplicating a stream requires 2 new fds. Reserve them. */
3080 ret
= lttng_fd_get(LTTNG_FD_APPS
, 2);
3082 ERR("Exhausted number of available FD upon duplicate stream");
3086 /* Duplicate object for stream once the original is in the registry. */
3087 ret
= lttng_ust_ctl_duplicate_ust_object_data(&stream
->obj
,
3088 reg_stream
->obj
.ust
);
3090 ERR("Duplicate stream obj from %p to %p failed with ret %d",
3091 reg_stream
->obj
.ust
, stream
->obj
, ret
);
3092 lttng_fd_put(LTTNG_FD_APPS
, 2);
3095 stream
->handle
= stream
->obj
->handle
;
3102 * Duplicate the ust data object of the ust app. channel and save it in the
3103 * buffer registry channel.
3105 * Return 0 on success or else a negative value.
3107 static int duplicate_channel_object(struct buffer_reg_channel
*buf_reg_chan
,
3108 struct ust_app_channel
*ua_chan
)
3112 assert(buf_reg_chan
);
3115 /* Duplicating a channel requires 1 new fd. Reserve it. */
3116 ret
= lttng_fd_get(LTTNG_FD_APPS
, 1);
3118 ERR("Exhausted number of available FD upon duplicate channel");
3122 /* Duplicate object for stream once the original is in the registry. */
3123 ret
= lttng_ust_ctl_duplicate_ust_object_data(&ua_chan
->obj
, buf_reg_chan
->obj
.ust
);
3125 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
3126 buf_reg_chan
->obj
.ust
, ua_chan
->obj
, ret
);
3129 ua_chan
->handle
= ua_chan
->obj
->handle
;
3134 lttng_fd_put(LTTNG_FD_APPS
, 1);
3140 * For a given channel buffer registry, setup all streams of the given ust
3141 * application channel.
3143 * Return 0 on success or else a negative value.
3145 static int setup_buffer_reg_streams(struct buffer_reg_channel
*buf_reg_chan
,
3146 struct ust_app_channel
*ua_chan
,
3147 struct ust_app
*app
)
3150 struct ust_app_stream
*stream
, *stmp
;
3152 assert(buf_reg_chan
);
3155 DBG2("UST app setup buffer registry stream");
3157 /* Send all streams to application. */
3158 cds_list_for_each_entry_safe(stream
, stmp
, &ua_chan
->streams
.head
, list
) {
3159 struct buffer_reg_stream
*reg_stream
;
3161 ret
= buffer_reg_stream_create(®_stream
);
3167 * Keep original pointer and nullify it in the stream so the delete
3168 * stream call does not release the object.
3170 reg_stream
->obj
.ust
= stream
->obj
;
3172 buffer_reg_stream_add(reg_stream
, buf_reg_chan
);
3174 /* We don't need the streams anymore. */
3175 cds_list_del(&stream
->list
);
3176 delete_ust_app_stream(-1, stream
, app
);
3184 * Create a buffer registry channel for the given session registry and
3185 * application channel object. If regp pointer is valid, it's set with the
3186 * created object. Important, the created object is NOT added to the session
3187 * registry hash table.
3189 * Return 0 on success else a negative value.
3191 static int create_buffer_reg_channel(struct buffer_reg_session
*reg_sess
,
3192 struct ust_app_channel
*ua_chan
, struct buffer_reg_channel
**regp
)
3195 struct buffer_reg_channel
*buf_reg_chan
= NULL
;
3200 DBG2("UST app creating buffer registry channel for %s", ua_chan
->name
);
3202 /* Create buffer registry channel. */
3203 ret
= buffer_reg_channel_create(ua_chan
->tracing_channel_id
, &buf_reg_chan
);
3207 assert(buf_reg_chan
);
3208 buf_reg_chan
->consumer_key
= ua_chan
->key
;
3209 buf_reg_chan
->subbuf_size
= ua_chan
->attr
.subbuf_size
;
3210 buf_reg_chan
->num_subbuf
= ua_chan
->attr
.num_subbuf
;
3212 /* Create and add a channel registry to session. */
3213 ret
= ust_registry_channel_add(reg_sess
->reg
.ust
,
3214 ua_chan
->tracing_channel_id
);
3218 buffer_reg_channel_add(reg_sess
, buf_reg_chan
);
3221 *regp
= buf_reg_chan
;
3227 /* Safe because the registry channel object was not added to any HT. */
3228 buffer_reg_channel_destroy(buf_reg_chan
, LTTNG_DOMAIN_UST
);
3234 * Setup buffer registry channel for the given session registry and application
3235 * channel object. If regp pointer is valid, it's set with the created object.
3237 * Return 0 on success else a negative value.
3239 static int setup_buffer_reg_channel(struct buffer_reg_session
*reg_sess
,
3240 struct ust_app_channel
*ua_chan
, struct buffer_reg_channel
*buf_reg_chan
,
3241 struct ust_app
*app
)
3246 assert(buf_reg_chan
);
3248 assert(ua_chan
->obj
);
3250 DBG2("UST app setup buffer registry channel for %s", ua_chan
->name
);
3252 /* Setup all streams for the registry. */
3253 ret
= setup_buffer_reg_streams(buf_reg_chan
, ua_chan
, app
);
3258 buf_reg_chan
->obj
.ust
= ua_chan
->obj
;
3259 ua_chan
->obj
= NULL
;
3264 buffer_reg_channel_remove(reg_sess
, buf_reg_chan
);
3265 buffer_reg_channel_destroy(buf_reg_chan
, LTTNG_DOMAIN_UST
);
3270 * Send buffer registry channel to the application.
3272 * Return 0 on success else a negative value.
3274 static int send_channel_uid_to_ust(struct buffer_reg_channel
*buf_reg_chan
,
3275 struct ust_app
*app
, struct ust_app_session
*ua_sess
,
3276 struct ust_app_channel
*ua_chan
)
3279 struct buffer_reg_stream
*reg_stream
;
3281 assert(buf_reg_chan
);
3286 DBG("UST app sending buffer registry channel to ust sock %d", app
->sock
);
3288 ret
= duplicate_channel_object(buf_reg_chan
, ua_chan
);
3293 /* Send channel to the application. */
3294 ret
= ust_consumer_send_channel_to_ust(app
, ua_sess
, ua_chan
);
3295 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
3296 ret
= -ENOTCONN
; /* Caused by app exiting. */
3298 } else if (ret
== -EAGAIN
) {
3299 /* Caused by timeout. */
3300 WARN("Communication with application %d timed out on send_channel for channel \"%s\" of session \"%" PRIu64
"\".",
3301 app
->pid
, ua_chan
->name
, ua_sess
->tracing_id
);
3302 /* Treat this the same way as an application that is exiting. */
3305 } else if (ret
< 0) {
3309 health_code_update();
3311 /* Send all streams to application. */
3312 pthread_mutex_lock(&buf_reg_chan
->stream_list_lock
);
3313 cds_list_for_each_entry(reg_stream
, &buf_reg_chan
->streams
, lnode
) {
3314 struct ust_app_stream stream
= {};
3316 ret
= duplicate_stream_object(reg_stream
, &stream
);
3318 goto error_stream_unlock
;
3321 ret
= ust_consumer_send_stream_to_ust(app
, ua_chan
, &stream
);
3323 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
3324 ret
= -ENOTCONN
; /* Caused by app exiting. */
3325 } else if (ret
== -EAGAIN
) {
3327 * Caused by timeout.
3328 * Treat this the same way as an application
3331 WARN("Communication with application %d timed out on send_stream for stream of channel \"%s\" of session \"%" PRIu64
"\".",
3334 ua_sess
->tracing_id
);
3337 (void) release_ust_app_stream(-1, &stream
, app
);
3338 goto error_stream_unlock
;
3342 * The return value is not important here. This function will output an
3345 (void) release_ust_app_stream(-1, &stream
, app
);
3347 ua_chan
->is_sent
= 1;
3349 error_stream_unlock
:
3350 pthread_mutex_unlock(&buf_reg_chan
->stream_list_lock
);
3356 * Create and send to the application the created buffers with per UID buffers.
3358 * This MUST be called with a RCU read side lock acquired.
3359 * The session list lock and the session's lock must be acquired.
3361 * Return 0 on success else a negative value.
3363 static int create_channel_per_uid(struct ust_app
*app
,
3364 struct ltt_ust_session
*usess
, struct ust_app_session
*ua_sess
,
3365 struct ust_app_channel
*ua_chan
)
3368 struct buffer_reg_uid
*reg_uid
;
3369 struct buffer_reg_channel
*buf_reg_chan
;
3370 struct ltt_session
*session
= NULL
;
3371 enum lttng_error_code notification_ret
;
3372 struct ust_registry_channel
*ust_reg_chan
;
3379 DBG("UST app creating channel %s with per UID buffers", ua_chan
->name
);
3381 reg_uid
= buffer_reg_uid_find(usess
->id
, app
->bits_per_long
, app
->uid
);
3383 * The session creation handles the creation of this global registry
3384 * object. If none can be find, there is a code flow problem or a
3389 buf_reg_chan
= buffer_reg_channel_find(ua_chan
->tracing_channel_id
,
3395 /* Create the buffer registry channel object. */
3396 ret
= create_buffer_reg_channel(reg_uid
->registry
, ua_chan
, &buf_reg_chan
);
3398 ERR("Error creating the UST channel \"%s\" registry instance",
3403 session
= session_find_by_id(ua_sess
->tracing_id
);
3405 assert(pthread_mutex_trylock(&session
->lock
));
3406 assert(session_trylock_list());
3409 * Create the buffers on the consumer side. This call populates the
3410 * ust app channel object with all streams and data object.
3412 ret
= do_consumer_create_channel(usess
, ua_sess
, ua_chan
,
3413 app
->bits_per_long
, reg_uid
->registry
->reg
.ust
,
3414 session
->most_recent_chunk_id
.value
);
3416 ERR("Error creating UST channel \"%s\" on the consumer daemon",
3420 * Let's remove the previously created buffer registry channel so
3421 * it's not visible anymore in the session registry.
3423 ust_registry_channel_del_free(reg_uid
->registry
->reg
.ust
,
3424 ua_chan
->tracing_channel_id
, false);
3425 buffer_reg_channel_remove(reg_uid
->registry
, buf_reg_chan
);
3426 buffer_reg_channel_destroy(buf_reg_chan
, LTTNG_DOMAIN_UST
);
3431 * Setup the streams and add it to the session registry.
3433 ret
= setup_buffer_reg_channel(reg_uid
->registry
,
3434 ua_chan
, buf_reg_chan
, app
);
3436 ERR("Error setting up UST channel \"%s\"", ua_chan
->name
);
3440 /* Notify the notification subsystem of the channel's creation. */
3441 pthread_mutex_lock(®_uid
->registry
->reg
.ust
->lock
);
3442 ust_reg_chan
= ust_registry_channel_find(reg_uid
->registry
->reg
.ust
,
3443 ua_chan
->tracing_channel_id
);
3444 assert(ust_reg_chan
);
3445 ust_reg_chan
->consumer_key
= ua_chan
->key
;
3446 ust_reg_chan
= NULL
;
3447 pthread_mutex_unlock(®_uid
->registry
->reg
.ust
->lock
);
3449 notification_ret
= notification_thread_command_add_channel(
3450 the_notification_thread_handle
, session
->name
,
3451 lttng_credentials_get_uid(
3452 &ua_sess
->effective_credentials
),
3453 lttng_credentials_get_gid(
3454 &ua_sess
->effective_credentials
),
3455 ua_chan
->name
, ua_chan
->key
, LTTNG_DOMAIN_UST
,
3456 ua_chan
->attr
.subbuf_size
* ua_chan
->attr
.num_subbuf
);
3457 if (notification_ret
!= LTTNG_OK
) {
3458 ret
= - (int) notification_ret
;
3459 ERR("Failed to add channel to notification thread");
3464 /* Send buffers to the application. */
3465 ret
= send_channel_uid_to_ust(buf_reg_chan
, app
, ua_sess
, ua_chan
);
3467 if (ret
!= -ENOTCONN
) {
3468 ERR("Error sending channel to application");
3475 session_put(session
);
3481 * Create and send to the application the created buffers with per PID buffers.
3483 * Called with UST app session lock held.
3484 * The session list lock and the session's lock must be acquired.
3486 * Return 0 on success else a negative value.
3488 static int create_channel_per_pid(struct ust_app
*app
,
3489 struct ltt_ust_session
*usess
, struct ust_app_session
*ua_sess
,
3490 struct ust_app_channel
*ua_chan
)
3493 struct ust_registry_session
*registry
;
3494 enum lttng_error_code cmd_ret
;
3495 struct ltt_session
*session
= NULL
;
3496 uint64_t chan_reg_key
;
3497 struct ust_registry_channel
*ust_reg_chan
;
3504 DBG("UST app creating channel %s with per PID buffers", ua_chan
->name
);
3508 registry
= get_session_registry(ua_sess
);
3509 /* The UST app session lock is held, registry shall not be null. */
3512 /* Create and add a new channel registry to session. */
3513 ret
= ust_registry_channel_add(registry
, ua_chan
->key
);
3515 ERR("Error creating the UST channel \"%s\" registry instance",
3520 session
= session_find_by_id(ua_sess
->tracing_id
);
3523 assert(pthread_mutex_trylock(&session
->lock
));
3524 assert(session_trylock_list());
3526 /* Create and get channel on the consumer side. */
3527 ret
= do_consumer_create_channel(usess
, ua_sess
, ua_chan
,
3528 app
->bits_per_long
, registry
,
3529 session
->most_recent_chunk_id
.value
);
3531 ERR("Error creating UST channel \"%s\" on the consumer daemon",
3533 goto error_remove_from_registry
;
3536 ret
= send_channel_pid_to_ust(app
, ua_sess
, ua_chan
);
3538 if (ret
!= -ENOTCONN
) {
3539 ERR("Error sending channel to application");
3541 goto error_remove_from_registry
;
3544 chan_reg_key
= ua_chan
->key
;
3545 pthread_mutex_lock(®istry
->lock
);
3546 ust_reg_chan
= ust_registry_channel_find(registry
, chan_reg_key
);
3547 assert(ust_reg_chan
);
3548 ust_reg_chan
->consumer_key
= ua_chan
->key
;
3549 pthread_mutex_unlock(®istry
->lock
);
3551 cmd_ret
= notification_thread_command_add_channel(
3552 the_notification_thread_handle
, session
->name
,
3553 lttng_credentials_get_uid(
3554 &ua_sess
->effective_credentials
),
3555 lttng_credentials_get_gid(
3556 &ua_sess
->effective_credentials
),
3557 ua_chan
->name
, ua_chan
->key
, LTTNG_DOMAIN_UST
,
3558 ua_chan
->attr
.subbuf_size
* ua_chan
->attr
.num_subbuf
);
3559 if (cmd_ret
!= LTTNG_OK
) {
3560 ret
= - (int) cmd_ret
;
3561 ERR("Failed to add channel to notification thread");
3562 goto error_remove_from_registry
;
3565 error_remove_from_registry
:
3567 ust_registry_channel_del_free(registry
, ua_chan
->key
, false);
3572 session_put(session
);
3578 * From an already allocated ust app channel, create the channel buffers if
3579 * needed and send them to the application. This MUST be called with a RCU read
3580 * side lock acquired.
3582 * Called with UST app session lock held.
3584 * Return 0 on success or else a negative value. Returns -ENOTCONN if
3585 * the application exited concurrently.
3587 static int ust_app_channel_send(struct ust_app
*app
,
3588 struct ltt_ust_session
*usess
, struct ust_app_session
*ua_sess
,
3589 struct ust_app_channel
*ua_chan
)
3595 assert(usess
->active
);
3599 /* Handle buffer type before sending the channel to the application. */
3600 switch (usess
->buffer_type
) {
3601 case LTTNG_BUFFER_PER_UID
:
3603 ret
= create_channel_per_uid(app
, usess
, ua_sess
, ua_chan
);
3609 case LTTNG_BUFFER_PER_PID
:
3611 ret
= create_channel_per_pid(app
, usess
, ua_sess
, ua_chan
);
3623 /* Initialize ust objd object using the received handle and add it. */
3624 lttng_ht_node_init_ulong(&ua_chan
->ust_objd_node
, ua_chan
->handle
);
3625 lttng_ht_add_unique_ulong(app
->ust_objd
, &ua_chan
->ust_objd_node
);
3627 /* If channel is not enabled, disable it on the tracer */
3628 if (!ua_chan
->enabled
) {
3629 ret
= disable_ust_channel(app
, ua_sess
, ua_chan
);
3640 * Create UST app channel and return it through ua_chanp if not NULL.
3642 * Called with UST app session lock and RCU read-side lock held.
3644 * Return 0 on success or else a negative value.
3646 static int ust_app_channel_allocate(struct ust_app_session
*ua_sess
,
3647 struct ltt_ust_channel
*uchan
,
3648 enum lttng_ust_abi_chan_type type
, struct ltt_ust_session
*usess
,
3649 struct ust_app_channel
**ua_chanp
)
3652 struct lttng_ht_iter iter
;
3653 struct lttng_ht_node_str
*ua_chan_node
;
3654 struct ust_app_channel
*ua_chan
;
3656 /* Lookup channel in the ust app session */
3657 lttng_ht_lookup(ua_sess
->channels
, (void *)uchan
->name
, &iter
);
3658 ua_chan_node
= lttng_ht_iter_get_node_str(&iter
);
3659 if (ua_chan_node
!= NULL
) {
3660 ua_chan
= caa_container_of(ua_chan_node
, struct ust_app_channel
, node
);
3664 ua_chan
= alloc_ust_app_channel(uchan
->name
, ua_sess
, &uchan
->attr
);
3665 if (ua_chan
== NULL
) {
3666 /* Only malloc can fail here */
3670 shadow_copy_channel(ua_chan
, uchan
);
3672 /* Set channel type. */
3673 ua_chan
->attr
.type
= type
;
3675 /* Only add the channel if successful on the tracer side. */
3676 lttng_ht_add_unique_str(ua_sess
->channels
, &ua_chan
->node
);
3679 *ua_chanp
= ua_chan
;
3682 /* Everything went well. */
3690 * Create UST app event and create it on the tracer side.
3692 * Must be called with the RCU read side lock held.
3693 * Called with ust app session mutex held.
3696 int create_ust_app_event(struct ust_app_session
*ua_sess
,
3697 struct ust_app_channel
*ua_chan
, struct ltt_ust_event
*uevent
,
3698 struct ust_app
*app
)
3701 struct ust_app_event
*ua_event
;
3703 ua_event
= alloc_ust_app_event(uevent
->attr
.name
, &uevent
->attr
);
3704 if (ua_event
== NULL
) {
3705 /* Only failure mode of alloc_ust_app_event(). */
3709 shadow_copy_event(ua_event
, uevent
);
3711 /* Create it on the tracer side */
3712 ret
= create_ust_event(app
, ua_sess
, ua_chan
, ua_event
);
3715 * Not found previously means that it does not exist on the
3716 * tracer. If the application reports that the event existed,
3717 * it means there is a bug in the sessiond or lttng-ust
3718 * (or corruption, etc.)
3720 if (ret
== -LTTNG_UST_ERR_EXIST
) {
3721 ERR("Tracer for application reported that an event being created already existed: "
3722 "event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d",
3724 app
->pid
, app
->ppid
, app
->uid
,
3730 add_unique_ust_app_event(ua_chan
, ua_event
);
3732 DBG2("UST app create event completed: app = '%s' pid = %d",
3733 app
->name
, app
->pid
);
3739 /* Valid. Calling here is already in a read side lock */
3740 delete_ust_app_event(-1, ua_event
, app
);
3745 * Create UST app event notifier rule and create it on the tracer side.
3747 * Must be called with the RCU read side lock held.
3748 * Called with ust app session mutex held.
3751 int create_ust_app_event_notifier_rule(struct lttng_trigger
*trigger
,
3752 struct ust_app
*app
)
3755 struct ust_app_event_notifier_rule
*ua_event_notifier_rule
;
3757 ua_event_notifier_rule
= alloc_ust_app_event_notifier_rule(trigger
);
3758 if (ua_event_notifier_rule
== NULL
) {
3763 /* Create it on the tracer side. */
3764 ret
= create_ust_event_notifier(app
, ua_event_notifier_rule
);
3767 * Not found previously means that it does not exist on the
3768 * tracer. If the application reports that the event existed,
3769 * it means there is a bug in the sessiond or lttng-ust
3770 * (or corruption, etc.)
3772 if (ret
== -LTTNG_UST_ERR_EXIST
) {
3773 ERR("Tracer for application reported that an event notifier being created already exists: "
3774 "token = \"%" PRIu64
"\", pid = %d, ppid = %d, uid = %d, gid = %d",
3775 lttng_trigger_get_tracer_token(trigger
),
3776 app
->pid
, app
->ppid
, app
->uid
,
3782 lttng_ht_add_unique_u64(app
->token_to_event_notifier_rule_ht
,
3783 &ua_event_notifier_rule
->node
);
3785 DBG2("UST app create token event rule completed: app = '%s', pid = %d), token = %" PRIu64
,
3786 app
->name
, app
->pid
, lttng_trigger_get_tracer_token(trigger
));
3791 /* The RCU read side lock is already being held by the caller. */
3792 delete_ust_app_event_notifier_rule(-1, ua_event_notifier_rule
, app
);
3798 * Create UST metadata and open it on the tracer side.
3800 * Called with UST app session lock held and RCU read side lock.
3802 static int create_ust_app_metadata(struct ust_app_session
*ua_sess
,
3803 struct ust_app
*app
, struct consumer_output
*consumer
)
3806 struct ust_app_channel
*metadata
;
3807 struct consumer_socket
*socket
;
3808 struct ust_registry_session
*registry
;
3809 struct ltt_session
*session
= NULL
;
3815 registry
= get_session_registry(ua_sess
);
3816 /* The UST app session is held, so the registry shall not be null. */
3819 pthread_mutex_lock(®istry
->lock
);
3821 /* Metadata already exists for this registry or it was closed previously */
3822 if (registry
->metadata_key
|| registry
->metadata_closed
) {
3827 /* Allocate UST metadata */
3828 metadata
= alloc_ust_app_channel(DEFAULT_METADATA_NAME
, ua_sess
, NULL
);
3830 /* malloc() failed */
3835 memcpy(&metadata
->attr
, &ua_sess
->metadata_attr
, sizeof(metadata
->attr
));
3837 /* Need one fd for the channel. */
3838 ret
= lttng_fd_get(LTTNG_FD_APPS
, 1);
3840 ERR("Exhausted number of available FD upon create metadata");
3844 /* Get the right consumer socket for the application. */
3845 socket
= consumer_find_socket_by_bitness(app
->bits_per_long
, consumer
);
3848 goto error_consumer
;
3852 * Keep metadata key so we can identify it on the consumer side. Assign it
3853 * to the registry *before* we ask the consumer so we avoid the race of the
3854 * consumer requesting the metadata and the ask_channel call on our side
3855 * has not returned yet.
3857 registry
->metadata_key
= metadata
->key
;
3859 session
= session_find_by_id(ua_sess
->tracing_id
);
3862 assert(pthread_mutex_trylock(&session
->lock
));
3863 assert(session_trylock_list());
3866 * Ask the metadata channel creation to the consumer. The metadata object
3867 * will be created by the consumer and kept there. However, the stream is
3868 * never added or monitored until we do a first push metadata to the
3871 ret
= ust_consumer_ask_channel(ua_sess
, metadata
, consumer
, socket
,
3872 registry
, session
->current_trace_chunk
);
3874 /* Nullify the metadata key so we don't try to close it later on. */
3875 registry
->metadata_key
= 0;
3876 goto error_consumer
;
3880 * The setup command will make the metadata stream be sent to the relayd,
3881 * if applicable, and the thread managing the metadatas. This is important
3882 * because after this point, if an error occurs, the only way the stream
3883 * can be deleted is to be monitored in the consumer.
3885 ret
= consumer_setup_metadata(socket
, metadata
->key
);
3887 /* Nullify the metadata key so we don't try to close it later on. */
3888 registry
->metadata_key
= 0;
3889 goto error_consumer
;
3892 DBG2("UST metadata with key %" PRIu64
" created for app pid %d",
3893 metadata
->key
, app
->pid
);
3896 lttng_fd_put(LTTNG_FD_APPS
, 1);
3897 delete_ust_app_channel(-1, metadata
, app
);
3899 pthread_mutex_unlock(®istry
->lock
);
3901 session_put(session
);
3907 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
3908 * acquired before calling this function.
3910 struct ust_app
*ust_app_find_by_pid(pid_t pid
)
3912 struct ust_app
*app
= NULL
;
3913 struct lttng_ht_node_ulong
*node
;
3914 struct lttng_ht_iter iter
;
3916 lttng_ht_lookup(ust_app_ht
, (void *)((unsigned long) pid
), &iter
);
3917 node
= lttng_ht_iter_get_node_ulong(&iter
);
3919 DBG2("UST app no found with pid %d", pid
);
3923 DBG2("Found UST app by pid %d", pid
);
3925 app
= caa_container_of(node
, struct ust_app
, pid_n
);
3932 * Allocate and init an UST app object using the registration information and
3933 * the command socket. This is called when the command socket connects to the
3936 * The object is returned on success or else NULL.
3938 struct ust_app
*ust_app_create(struct ust_register_msg
*msg
, int sock
)
3941 struct ust_app
*lta
= NULL
;
3942 struct lttng_pipe
*event_notifier_event_source_pipe
= NULL
;
3947 DBG3("UST app creating application for socket %d", sock
);
3949 if ((msg
->bits_per_long
== 64 &&
3950 (uatomic_read(&the_ust_consumerd64_fd
) ==
3952 (msg
->bits_per_long
== 32 &&
3953 (uatomic_read(&the_ust_consumerd32_fd
) ==
3955 ERR("Registration failed: application \"%s\" (pid: %d) has "
3956 "%d-bit long, but no consumerd for this size is available.\n",
3957 msg
->name
, msg
->pid
, msg
->bits_per_long
);
3962 * Reserve the two file descriptors of the event source pipe. The write
3963 * end will be closed once it is passed to the application, at which
3964 * point a single 'put' will be performed.
3966 ret
= lttng_fd_get(LTTNG_FD_APPS
, 2);
3968 ERR("Failed to reserve two file descriptors for the event source pipe while creating a new application instance: app = '%s', pid = %d",
3969 msg
->name
, (int) msg
->pid
);
3973 event_notifier_event_source_pipe
= lttng_pipe_open(FD_CLOEXEC
);
3974 if (!event_notifier_event_source_pipe
) {
3975 PERROR("Failed to open application event source pipe: '%s' (pid = %d)",
3976 msg
->name
, msg
->pid
);
3980 lta
= zmalloc(sizeof(struct ust_app
));
3983 goto error_free_pipe
;
3986 urcu_ref_init(<a
->ref
);
3988 lta
->event_notifier_group
.event_pipe
= event_notifier_event_source_pipe
;
3990 lta
->ppid
= msg
->ppid
;
3991 lta
->uid
= msg
->uid
;
3992 lta
->gid
= msg
->gid
;
3994 lta
->bits_per_long
= msg
->bits_per_long
;
3995 lta
->uint8_t_alignment
= msg
->uint8_t_alignment
;
3996 lta
->uint16_t_alignment
= msg
->uint16_t_alignment
;
3997 lta
->uint32_t_alignment
= msg
->uint32_t_alignment
;
3998 lta
->uint64_t_alignment
= msg
->uint64_t_alignment
;
3999 lta
->long_alignment
= msg
->long_alignment
;
4000 lta
->byte_order
= msg
->byte_order
;
4002 lta
->v_major
= msg
->major
;
4003 lta
->v_minor
= msg
->minor
;
4004 lta
->sessions
= lttng_ht_new(0, LTTNG_HT_TYPE_U64
);
4005 lta
->ust_objd
= lttng_ht_new(0, LTTNG_HT_TYPE_ULONG
);
4006 lta
->ust_sessions_objd
= lttng_ht_new(0, LTTNG_HT_TYPE_ULONG
);
4007 lta
->notify_sock
= -1;
4008 lta
->token_to_event_notifier_rule_ht
= lttng_ht_new(0, LTTNG_HT_TYPE_U64
);
4010 /* Copy name and make sure it's NULL terminated. */
4011 strncpy(lta
->name
, msg
->name
, sizeof(lta
->name
));
4012 lta
->name
[UST_APP_PROCNAME_LEN
] = '\0';
4015 * Before this can be called, when receiving the registration information,
4016 * the application compatibility is checked. So, at this point, the
4017 * application can work with this session daemon.
4019 lta
->compatible
= 1;
4021 lta
->pid
= msg
->pid
;
4022 lttng_ht_node_init_ulong(<a
->pid_n
, (unsigned long) lta
->pid
);
4024 pthread_mutex_init(<a
->sock_lock
, NULL
);
4025 lttng_ht_node_init_ulong(<a
->sock_n
, (unsigned long) lta
->sock
);
4027 CDS_INIT_LIST_HEAD(<a
->teardown_head
);
4031 lttng_pipe_destroy(event_notifier_event_source_pipe
);
4032 lttng_fd_put(LTTNG_FD_APPS
, 2);
4038 * For a given application object, add it to every hash table.
4040 void ust_app_add(struct ust_app
*app
)
4043 assert(app
->notify_sock
>= 0);
4045 app
->registration_time
= time(NULL
);
4050 * On a re-registration, we want to kick out the previous registration of
4053 lttng_ht_add_replace_ulong(ust_app_ht
, &app
->pid_n
);
4056 * The socket _should_ be unique until _we_ call close. So, a add_unique
4057 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
4058 * already in the table.
4060 lttng_ht_add_unique_ulong(ust_app_ht_by_sock
, &app
->sock_n
);
4062 /* Add application to the notify socket hash table. */
4063 lttng_ht_node_init_ulong(&app
->notify_sock_n
, app
->notify_sock
);
4064 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock
, &app
->notify_sock_n
);
4066 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock =%d name:%s "
4067 "notify_sock =%d (version %d.%d)", app
->pid
, app
->ppid
, app
->uid
,
4068 app
->gid
, app
->sock
, app
->name
, app
->notify_sock
, app
->v_major
,
4075 * Set the application version into the object.
4077 * Return 0 on success else a negative value either an errno code or a
4078 * LTTng-UST error code.
4080 int ust_app_version(struct ust_app
*app
)
4086 pthread_mutex_lock(&app
->sock_lock
);
4087 ret
= lttng_ust_ctl_tracer_version(app
->sock
, &app
->version
);
4088 pthread_mutex_unlock(&app
->sock_lock
);
4090 if (ret
== -LTTNG_UST_ERR_EXITING
|| ret
== -EPIPE
) {
4091 DBG3("UST app version failed. Application is dead: pid = %d, sock = %d",
4092 app
->pid
, app
->sock
);
4093 } else if (ret
== -EAGAIN
) {
4094 WARN("UST app version failed. Communication time out: pid = %d, sock = %d",
4095 app
->pid
, app
->sock
);
4097 ERR("UST app version failed with ret %d: pid = %d, sock = %d",
4098 ret
, app
->pid
, app
->sock
);
4105 bool ust_app_supports_notifiers(const struct ust_app
*app
)
4107 return app
->v_major
>= 9;
4110 bool ust_app_supports_counters(const struct ust_app
*app
)
4112 return app
->v_major
>= 9;
4116 * Setup the base event notifier group.
4118 * Return 0 on success else a negative value either an errno code or a
4119 * LTTng-UST error code.
4121 int ust_app_setup_event_notifier_group(struct ust_app
*app
)
4124 int event_pipe_write_fd
;
4125 struct lttng_ust_abi_object_data
*event_notifier_group
= NULL
;
4126 enum lttng_error_code lttng_ret
;
4127 enum event_notifier_error_accounting_status event_notifier_error_accounting_status
;
4131 if (!ust_app_supports_notifiers(app
)) {
4136 /* Get the write side of the pipe. */
4137 event_pipe_write_fd
= lttng_pipe_get_writefd(
4138 app
->event_notifier_group
.event_pipe
);
4140 pthread_mutex_lock(&app
->sock_lock
);
4141 ret
= lttng_ust_ctl_create_event_notifier_group(app
->sock
,
4142 event_pipe_write_fd
, &event_notifier_group
);
4143 pthread_mutex_unlock(&app
->sock_lock
);
4145 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
4147 DBG3("UST app create event notifier group failed. Application is dead: pid = %d, sock = %d",
4148 app
->pid
, app
->sock
);
4149 } else if (ret
== -EAGAIN
) {
4151 WARN("UST app create event notifier group failed. Communication time out: pid = %d, sock = %d",
4152 app
->pid
, app
->sock
);
4154 ERR("UST app create event notifier group failed with ret %d: pid = %d, sock = %d, event_pipe_write_fd: %d",
4155 ret
, app
->pid
, app
->sock
, event_pipe_write_fd
);
4160 ret
= lttng_pipe_write_close(app
->event_notifier_group
.event_pipe
);
4162 ERR("Failed to close write end of the application's event source pipe: app = '%s' (pid = %d)",
4163 app
->name
, app
->pid
);
4168 * Release the file descriptor that was reserved for the write-end of
4171 lttng_fd_put(LTTNG_FD_APPS
, 1);
4173 lttng_ret
= notification_thread_command_add_tracer_event_source(
4174 the_notification_thread_handle
,
4175 lttng_pipe_get_readfd(
4176 app
->event_notifier_group
.event_pipe
),
4178 if (lttng_ret
!= LTTNG_OK
) {
4179 ERR("Failed to add tracer event source to notification thread");
4184 /* Assign handle only when the complete setup is valid. */
4185 app
->event_notifier_group
.object
= event_notifier_group
;
4187 event_notifier_error_accounting_status
=
4188 event_notifier_error_accounting_register_app(app
);
4189 switch (event_notifier_error_accounting_status
) {
4190 case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK
:
4192 case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_UNSUPPORTED
:
4193 DBG3("Failed to setup event notifier error accounting (application does not support notifier error accounting): app socket fd = %d, app name = '%s', app pid = %d",
4194 app
->sock
, app
->name
, (int) app
->pid
);
4196 goto error_accounting
;
4197 case EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_APP_DEAD
:
4198 DBG3("Failed to setup event notifier error accounting (application is dead): app socket fd = %d, app name = '%s', app pid = %d",
4199 app
->sock
, app
->name
, (int) app
->pid
);
4201 goto error_accounting
;
4203 ERR("Failed to setup event notifier error accounting for app");
4205 goto error_accounting
;
4211 lttng_ret
= notification_thread_command_remove_tracer_event_source(
4212 the_notification_thread_handle
,
4213 lttng_pipe_get_readfd(
4214 app
->event_notifier_group
.event_pipe
));
4215 if (lttng_ret
!= LTTNG_OK
) {
4216 ERR("Failed to remove application tracer event source from notification thread");
4220 lttng_ust_ctl_release_object(app
->sock
, app
->event_notifier_group
.object
);
4221 free(app
->event_notifier_group
.object
);
4222 app
->event_notifier_group
.object
= NULL
;
4226 static void ust_app_unregister(struct ust_app
*app
)
4229 struct lttng_ht_iter iter
;
4230 struct ust_app_session
*ua_sess
;
4235 * For per-PID buffers, perform "push metadata" and flush all
4236 * application streams before removing app from hash tables,
4237 * ensuring proper behavior of data_pending check.
4238 * Remove sessions so they are not visible during deletion.
4240 cds_lfht_for_each_entry(app
->sessions
->ht
, &iter
.iter
, ua_sess
,
4242 struct ust_registry_session
*registry
;
4244 ret
= lttng_ht_del(app
->sessions
, &iter
);
4246 /* The session was already removed so scheduled for teardown. */
4250 if (ua_sess
->buffer_type
== LTTNG_BUFFER_PER_PID
) {
4251 (void) ust_app_flush_app_session(app
, ua_sess
);
4255 * Add session to list for teardown. This is safe since at this point we
4256 * are the only one using this list.
4258 pthread_mutex_lock(&ua_sess
->lock
);
4260 if (ua_sess
->deleted
) {
4261 pthread_mutex_unlock(&ua_sess
->lock
);
4266 * Normally, this is done in the delete session process which is
4267 * executed in the call rcu below. However, upon registration we can't
4268 * afford to wait for the grace period before pushing data or else the
4269 * data pending feature can race between the unregistration and stop
4270 * command where the data pending command is sent *before* the grace
4273 * The close metadata below nullifies the metadata pointer in the
4274 * session so the delete session will NOT push/close a second time.
4276 registry
= get_session_registry(ua_sess
);
4278 /* Push metadata for application before freeing the application. */
4279 (void) push_metadata(registry
, ua_sess
->consumer
);
4282 * Don't ask to close metadata for global per UID buffers. Close
4283 * metadata only on destroy trace session in this case. Also, the
4284 * previous push metadata could have flag the metadata registry to
4285 * close so don't send a close command if closed.
4287 if (ua_sess
->buffer_type
!= LTTNG_BUFFER_PER_UID
) {
4288 /* And ask to close it for this session registry. */
4289 (void) close_metadata(registry
, ua_sess
->consumer
);
4293 cds_list_add(&ua_sess
->teardown_node
, &app
->teardown_head
);
4294 pthread_mutex_unlock(&ua_sess
->lock
);
4298 * Remove application from notify hash table. The thread handling the
4299 * notify socket could have deleted the node so ignore on error because
4300 * either way it's valid. The close of that socket is handled by the
4301 * apps_notify_thread.
4303 iter
.iter
.node
= &app
->notify_sock_n
.node
;
4304 (void) lttng_ht_del(ust_app_ht_by_notify_sock
, &iter
);
4306 iter
.iter
.node
= &app
->pid_n
.node
;
4307 ret
= lttng_ht_del(ust_app_ht
, &iter
);
4309 WARN("Unregister app by PID %d failed", app
->pid
);
4316 * Unregister app by removing it from the global traceable app list and freeing
4319 * The socket is already closed at this point, so there is no need to close it.
4321 void ust_app_unregister_by_socket(int sock
)
4323 struct ust_app
*app
;
4324 struct lttng_ht_node_ulong
*node
;
4325 struct lttng_ht_iter ust_app_sock_iter
;
4330 /* Get the node reference for a call_rcu */
4331 lttng_ht_lookup(ust_app_ht_by_sock
, (void *)((unsigned long) sock
), &ust_app_sock_iter
);
4332 node
= lttng_ht_iter_get_node_ulong(&ust_app_sock_iter
);
4335 app
= caa_container_of(node
, struct ust_app
, sock_n
);
4337 DBG("PID %d unregistering with sock %d", app
->pid
, sock
);
4339 /* Remove application from socket hash table */
4340 ret
= lttng_ht_del(ust_app_ht_by_sock
, &ust_app_sock_iter
);
4344 * The socket is closed: release its reference to the application
4345 * to trigger its eventual teardown.
4352 * Fill events array with all events name of all registered apps.
4354 int ust_app_list_events(struct lttng_event
**events
)
4357 size_t nbmem
, count
= 0;
4358 struct lttng_ht_iter iter
;
4359 struct ust_app
*app
;
4360 struct lttng_event
*tmp_event
;
4362 nbmem
= UST_APP_EVENT_LIST_SIZE
;
4363 tmp_event
= zmalloc(nbmem
* sizeof(struct lttng_event
));
4364 if (tmp_event
== NULL
) {
4365 PERROR("zmalloc ust app events");
4372 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
4373 struct lttng_ust_abi_tracepoint_iter uiter
;
4375 health_code_update();
4377 if (!app
->compatible
) {
4379 * TODO: In time, we should notify the caller of this error by
4380 * telling them that this is a version error.
4384 pthread_mutex_lock(&app
->sock_lock
);
4385 handle
= lttng_ust_ctl_tracepoint_list(app
->sock
);
4387 if (handle
!= -EPIPE
&& handle
!= -LTTNG_UST_ERR_EXITING
) {
4388 ERR("UST app list events getting handle failed for app pid %d",
4391 pthread_mutex_unlock(&app
->sock_lock
);
4395 while ((ret
= lttng_ust_ctl_tracepoint_list_get(app
->sock
, handle
,
4396 &uiter
)) != -LTTNG_UST_ERR_NOENT
) {
4397 /* Handle ustctl error. */
4401 if (ret
!= -LTTNG_UST_ERR_EXITING
&& ret
!= -EPIPE
) {
4402 ERR("UST app tp list get failed for app %d with ret %d",
4405 DBG3("UST app tp list get failed. Application is dead");
4409 release_ret
= lttng_ust_ctl_release_handle(app
->sock
, handle
);
4410 if (release_ret
< 0 &&
4411 release_ret
!= -LTTNG_UST_ERR_EXITING
&&
4412 release_ret
!= -EPIPE
) {
4413 ERR("Error releasing app handle for app %d with ret %d", app
->sock
, release_ret
);
4415 pthread_mutex_unlock(&app
->sock_lock
);
4419 health_code_update();
4420 if (count
>= nbmem
) {
4421 /* In case the realloc fails, we free the memory */
4422 struct lttng_event
*new_tmp_event
;
4425 new_nbmem
= nbmem
<< 1;
4426 DBG2("Reallocating event list from %zu to %zu entries",
4428 new_tmp_event
= realloc(tmp_event
,
4429 new_nbmem
* sizeof(struct lttng_event
));
4430 if (new_tmp_event
== NULL
) {
4433 PERROR("realloc ust app events");
4436 release_ret
= lttng_ust_ctl_release_handle(app
->sock
, handle
);
4437 if (release_ret
< 0 &&
4438 release_ret
!= -LTTNG_UST_ERR_EXITING
&&
4439 release_ret
!= -EPIPE
) {
4440 ERR("Error releasing app handle for app %d with ret %d", app
->sock
, release_ret
);
4442 pthread_mutex_unlock(&app
->sock_lock
);
4445 /* Zero the new memory */
4446 memset(new_tmp_event
+ nbmem
, 0,
4447 (new_nbmem
- nbmem
) * sizeof(struct lttng_event
));
4449 tmp_event
= new_tmp_event
;
4451 memcpy(tmp_event
[count
].name
, uiter
.name
, LTTNG_UST_ABI_SYM_NAME_LEN
);
4452 tmp_event
[count
].loglevel
= uiter
.loglevel
;
4453 tmp_event
[count
].type
= (enum lttng_event_type
) LTTNG_UST_ABI_TRACEPOINT
;
4454 tmp_event
[count
].pid
= app
->pid
;
4455 tmp_event
[count
].enabled
= -1;
4458 ret
= lttng_ust_ctl_release_handle(app
->sock
, handle
);
4459 pthread_mutex_unlock(&app
->sock_lock
);
4461 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
4462 DBG3("Error releasing app handle. Application died: pid = %d, sock = %d",
4463 app
->pid
, app
->sock
);
4464 } else if (ret
== -EAGAIN
) {
4465 WARN("Error releasing app handle. Communication time out: pid = %d, sock = %d",
4466 app
->pid
, app
->sock
);
4468 ERR("Error releasing app handle with ret %d: pid = %d, sock = %d",
4469 ret
, app
->pid
, app
->sock
);
4475 *events
= tmp_event
;
4477 DBG2("UST app list events done (%zu events)", count
);
4482 health_code_update();
4487 * Fill events array with all events name of all registered apps.
4489 int ust_app_list_event_fields(struct lttng_event_field
**fields
)
4492 size_t nbmem
, count
= 0;
4493 struct lttng_ht_iter iter
;
4494 struct ust_app
*app
;
4495 struct lttng_event_field
*tmp_event
;
4497 nbmem
= UST_APP_EVENT_LIST_SIZE
;
4498 tmp_event
= zmalloc(nbmem
* sizeof(struct lttng_event_field
));
4499 if (tmp_event
== NULL
) {
4500 PERROR("zmalloc ust app event fields");
4507 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
4508 struct lttng_ust_abi_field_iter uiter
;
4510 health_code_update();
4512 if (!app
->compatible
) {
4514 * TODO: In time, we should notice the caller of this error by
4515 * telling him that this is a version error.
4519 pthread_mutex_lock(&app
->sock_lock
);
4520 handle
= lttng_ust_ctl_tracepoint_field_list(app
->sock
);
4522 if (handle
!= -EPIPE
&& handle
!= -LTTNG_UST_ERR_EXITING
) {
4523 ERR("UST app list field getting handle failed for app pid %d",
4526 pthread_mutex_unlock(&app
->sock_lock
);
4530 while ((ret
= lttng_ust_ctl_tracepoint_field_list_get(app
->sock
, handle
,
4531 &uiter
)) != -LTTNG_UST_ERR_NOENT
) {
4532 /* Handle ustctl error. */
4536 if (ret
!= -LTTNG_UST_ERR_EXITING
&& ret
!= -EPIPE
) {
4537 ERR("UST app tp list field failed for app %d with ret %d",
4540 DBG3("UST app tp list field failed. Application is dead");
4544 release_ret
= lttng_ust_ctl_release_handle(app
->sock
, handle
);
4545 pthread_mutex_unlock(&app
->sock_lock
);
4546 if (release_ret
< 0 &&
4547 release_ret
!= -LTTNG_UST_ERR_EXITING
&&
4548 release_ret
!= -EPIPE
) {
4549 ERR("Error releasing app handle for app %d with ret %d", app
->sock
, release_ret
);
4554 health_code_update();
4555 if (count
>= nbmem
) {
4556 /* In case the realloc fails, we free the memory */
4557 struct lttng_event_field
*new_tmp_event
;
4560 new_nbmem
= nbmem
<< 1;
4561 DBG2("Reallocating event field list from %zu to %zu entries",
4563 new_tmp_event
= realloc(tmp_event
,
4564 new_nbmem
* sizeof(struct lttng_event_field
));
4565 if (new_tmp_event
== NULL
) {
4568 PERROR("realloc ust app event fields");
4571 release_ret
= lttng_ust_ctl_release_handle(app
->sock
, handle
);
4572 pthread_mutex_unlock(&app
->sock_lock
);
4574 release_ret
!= -LTTNG_UST_ERR_EXITING
&&
4575 release_ret
!= -EPIPE
) {
4576 ERR("Error releasing app handle for app %d with ret %d", app
->sock
, release_ret
);
4580 /* Zero the new memory */
4581 memset(new_tmp_event
+ nbmem
, 0,
4582 (new_nbmem
- nbmem
) * sizeof(struct lttng_event_field
));
4584 tmp_event
= new_tmp_event
;
4587 memcpy(tmp_event
[count
].field_name
, uiter
.field_name
, LTTNG_UST_ABI_SYM_NAME_LEN
);
4588 /* Mapping between these enums matches 1 to 1. */
4589 tmp_event
[count
].type
= (enum lttng_event_field_type
) uiter
.type
;
4590 tmp_event
[count
].nowrite
= uiter
.nowrite
;
4592 memcpy(tmp_event
[count
].event
.name
, uiter
.event_name
, LTTNG_UST_ABI_SYM_NAME_LEN
);
4593 tmp_event
[count
].event
.loglevel
= uiter
.loglevel
;
4594 tmp_event
[count
].event
.type
= LTTNG_EVENT_TRACEPOINT
;
4595 tmp_event
[count
].event
.pid
= app
->pid
;
4596 tmp_event
[count
].event
.enabled
= -1;
4599 ret
= lttng_ust_ctl_release_handle(app
->sock
, handle
);
4600 pthread_mutex_unlock(&app
->sock_lock
);
4602 ret
!= -LTTNG_UST_ERR_EXITING
&&
4604 ERR("Error releasing app handle for app %d with ret %d", app
->sock
, ret
);
4609 *fields
= tmp_event
;
4611 DBG2("UST app list event fields done (%zu events)", count
);
4616 health_code_update();
4621 * Free and clean all traceable apps of the global list.
4623 * Should _NOT_ be called with RCU read-side lock held.
4625 void ust_app_clean_list(void)
4628 struct ust_app
*app
;
4629 struct lttng_ht_iter iter
;
4631 DBG2("UST app cleaning registered apps hash table");
4635 /* Cleanup notify socket hash table */
4636 if (ust_app_ht_by_notify_sock
) {
4637 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock
->ht
, &iter
.iter
, app
,
4638 notify_sock_n
.node
) {
4640 * Assert that all notifiers are gone as all triggers
4641 * are unregistered prior to this clean-up.
4643 assert(lttng_ht_get_count(app
->token_to_event_notifier_rule_ht
) == 0);
4644 ust_app_notify_sock_unregister(app
->notify_sock
);
4648 /* Cleanup socket hash table */
4649 if (ust_app_ht_by_sock
) {
4650 cds_lfht_for_each_entry(ust_app_ht_by_sock
->ht
, &iter
.iter
, app
,
4652 ret
= lttng_ht_del(ust_app_ht_by_sock
, &iter
);
4661 /* Destroy is done only when the ht is empty */
4663 ht_cleanup_push(ust_app_ht
);
4665 if (ust_app_ht_by_sock
) {
4666 ht_cleanup_push(ust_app_ht_by_sock
);
4668 if (ust_app_ht_by_notify_sock
) {
4669 ht_cleanup_push(ust_app_ht_by_notify_sock
);
4674 * Init UST app hash table.
4676 int ust_app_ht_alloc(void)
4678 ust_app_ht
= lttng_ht_new(0, LTTNG_HT_TYPE_ULONG
);
4682 ust_app_ht_by_sock
= lttng_ht_new(0, LTTNG_HT_TYPE_ULONG
);
4683 if (!ust_app_ht_by_sock
) {
4686 ust_app_ht_by_notify_sock
= lttng_ht_new(0, LTTNG_HT_TYPE_ULONG
);
4687 if (!ust_app_ht_by_notify_sock
) {
4694 * For a specific UST session, disable the channel for all registered apps.
4696 int ust_app_disable_channel_glb(struct ltt_ust_session
*usess
,
4697 struct ltt_ust_channel
*uchan
)
4700 struct lttng_ht_iter iter
;
4701 struct lttng_ht_node_str
*ua_chan_node
;
4702 struct ust_app
*app
;
4703 struct ust_app_session
*ua_sess
;
4704 struct ust_app_channel
*ua_chan
;
4706 assert(usess
->active
);
4707 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64
,
4708 uchan
->name
, usess
->id
);
4712 /* For every registered applications */
4713 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
4714 struct lttng_ht_iter uiter
;
4715 if (!app
->compatible
) {
4717 * TODO: In time, we should notice the caller of this error by
4718 * telling him that this is a version error.
4722 ua_sess
= lookup_session_by_app(usess
, app
);
4723 if (ua_sess
== NULL
) {
4728 lttng_ht_lookup(ua_sess
->channels
, (void *)uchan
->name
, &uiter
);
4729 ua_chan_node
= lttng_ht_iter_get_node_str(&uiter
);
4730 /* If the session if found for the app, the channel must be there */
4731 assert(ua_chan_node
);
4733 ua_chan
= caa_container_of(ua_chan_node
, struct ust_app_channel
, node
);
4734 /* The channel must not be already disabled */
4735 assert(ua_chan
->enabled
== 1);
4737 /* Disable channel onto application */
4738 ret
= disable_ust_app_channel(ua_sess
, ua_chan
, app
);
4740 /* XXX: We might want to report this error at some point... */
4750 * For a specific UST session, enable the channel for all registered apps.
4752 int ust_app_enable_channel_glb(struct ltt_ust_session
*usess
,
4753 struct ltt_ust_channel
*uchan
)
4756 struct lttng_ht_iter iter
;
4757 struct ust_app
*app
;
4758 struct ust_app_session
*ua_sess
;
4760 assert(usess
->active
);
4761 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64
,
4762 uchan
->name
, usess
->id
);
4766 /* For every registered applications */
4767 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
4768 if (!app
->compatible
) {
4770 * TODO: In time, we should notice the caller of this error by
4771 * telling him that this is a version error.
4775 ua_sess
= lookup_session_by_app(usess
, app
);
4776 if (ua_sess
== NULL
) {
4780 /* Enable channel onto application */
4781 ret
= enable_ust_app_channel(ua_sess
, uchan
, app
);
4783 /* XXX: We might want to report this error at some point... */
4793 * Disable an event in a channel and for a specific session.
4795 int ust_app_disable_event_glb(struct ltt_ust_session
*usess
,
4796 struct ltt_ust_channel
*uchan
, struct ltt_ust_event
*uevent
)
4799 struct lttng_ht_iter iter
, uiter
;
4800 struct lttng_ht_node_str
*ua_chan_node
;
4801 struct ust_app
*app
;
4802 struct ust_app_session
*ua_sess
;
4803 struct ust_app_channel
*ua_chan
;
4804 struct ust_app_event
*ua_event
;
4806 assert(usess
->active
);
4807 DBG("UST app disabling event %s for all apps in channel "
4808 "%s for session id %" PRIu64
,
4809 uevent
->attr
.name
, uchan
->name
, usess
->id
);
4813 /* For all registered applications */
4814 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
4815 if (!app
->compatible
) {
4817 * TODO: In time, we should notice the caller of this error by
4818 * telling him that this is a version error.
4822 ua_sess
= lookup_session_by_app(usess
, app
);
4823 if (ua_sess
== NULL
) {
4828 /* Lookup channel in the ust app session */
4829 lttng_ht_lookup(ua_sess
->channels
, (void *)uchan
->name
, &uiter
);
4830 ua_chan_node
= lttng_ht_iter_get_node_str(&uiter
);
4831 if (ua_chan_node
== NULL
) {
4832 DBG2("Channel %s not found in session id %" PRIu64
" for app pid %d."
4833 "Skipping", uchan
->name
, usess
->id
, app
->pid
);
4836 ua_chan
= caa_container_of(ua_chan_node
, struct ust_app_channel
, node
);
4838 ua_event
= find_ust_app_event(ua_chan
->events
,
4839 uevent
->attr
.name
, uevent
->filter
,
4840 (enum lttng_ust_abi_loglevel_type
)
4841 uevent
->attr
.loglevel_type
,
4842 uevent
->attr
.loglevel
, uevent
->exclusion
);
4843 if (ua_event
== NULL
) {
4844 DBG2("Event %s not found in channel %s for app pid %d."
4845 "Skipping", uevent
->attr
.name
, uchan
->name
, app
->pid
);
4849 ret
= disable_ust_app_event(ua_sess
, ua_event
, app
);
4851 /* XXX: Report error someday... */
4860 /* The ua_sess lock must be held by the caller. */
4862 int ust_app_channel_create(struct ltt_ust_session
*usess
,
4863 struct ust_app_session
*ua_sess
,
4864 struct ltt_ust_channel
*uchan
, struct ust_app
*app
,
4865 struct ust_app_channel
**_ua_chan
)
4868 struct ust_app_channel
*ua_chan
= NULL
;
4871 ASSERT_LOCKED(ua_sess
->lock
);
4873 if (!strncmp(uchan
->name
, DEFAULT_METADATA_NAME
,
4874 sizeof(uchan
->name
))) {
4875 copy_channel_attr_to_ustctl(&ua_sess
->metadata_attr
,
4879 struct ltt_ust_context
*uctx
= NULL
;
4882 * Create channel onto application and synchronize its
4885 ret
= ust_app_channel_allocate(ua_sess
, uchan
,
4886 LTTNG_UST_ABI_CHAN_PER_CPU
, usess
,
4892 ret
= ust_app_channel_send(app
, usess
,
4899 cds_list_for_each_entry(uctx
, &uchan
->ctx_list
, list
) {
4900 ret
= create_ust_app_channel_context(ua_chan
,
4913 * The application's socket is not valid. Either a bad socket
4914 * or a timeout on it. We can't inform the caller that for a
4915 * specific app, the session failed so lets continue here.
4917 ret
= 0; /* Not an error. */
4925 if (ret
== 0 && _ua_chan
) {
4927 * Only return the application's channel on success. Note
4928 * that the channel can still be part of the application's
4929 * channel hashtable on error.
4931 *_ua_chan
= ua_chan
;
4937 * Enable event for a specific session and channel on the tracer.
4939 int ust_app_enable_event_glb(struct ltt_ust_session
*usess
,
4940 struct ltt_ust_channel
*uchan
, struct ltt_ust_event
*uevent
)
4943 struct lttng_ht_iter iter
, uiter
;
4944 struct lttng_ht_node_str
*ua_chan_node
;
4945 struct ust_app
*app
;
4946 struct ust_app_session
*ua_sess
;
4947 struct ust_app_channel
*ua_chan
;
4948 struct ust_app_event
*ua_event
;
4950 assert(usess
->active
);
4951 DBG("UST app enabling event %s for all apps for session id %" PRIu64
,
4952 uevent
->attr
.name
, usess
->id
);
4955 * NOTE: At this point, this function is called only if the session and
4956 * channel passed are already created for all apps. and enabled on the
4962 /* For all registered applications */
4963 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
4964 if (!app
->compatible
) {
4966 * TODO: In time, we should notice the caller of this error by
4967 * telling him that this is a version error.
4971 ua_sess
= lookup_session_by_app(usess
, app
);
4973 /* The application has problem or is probably dead. */
4977 pthread_mutex_lock(&ua_sess
->lock
);
4979 if (ua_sess
->deleted
) {
4980 pthread_mutex_unlock(&ua_sess
->lock
);
4984 /* Lookup channel in the ust app session */
4985 lttng_ht_lookup(ua_sess
->channels
, (void *)uchan
->name
, &uiter
);
4986 ua_chan_node
= lttng_ht_iter_get_node_str(&uiter
);
4988 * It is possible that the channel cannot be found is
4989 * the channel/event creation occurs concurrently with
4990 * an application exit.
4992 if (!ua_chan_node
) {
4993 pthread_mutex_unlock(&ua_sess
->lock
);
4997 ua_chan
= caa_container_of(ua_chan_node
, struct ust_app_channel
, node
);
4999 /* Get event node */
5000 ua_event
= find_ust_app_event(ua_chan
->events
,
5001 uevent
->attr
.name
, uevent
->filter
,
5002 (enum lttng_ust_abi_loglevel_type
)
5003 uevent
->attr
.loglevel_type
,
5004 uevent
->attr
.loglevel
, uevent
->exclusion
);
5005 if (ua_event
== NULL
) {
5006 DBG3("UST app enable event %s not found for app PID %d."
5007 "Skipping app", uevent
->attr
.name
, app
->pid
);
5011 ret
= enable_ust_app_event(ua_sess
, ua_event
, app
);
5013 pthread_mutex_unlock(&ua_sess
->lock
);
5017 pthread_mutex_unlock(&ua_sess
->lock
);
5026 * For a specific existing UST session and UST channel, creates the event for
5027 * all registered apps.
5029 int ust_app_create_event_glb(struct ltt_ust_session
*usess
,
5030 struct ltt_ust_channel
*uchan
, struct ltt_ust_event
*uevent
)
5033 struct lttng_ht_iter iter
, uiter
;
5034 struct lttng_ht_node_str
*ua_chan_node
;
5035 struct ust_app
*app
;
5036 struct ust_app_session
*ua_sess
;
5037 struct ust_app_channel
*ua_chan
;
5039 assert(usess
->active
);
5040 DBG("UST app creating event %s for all apps for session id %" PRIu64
,
5041 uevent
->attr
.name
, usess
->id
);
5045 /* For all registered applications */
5046 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
5047 if (!app
->compatible
) {
5049 * TODO: In time, we should notice the caller of this error by
5050 * telling him that this is a version error.
5054 ua_sess
= lookup_session_by_app(usess
, app
);
5056 /* The application has problem or is probably dead. */
5060 pthread_mutex_lock(&ua_sess
->lock
);
5062 if (ua_sess
->deleted
) {
5063 pthread_mutex_unlock(&ua_sess
->lock
);
5067 /* Lookup channel in the ust app session */
5068 lttng_ht_lookup(ua_sess
->channels
, (void *)uchan
->name
, &uiter
);
5069 ua_chan_node
= lttng_ht_iter_get_node_str(&uiter
);
5070 /* If the channel is not found, there is a code flow error */
5071 assert(ua_chan_node
);
5073 ua_chan
= caa_container_of(ua_chan_node
, struct ust_app_channel
, node
);
5075 ret
= create_ust_app_event(ua_sess
, ua_chan
, uevent
, app
);
5076 pthread_mutex_unlock(&ua_sess
->lock
);
5078 if (ret
!= -LTTNG_UST_ERR_EXIST
) {
5079 /* Possible value at this point: -ENOMEM. If so, we stop! */
5082 DBG2("UST app event %s already exist on app PID %d",
5083 uevent
->attr
.name
, app
->pid
);
5093 * Start tracing for a specific UST session and app.
5095 * Called with UST app session lock held.
5099 int ust_app_start_trace(struct ltt_ust_session
*usess
, struct ust_app
*app
)
5102 struct ust_app_session
*ua_sess
;
5104 DBG("Starting tracing for ust app pid %d", app
->pid
);
5108 if (!app
->compatible
) {
5112 ua_sess
= lookup_session_by_app(usess
, app
);
5113 if (ua_sess
== NULL
) {
5114 /* The session is in teardown process. Ignore and continue. */
5118 pthread_mutex_lock(&ua_sess
->lock
);
5120 if (ua_sess
->deleted
) {
5121 pthread_mutex_unlock(&ua_sess
->lock
);
5125 if (ua_sess
->enabled
) {
5126 pthread_mutex_unlock(&ua_sess
->lock
);
5130 /* Upon restart, we skip the setup, already done */
5131 if (ua_sess
->started
) {
5135 health_code_update();
5138 /* This starts the UST tracing */
5139 pthread_mutex_lock(&app
->sock_lock
);
5140 ret
= lttng_ust_ctl_start_session(app
->sock
, ua_sess
->handle
);
5141 pthread_mutex_unlock(&app
->sock_lock
);
5143 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
5144 DBG3("UST app start session failed. Application is dead: pid = %d, sock = %d",
5145 app
->pid
, app
->sock
);
5146 pthread_mutex_unlock(&ua_sess
->lock
);
5148 } else if (ret
== -EAGAIN
) {
5149 WARN("UST app start session failed. Communication time out: pid = %d, sock = %d",
5150 app
->pid
, app
->sock
);
5151 pthread_mutex_unlock(&ua_sess
->lock
);
5155 ERR("UST app start session failed with ret %d: pid = %d, sock = %d",
5156 ret
, app
->pid
, app
->sock
);
5161 /* Indicate that the session has been started once */
5162 ua_sess
->started
= 1;
5163 ua_sess
->enabled
= 1;
5165 pthread_mutex_unlock(&ua_sess
->lock
);
5167 health_code_update();
5169 /* Quiescent wait after starting trace */
5170 pthread_mutex_lock(&app
->sock_lock
);
5171 ret
= lttng_ust_ctl_wait_quiescent(app
->sock
);
5172 pthread_mutex_unlock(&app
->sock_lock
);
5174 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
5175 DBG3("UST app wait quiescent failed. Application is dead: pid = %d, sock = %d",
5176 app
->pid
, app
->sock
);
5177 } else if (ret
== -EAGAIN
) {
5178 WARN("UST app wait quiescent failed. Communication time out: pid = %d, sock = %d",
5179 app
->pid
, app
->sock
);
5181 ERR("UST app wait quiescent failed with ret %d: pid %d, sock = %d",
5182 ret
, app
->pid
, app
->sock
);
5188 health_code_update();
5192 pthread_mutex_unlock(&ua_sess
->lock
);
5194 health_code_update();
5199 * Stop tracing for a specific UST session and app.
5202 int ust_app_stop_trace(struct ltt_ust_session
*usess
, struct ust_app
*app
)
5205 struct ust_app_session
*ua_sess
;
5206 struct ust_registry_session
*registry
;
5208 DBG("Stopping tracing for ust app pid %d", app
->pid
);
5212 if (!app
->compatible
) {
5213 goto end_no_session
;
5216 ua_sess
= lookup_session_by_app(usess
, app
);
5217 if (ua_sess
== NULL
) {
5218 goto end_no_session
;
5221 pthread_mutex_lock(&ua_sess
->lock
);
5223 if (ua_sess
->deleted
) {
5224 pthread_mutex_unlock(&ua_sess
->lock
);
5225 goto end_no_session
;
5229 * If started = 0, it means that stop trace has been called for a session
5230 * that was never started. It's possible since we can have a fail start
5231 * from either the application manager thread or the command thread. Simply
5232 * indicate that this is a stop error.
5234 if (!ua_sess
->started
) {
5235 goto error_rcu_unlock
;
5238 health_code_update();
5240 /* This inhibits UST tracing */
5241 pthread_mutex_lock(&app
->sock_lock
);
5242 ret
= lttng_ust_ctl_stop_session(app
->sock
, ua_sess
->handle
);
5243 pthread_mutex_unlock(&app
->sock_lock
);
5245 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
5246 DBG3("UST app stop session failed. Application is dead: pid = %d, sock = %d",
5247 app
->pid
, app
->sock
);
5249 } else if (ret
== -EAGAIN
) {
5250 WARN("UST app stop session failed. Communication time out: pid = %d, sock = %d",
5251 app
->pid
, app
->sock
);
5255 ERR("UST app stop session failed with ret %d: pid = %d, sock = %d",
5256 ret
, app
->pid
, app
->sock
);
5258 goto error_rcu_unlock
;
5261 health_code_update();
5262 ua_sess
->enabled
= 0;
5264 /* Quiescent wait after stopping trace */
5265 pthread_mutex_lock(&app
->sock_lock
);
5266 ret
= lttng_ust_ctl_wait_quiescent(app
->sock
);
5267 pthread_mutex_unlock(&app
->sock_lock
);
5269 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
5270 DBG3("UST app wait quiescent failed. Application is dead: pid= %d, sock = %d)",
5271 app
->pid
, app
->sock
);
5272 } else if (ret
== -EAGAIN
) {
5273 WARN("UST app wait quiescent failed. Communication time out: pid= %d, sock = %d)",
5274 app
->pid
, app
->sock
);
5276 ERR("UST app wait quiescent failed with ret %d: pid= %d, sock = %d)",
5277 ret
, app
->pid
, app
->sock
);
5281 health_code_update();
5283 registry
= get_session_registry(ua_sess
);
5285 /* The UST app session is held registry shall not be null. */
5288 /* Push metadata for application before freeing the application. */
5289 (void) push_metadata(registry
, ua_sess
->consumer
);
5292 pthread_mutex_unlock(&ua_sess
->lock
);
5295 health_code_update();
5299 pthread_mutex_unlock(&ua_sess
->lock
);
5301 health_code_update();
5306 int ust_app_flush_app_session(struct ust_app
*app
,
5307 struct ust_app_session
*ua_sess
)
5309 int ret
, retval
= 0;
5310 struct lttng_ht_iter iter
;
5311 struct ust_app_channel
*ua_chan
;
5312 struct consumer_socket
*socket
;
5314 DBG("Flushing app session buffers for ust app pid %d", app
->pid
);
5318 if (!app
->compatible
) {
5319 goto end_not_compatible
;
5322 pthread_mutex_lock(&ua_sess
->lock
);
5324 if (ua_sess
->deleted
) {
5328 health_code_update();
5330 /* Flushing buffers */
5331 socket
= consumer_find_socket_by_bitness(app
->bits_per_long
,
5334 /* Flush buffers and push metadata. */
5335 switch (ua_sess
->buffer_type
) {
5336 case LTTNG_BUFFER_PER_PID
:
5337 cds_lfht_for_each_entry(ua_sess
->channels
->ht
, &iter
.iter
, ua_chan
,
5339 health_code_update();
5340 ret
= consumer_flush_channel(socket
, ua_chan
->key
);
5342 ERR("Error flushing consumer channel");
5348 case LTTNG_BUFFER_PER_UID
:
5354 health_code_update();
5357 pthread_mutex_unlock(&ua_sess
->lock
);
5361 health_code_update();
5366 * Flush buffers for all applications for a specific UST session.
5367 * Called with UST session lock held.
5370 int ust_app_flush_session(struct ltt_ust_session
*usess
)
5375 DBG("Flushing session buffers for all ust apps");
5379 /* Flush buffers and push metadata. */
5380 switch (usess
->buffer_type
) {
5381 case LTTNG_BUFFER_PER_UID
:
5383 struct buffer_reg_uid
*reg
;
5384 struct lttng_ht_iter iter
;
5386 /* Flush all per UID buffers associated to that session. */
5387 cds_list_for_each_entry(reg
, &usess
->buffer_reg_uid_list
, lnode
) {
5388 struct ust_registry_session
*ust_session_reg
;
5389 struct buffer_reg_channel
*buf_reg_chan
;
5390 struct consumer_socket
*socket
;
5392 /* Get consumer socket to use to push the metadata.*/
5393 socket
= consumer_find_socket_by_bitness(reg
->bits_per_long
,
5396 /* Ignore request if no consumer is found for the session. */
5400 cds_lfht_for_each_entry(reg
->registry
->channels
->ht
, &iter
.iter
,
5401 buf_reg_chan
, node
.node
) {
5403 * The following call will print error values so the return
5404 * code is of little importance because whatever happens, we
5405 * have to try them all.
5407 (void) consumer_flush_channel(socket
, buf_reg_chan
->consumer_key
);
5410 ust_session_reg
= reg
->registry
->reg
.ust
;
5411 /* Push metadata. */
5412 (void) push_metadata(ust_session_reg
, usess
->consumer
);
5416 case LTTNG_BUFFER_PER_PID
:
5418 struct ust_app_session
*ua_sess
;
5419 struct lttng_ht_iter iter
;
5420 struct ust_app
*app
;
5422 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
5423 ua_sess
= lookup_session_by_app(usess
, app
);
5424 if (ua_sess
== NULL
) {
5427 (void) ust_app_flush_app_session(app
, ua_sess
);
5438 health_code_update();
5443 int ust_app_clear_quiescent_app_session(struct ust_app
*app
,
5444 struct ust_app_session
*ua_sess
)
5447 struct lttng_ht_iter iter
;
5448 struct ust_app_channel
*ua_chan
;
5449 struct consumer_socket
*socket
;
5451 DBG("Clearing stream quiescent state for ust app pid %d", app
->pid
);
5455 if (!app
->compatible
) {
5456 goto end_not_compatible
;
5459 pthread_mutex_lock(&ua_sess
->lock
);
5461 if (ua_sess
->deleted
) {
5465 health_code_update();
5467 socket
= consumer_find_socket_by_bitness(app
->bits_per_long
,
5470 ERR("Failed to find consumer (%" PRIu32
") socket",
5471 app
->bits_per_long
);
5476 /* Clear quiescent state. */
5477 switch (ua_sess
->buffer_type
) {
5478 case LTTNG_BUFFER_PER_PID
:
5479 cds_lfht_for_each_entry(ua_sess
->channels
->ht
, &iter
.iter
,
5480 ua_chan
, node
.node
) {
5481 health_code_update();
5482 ret
= consumer_clear_quiescent_channel(socket
,
5485 ERR("Error clearing quiescent state for consumer channel");
5491 case LTTNG_BUFFER_PER_UID
:
5498 health_code_update();
5501 pthread_mutex_unlock(&ua_sess
->lock
);
5505 health_code_update();
5510 * Clear quiescent state in each stream for all applications for a
5511 * specific UST session.
5512 * Called with UST session lock held.
5515 int ust_app_clear_quiescent_session(struct ltt_ust_session
*usess
)
5520 DBG("Clearing stream quiescent state for all ust apps");
5524 switch (usess
->buffer_type
) {
5525 case LTTNG_BUFFER_PER_UID
:
5527 struct lttng_ht_iter iter
;
5528 struct buffer_reg_uid
*reg
;
5531 * Clear quiescent for all per UID buffers associated to
5534 cds_list_for_each_entry(reg
, &usess
->buffer_reg_uid_list
, lnode
) {
5535 struct consumer_socket
*socket
;
5536 struct buffer_reg_channel
*buf_reg_chan
;
5538 /* Get associated consumer socket.*/
5539 socket
= consumer_find_socket_by_bitness(
5540 reg
->bits_per_long
, usess
->consumer
);
5543 * Ignore request if no consumer is found for
5549 cds_lfht_for_each_entry(reg
->registry
->channels
->ht
,
5550 &iter
.iter
, buf_reg_chan
, node
.node
) {
5552 * The following call will print error values so
5553 * the return code is of little importance
5554 * because whatever happens, we have to try them
5557 (void) consumer_clear_quiescent_channel(socket
,
5558 buf_reg_chan
->consumer_key
);
5563 case LTTNG_BUFFER_PER_PID
:
5565 struct ust_app_session
*ua_sess
;
5566 struct lttng_ht_iter iter
;
5567 struct ust_app
*app
;
5569 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
,
5571 ua_sess
= lookup_session_by_app(usess
, app
);
5572 if (ua_sess
== NULL
) {
5575 (void) ust_app_clear_quiescent_app_session(app
,
5587 health_code_update();
5592 * Destroy a specific UST session in apps.
5594 static int destroy_trace(struct ltt_ust_session
*usess
, struct ust_app
*app
)
5597 struct ust_app_session
*ua_sess
;
5598 struct lttng_ht_iter iter
;
5599 struct lttng_ht_node_u64
*node
;
5601 DBG("Destroy tracing for ust app pid %d", app
->pid
);
5605 if (!app
->compatible
) {
5609 __lookup_session_by_app(usess
, app
, &iter
);
5610 node
= lttng_ht_iter_get_node_u64(&iter
);
5612 /* Session is being or is deleted. */
5615 ua_sess
= caa_container_of(node
, struct ust_app_session
, node
);
5617 health_code_update();
5618 destroy_app_session(app
, ua_sess
);
5620 health_code_update();
5622 /* Quiescent wait after stopping trace */
5623 pthread_mutex_lock(&app
->sock_lock
);
5624 ret
= lttng_ust_ctl_wait_quiescent(app
->sock
);
5625 pthread_mutex_unlock(&app
->sock_lock
);
5627 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
5628 DBG3("UST app wait quiescent failed. Application is dead: pid= %d, sock = %d)",
5629 app
->pid
, app
->sock
);
5630 } else if (ret
== -EAGAIN
) {
5631 WARN("UST app wait quiescent failed. Communication time out: pid= %d, sock = %d)",
5632 app
->pid
, app
->sock
);
5634 ERR("UST app wait quiescent failed with ret %d: pid= %d, sock = %d)",
5635 ret
, app
->pid
, app
->sock
);
5640 health_code_update();
5645 * Start tracing for the UST session.
5647 int ust_app_start_trace_all(struct ltt_ust_session
*usess
)
5649 struct lttng_ht_iter iter
;
5650 struct ust_app
*app
;
5652 DBG("Starting all UST traces");
5655 * Even though the start trace might fail, flag this session active so
5656 * other application coming in are started by default.
5663 * In a start-stop-start use-case, we need to clear the quiescent state
5664 * of each channel set by the prior stop command, thus ensuring that a
5665 * following stop or destroy is sure to grab a timestamp_end near those
5666 * operations, even if the packet is empty.
5668 (void) ust_app_clear_quiescent_session(usess
);
5670 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
5671 ust_app_global_update(usess
, app
);
5680 * Start tracing for the UST session.
5681 * Called with UST session lock held.
5683 int ust_app_stop_trace_all(struct ltt_ust_session
*usess
)
5686 struct lttng_ht_iter iter
;
5687 struct ust_app
*app
;
5689 DBG("Stopping all UST traces");
5692 * Even though the stop trace might fail, flag this session inactive so
5693 * other application coming in are not started by default.
5699 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
5700 ret
= ust_app_stop_trace(usess
, app
);
5702 /* Continue to next apps even on error */
5707 (void) ust_app_flush_session(usess
);
5715 * Destroy app UST session.
5717 int ust_app_destroy_trace_all(struct ltt_ust_session
*usess
)
5720 struct lttng_ht_iter iter
;
5721 struct ust_app
*app
;
5723 DBG("Destroy all UST traces");
5727 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
5728 ret
= destroy_trace(usess
, app
);
5730 /* Continue to next apps even on error */
5740 /* The ua_sess lock must be held by the caller. */
5742 int find_or_create_ust_app_channel(
5743 struct ltt_ust_session
*usess
,
5744 struct ust_app_session
*ua_sess
,
5745 struct ust_app
*app
,
5746 struct ltt_ust_channel
*uchan
,
5747 struct ust_app_channel
**ua_chan
)
5750 struct lttng_ht_iter iter
;
5751 struct lttng_ht_node_str
*ua_chan_node
;
5753 lttng_ht_lookup(ua_sess
->channels
, (void *) uchan
->name
, &iter
);
5754 ua_chan_node
= lttng_ht_iter_get_node_str(&iter
);
5756 *ua_chan
= caa_container_of(ua_chan_node
,
5757 struct ust_app_channel
, node
);
5761 ret
= ust_app_channel_create(usess
, ua_sess
, uchan
, app
, ua_chan
);
5770 int ust_app_channel_synchronize_event(struct ust_app_channel
*ua_chan
,
5771 struct ltt_ust_event
*uevent
, struct ust_app_session
*ua_sess
,
5772 struct ust_app
*app
)
5775 struct ust_app_event
*ua_event
= NULL
;
5777 ua_event
= find_ust_app_event(ua_chan
->events
, uevent
->attr
.name
,
5779 (enum lttng_ust_abi_loglevel_type
)
5780 uevent
->attr
.loglevel_type
,
5781 uevent
->attr
.loglevel
, uevent
->exclusion
);
5783 ret
= create_ust_app_event(ua_sess
, ua_chan
, uevent
, app
);
5788 if (ua_event
->enabled
!= uevent
->enabled
) {
5789 ret
= uevent
->enabled
?
5790 enable_ust_app_event(ua_sess
, ua_event
, app
) :
5791 disable_ust_app_event(ua_sess
, ua_event
, app
);
5799 /* Called with RCU read-side lock held. */
5801 void ust_app_synchronize_event_notifier_rules(struct ust_app
*app
)
5804 enum lttng_error_code ret_code
;
5805 enum lttng_trigger_status t_status
;
5806 struct lttng_ht_iter app_trigger_iter
;
5807 struct lttng_triggers
*triggers
= NULL
;
5808 struct ust_app_event_notifier_rule
*event_notifier_rule
;
5809 unsigned int count
, i
;
5811 if (!ust_app_supports_notifiers(app
)) {
5816 * Currrently, registering or unregistering a trigger with an
5817 * event rule condition causes a full synchronization of the event
5820 * The first step attempts to add an event notifier for all registered
5821 * triggers that apply to the user space tracers. Then, the
5822 * application's event notifiers rules are all checked against the list
5823 * of registered triggers. Any event notifier that doesn't have a
5824 * matching trigger can be assumed to have been disabled.
5826 * All of this is inefficient, but is put in place to get the feature
5827 * rolling as it is simpler at this moment. It will be optimized Soon™
5828 * to allow the state of enabled
5829 * event notifiers to be synchronized in a piece-wise way.
5832 /* Get all triggers using uid 0 (root) */
5833 ret_code
= notification_thread_command_list_triggers(
5834 the_notification_thread_handle
, 0, &triggers
);
5835 if (ret_code
!= LTTNG_OK
) {
5841 t_status
= lttng_triggers_get_count(triggers
, &count
);
5842 if (t_status
!= LTTNG_TRIGGER_STATUS_OK
) {
5846 for (i
= 0; i
< count
; i
++) {
5847 struct lttng_condition
*condition
;
5848 struct lttng_event_rule
*event_rule
;
5849 struct lttng_trigger
*trigger
;
5850 const struct ust_app_event_notifier_rule
*looked_up_event_notifier_rule
;
5851 enum lttng_condition_status condition_status
;
5854 trigger
= lttng_triggers_borrow_mutable_at_index(triggers
, i
);
5857 token
= lttng_trigger_get_tracer_token(trigger
);
5858 condition
= lttng_trigger_get_condition(trigger
);
5860 if (lttng_condition_get_type(condition
) !=
5861 LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES
) {
5862 /* Does not apply */
5867 lttng_condition_event_rule_matches_borrow_rule_mutable(
5868 condition
, &event_rule
);
5869 assert(condition_status
== LTTNG_CONDITION_STATUS_OK
);
5871 if (lttng_event_rule_get_domain_type(event_rule
) == LTTNG_DOMAIN_KERNEL
) {
5872 /* Skip kernel related triggers. */
5877 * Find or create the associated token event rule. The caller
5878 * holds the RCU read lock, so this is safe to call without
5879 * explicitly acquiring it here.
5881 looked_up_event_notifier_rule
= find_ust_app_event_notifier_rule(
5882 app
->token_to_event_notifier_rule_ht
, token
);
5883 if (!looked_up_event_notifier_rule
) {
5884 ret
= create_ust_app_event_notifier_rule(trigger
, app
);
5892 /* Remove all unknown event sources from the app. */
5893 cds_lfht_for_each_entry (app
->token_to_event_notifier_rule_ht
->ht
,
5894 &app_trigger_iter
.iter
, event_notifier_rule
,
5896 const uint64_t app_token
= event_notifier_rule
->token
;
5900 * Check if the app event trigger still exists on the
5901 * notification side.
5903 for (i
= 0; i
< count
; i
++) {
5904 uint64_t notification_thread_token
;
5905 const struct lttng_trigger
*trigger
=
5906 lttng_triggers_get_at_index(
5911 notification_thread_token
=
5912 lttng_trigger_get_tracer_token(trigger
);
5914 if (notification_thread_token
== app_token
) {
5926 * This trigger was unregistered, disable it on the tracer's
5929 ret
= lttng_ht_del(app
->token_to_event_notifier_rule_ht
,
5933 /* Callee logs errors. */
5934 (void) disable_ust_object(app
, event_notifier_rule
->obj
);
5936 delete_ust_app_event_notifier_rule(
5937 app
->sock
, event_notifier_rule
, app
);
5943 lttng_triggers_destroy(triggers
);
5948 * RCU read lock must be held by the caller.
5951 void ust_app_synchronize_all_channels(struct ltt_ust_session
*usess
,
5952 struct ust_app_session
*ua_sess
,
5953 struct ust_app
*app
)
5956 struct cds_lfht_iter uchan_iter
;
5957 struct ltt_ust_channel
*uchan
;
5963 cds_lfht_for_each_entry(usess
->domain_global
.channels
->ht
, &uchan_iter
,
5965 struct ust_app_channel
*ua_chan
;
5966 struct cds_lfht_iter uevent_iter
;
5967 struct ltt_ust_event
*uevent
;
5970 * Search for a matching ust_app_channel. If none is found,
5971 * create it. Creating the channel will cause the ua_chan
5972 * structure to be allocated, the channel buffers to be
5973 * allocated (if necessary) and sent to the application, and
5974 * all enabled contexts will be added to the channel.
5976 ret
= find_or_create_ust_app_channel(usess
, ua_sess
,
5977 app
, uchan
, &ua_chan
);
5979 /* Tracer is probably gone or ENOMEM. */
5984 /* ua_chan will be NULL for the metadata channel */
5988 cds_lfht_for_each_entry(uchan
->events
->ht
, &uevent_iter
, uevent
,
5990 ret
= ust_app_channel_synchronize_event(ua_chan
,
5991 uevent
, ua_sess
, app
);
5997 if (ua_chan
->enabled
!= uchan
->enabled
) {
5998 ret
= uchan
->enabled
?
5999 enable_ust_app_channel(ua_sess
, uchan
, app
) :
6000 disable_ust_app_channel(ua_sess
, ua_chan
, app
);
6011 * The caller must ensure that the application is compatible and is tracked
6012 * by the process attribute trackers.
6015 void ust_app_synchronize(struct ltt_ust_session
*usess
,
6016 struct ust_app
*app
)
6019 struct ust_app_session
*ua_sess
= NULL
;
6022 * The application's configuration should only be synchronized for
6025 assert(usess
->active
);
6027 ret
= find_or_create_ust_app_session(usess
, app
, &ua_sess
, NULL
);
6029 /* Tracer is probably gone or ENOMEM. */
6031 destroy_app_session(app
, ua_sess
);
6037 pthread_mutex_lock(&ua_sess
->lock
);
6038 if (ua_sess
->deleted
) {
6039 goto deleted_session
;
6044 ust_app_synchronize_all_channels(usess
, ua_sess
, app
);
6047 * Create the metadata for the application. This returns gracefully if a
6048 * metadata was already set for the session.
6050 * The metadata channel must be created after the data channels as the
6051 * consumer daemon assumes this ordering. When interacting with a relay
6052 * daemon, the consumer will use this assumption to send the
6053 * "STREAMS_SENT" message to the relay daemon.
6055 ret
= create_ust_app_metadata(ua_sess
, app
, usess
->consumer
);
6057 ERR("Metadata creation failed for app sock %d for session id %" PRIu64
,
6058 app
->sock
, usess
->id
);
6064 pthread_mutex_unlock(&ua_sess
->lock
);
6070 void ust_app_global_destroy(struct ltt_ust_session
*usess
, struct ust_app
*app
)
6072 struct ust_app_session
*ua_sess
;
6074 ua_sess
= lookup_session_by_app(usess
, app
);
6075 if (ua_sess
== NULL
) {
6078 destroy_app_session(app
, ua_sess
);
6082 * Add channels/events from UST global domain to registered apps at sock.
6084 * Called with session lock held.
6085 * Called with RCU read-side lock held.
6087 void ust_app_global_update(struct ltt_ust_session
*usess
, struct ust_app
*app
)
6090 assert(usess
->active
);
6092 DBG2("UST app global update for app sock %d for session id %" PRIu64
,
6093 app
->sock
, usess
->id
);
6095 if (!app
->compatible
) {
6098 if (trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID
,
6100 trace_ust_id_tracker_lookup(
6101 LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID
,
6103 trace_ust_id_tracker_lookup(
6104 LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID
,
6107 * Synchronize the application's internal tracing configuration
6108 * and start tracing.
6110 ust_app_synchronize(usess
, app
);
6111 ust_app_start_trace(usess
, app
);
6113 ust_app_global_destroy(usess
, app
);
6118 * Add all event notifiers to an application.
6120 * Called with session lock held.
6121 * Called with RCU read-side lock held.
6123 void ust_app_global_update_event_notifier_rules(struct ust_app
*app
)
6125 DBG2("UST application global event notifier rules update: app = '%s', pid = %d)",
6126 app
->name
, app
->pid
);
6128 if (!app
->compatible
|| !ust_app_supports_notifiers(app
)) {
6132 if (app
->event_notifier_group
.object
== NULL
) {
6133 WARN("UST app global update of event notifiers for app skipped since communication handle is null: app = '%s' pid = %d)",
6134 app
->name
, app
->pid
);
6138 ust_app_synchronize_event_notifier_rules(app
);
6142 * Called with session lock held.
6144 void ust_app_global_update_all(struct ltt_ust_session
*usess
)
6146 struct lttng_ht_iter iter
;
6147 struct ust_app
*app
;
6150 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
6151 ust_app_global_update(usess
, app
);
6156 void ust_app_global_update_all_event_notifier_rules(void)
6158 struct lttng_ht_iter iter
;
6159 struct ust_app
*app
;
6162 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
6163 ust_app_global_update_event_notifier_rules(app
);
6170 * Add context to a specific channel for global UST domain.
6172 int ust_app_add_ctx_channel_glb(struct ltt_ust_session
*usess
,
6173 struct ltt_ust_channel
*uchan
, struct ltt_ust_context
*uctx
)
6176 struct lttng_ht_node_str
*ua_chan_node
;
6177 struct lttng_ht_iter iter
, uiter
;
6178 struct ust_app_channel
*ua_chan
= NULL
;
6179 struct ust_app_session
*ua_sess
;
6180 struct ust_app
*app
;
6182 assert(usess
->active
);
6185 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
6186 if (!app
->compatible
) {
6188 * TODO: In time, we should notice the caller of this error by
6189 * telling him that this is a version error.
6193 ua_sess
= lookup_session_by_app(usess
, app
);
6194 if (ua_sess
== NULL
) {
6198 pthread_mutex_lock(&ua_sess
->lock
);
6200 if (ua_sess
->deleted
) {
6201 pthread_mutex_unlock(&ua_sess
->lock
);
6205 /* Lookup channel in the ust app session */
6206 lttng_ht_lookup(ua_sess
->channels
, (void *)uchan
->name
, &uiter
);
6207 ua_chan_node
= lttng_ht_iter_get_node_str(&uiter
);
6208 if (ua_chan_node
== NULL
) {
6211 ua_chan
= caa_container_of(ua_chan_node
, struct ust_app_channel
,
6213 ret
= create_ust_app_channel_context(ua_chan
, &uctx
->ctx
, app
);
6218 pthread_mutex_unlock(&ua_sess
->lock
);
6226 * Receive registration and populate the given msg structure.
6228 * On success return 0 else a negative value returned by the ustctl call.
6230 int ust_app_recv_registration(int sock
, struct ust_register_msg
*msg
)
6233 uint32_t pid
, ppid
, uid
, gid
;
6237 ret
= lttng_ust_ctl_recv_reg_msg(sock
, &msg
->type
, &msg
->major
, &msg
->minor
,
6238 &pid
, &ppid
, &uid
, &gid
,
6239 &msg
->bits_per_long
,
6240 &msg
->uint8_t_alignment
,
6241 &msg
->uint16_t_alignment
,
6242 &msg
->uint32_t_alignment
,
6243 &msg
->uint64_t_alignment
,
6244 &msg
->long_alignment
,
6251 case LTTNG_UST_ERR_EXITING
:
6252 DBG3("UST app recv reg message failed. Application died");
6254 case LTTNG_UST_ERR_UNSUP_MAJOR
:
6255 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
6256 msg
->major
, msg
->minor
, LTTNG_UST_ABI_MAJOR_VERSION
,
6257 LTTNG_UST_ABI_MINOR_VERSION
);
6260 ERR("UST app recv reg message failed with ret %d", ret
);
6265 msg
->pid
= (pid_t
) pid
;
6266 msg
->ppid
= (pid_t
) ppid
;
6267 msg
->uid
= (uid_t
) uid
;
6268 msg
->gid
= (gid_t
) gid
;
6275 * Return a ust app session object using the application object and the
6276 * session object descriptor has a key. If not found, NULL is returned.
6277 * A RCU read side lock MUST be acquired when calling this function.
6279 static struct ust_app_session
*find_session_by_objd(struct ust_app
*app
,
6282 struct lttng_ht_node_ulong
*node
;
6283 struct lttng_ht_iter iter
;
6284 struct ust_app_session
*ua_sess
= NULL
;
6288 lttng_ht_lookup(app
->ust_sessions_objd
, (void *)((unsigned long) objd
), &iter
);
6289 node
= lttng_ht_iter_get_node_ulong(&iter
);
6291 DBG2("UST app session find by objd %d not found", objd
);
6295 ua_sess
= caa_container_of(node
, struct ust_app_session
, ust_objd_node
);
6302 * Return a ust app channel object using the application object and the channel
6303 * object descriptor has a key. If not found, NULL is returned. A RCU read side
6304 * lock MUST be acquired before calling this function.
6306 static struct ust_app_channel
*find_channel_by_objd(struct ust_app
*app
,
6309 struct lttng_ht_node_ulong
*node
;
6310 struct lttng_ht_iter iter
;
6311 struct ust_app_channel
*ua_chan
= NULL
;
6315 lttng_ht_lookup(app
->ust_objd
, (void *)((unsigned long) objd
), &iter
);
6316 node
= lttng_ht_iter_get_node_ulong(&iter
);
6318 DBG2("UST app channel find by objd %d not found", objd
);
6322 ua_chan
= caa_container_of(node
, struct ust_app_channel
, ust_objd_node
);
6329 * Fixup legacy context fields for comparison:
6330 * - legacy array becomes array_nestable,
6331 * - legacy struct becomes struct_nestable,
6332 * - legacy variant becomes variant_nestable,
6333 * legacy sequences are not emitted in LTTng-UST contexts.
6335 static int ust_app_fixup_legacy_context_fields(size_t *_nr_fields
,
6336 struct lttng_ust_ctl_field
**_fields
)
6338 struct lttng_ust_ctl_field
*fields
= *_fields
, *new_fields
= NULL
;
6339 size_t nr_fields
= *_nr_fields
, new_nr_fields
= 0, i
, j
;
6343 for (i
= 0; i
< nr_fields
; i
++) {
6344 const struct lttng_ust_ctl_field
*field
= &fields
[i
];
6346 switch (field
->type
.atype
) {
6347 case lttng_ust_ctl_atype_sequence
:
6348 ERR("Unexpected legacy sequence context.");
6351 case lttng_ust_ctl_atype_array
:
6352 switch (field
->type
.u
.legacy
.array
.elem_type
.atype
) {
6353 case lttng_ust_ctl_atype_integer
:
6356 ERR("Unexpected legacy array element type in context.");
6361 /* One field for array_nested, one field for elem type. */
6365 case lttng_ust_ctl_atype_struct
: /* Fallthrough */
6366 case lttng_ust_ctl_atype_variant
:
6378 new_fields
= (struct lttng_ust_ctl_field
*) zmalloc(sizeof(*new_fields
) * new_nr_fields
);
6383 for (i
= 0, j
= 0; i
< nr_fields
; i
++, j
++) {
6384 const struct lttng_ust_ctl_field
*field
= &fields
[i
];
6385 struct lttng_ust_ctl_field
*new_field
= &new_fields
[j
];
6387 switch (field
->type
.atype
) {
6388 case lttng_ust_ctl_atype_array
:
6389 /* One field for array_nested, one field for elem type. */
6390 strncpy(new_field
->name
, field
->name
, LTTNG_UST_ABI_SYM_NAME_LEN
- 1);
6391 new_field
->type
.atype
= lttng_ust_ctl_atype_array_nestable
;
6392 new_field
->type
.u
.array_nestable
.length
= field
->type
.u
.legacy
.array
.length
;
6393 new_field
->type
.u
.array_nestable
.alignment
= 0;
6394 new_field
= &new_fields
[++j
]; /* elem type */
6395 new_field
->type
.atype
= field
->type
.u
.legacy
.array
.elem_type
.atype
;
6396 assert(new_field
->type
.atype
== lttng_ust_ctl_atype_integer
);
6397 new_field
->type
.u
.integer
= field
->type
.u
.legacy
.array
.elem_type
.u
.basic
.integer
;
6399 case lttng_ust_ctl_atype_struct
:
6400 strncpy(new_field
->name
, field
->name
, LTTNG_UST_ABI_SYM_NAME_LEN
- 1);
6401 new_field
->type
.atype
= lttng_ust_ctl_atype_struct_nestable
;
6402 new_field
->type
.u
.struct_nestable
.nr_fields
= field
->type
.u
.legacy
._struct
.nr_fields
;
6403 new_field
->type
.u
.struct_nestable
.alignment
= 0;
6405 case lttng_ust_ctl_atype_variant
:
6406 strncpy(new_field
->name
, field
->name
, LTTNG_UST_ABI_SYM_NAME_LEN
- 1);
6407 new_field
->type
.atype
= lttng_ust_ctl_atype_variant_nestable
;
6408 new_field
->type
.u
.variant_nestable
.nr_choices
= field
->type
.u
.legacy
.variant
.nr_choices
;
6409 strncpy(new_field
->type
.u
.variant_nestable
.tag_name
,
6410 field
->type
.u
.legacy
.variant
.tag_name
,
6411 LTTNG_UST_ABI_SYM_NAME_LEN
- 1);
6412 new_field
->type
.u
.variant_nestable
.alignment
= 0;
6415 *new_field
= *field
;
6420 *_fields
= new_fields
;
6421 *_nr_fields
= new_nr_fields
;
6427 * Reply to a register channel notification from an application on the notify
6428 * socket. The channel metadata is also created.
6430 * The session UST registry lock is acquired in this function.
6432 * On success 0 is returned else a negative value.
6434 static int reply_ust_register_channel(int sock
, int cobjd
,
6435 size_t nr_fields
, struct lttng_ust_ctl_field
*fields
)
6437 int ret
, ret_code
= 0;
6439 uint64_t chan_reg_key
;
6440 enum lttng_ust_ctl_channel_header type
= LTTNG_UST_CTL_CHANNEL_HEADER_UNKNOWN
;
6441 struct ust_app
*app
;
6442 struct ust_app_channel
*ua_chan
;
6443 struct ust_app_session
*ua_sess
;
6444 struct ust_registry_session
*registry
;
6445 struct ust_registry_channel
*ust_reg_chan
;
6449 /* Lookup application. If not found, there is a code flow error. */
6450 app
= find_app_by_notify_sock(sock
);
6452 DBG("Application socket %d is being torn down. Abort event notify",
6455 goto error_rcu_unlock
;
6458 /* Lookup channel by UST object descriptor. */
6459 ua_chan
= find_channel_by_objd(app
, cobjd
);
6461 DBG("Application channel is being torn down. Abort event notify");
6463 goto error_rcu_unlock
;
6466 assert(ua_chan
->session
);
6467 ua_sess
= ua_chan
->session
;
6469 /* Get right session registry depending on the session buffer type. */
6470 registry
= get_session_registry(ua_sess
);
6472 DBG("Application session is being torn down. Abort event notify");
6474 goto error_rcu_unlock
;
6477 /* Depending on the buffer type, a different channel key is used. */
6478 if (ua_sess
->buffer_type
== LTTNG_BUFFER_PER_UID
) {
6479 chan_reg_key
= ua_chan
->tracing_channel_id
;
6481 chan_reg_key
= ua_chan
->key
;
6484 pthread_mutex_lock(®istry
->lock
);
6486 ust_reg_chan
= ust_registry_channel_find(registry
, chan_reg_key
);
6487 assert(ust_reg_chan
);
6489 /* Channel id is set during the object creation. */
6490 chan_id
= ust_reg_chan
->chan_id
;
6492 ret
= ust_app_fixup_legacy_context_fields(&nr_fields
, &fields
);
6494 ERR("Registering application channel due to legacy context fields fixup error: pid = %d, sock = %d",
6495 app
->pid
, app
->sock
);
6499 if (!ust_reg_chan
->register_done
) {
6501 * TODO: eventually use the registry event count for
6502 * this channel to better guess header type for per-pid
6505 type
= LTTNG_UST_CTL_CHANNEL_HEADER_LARGE
;
6506 ust_reg_chan
->nr_ctx_fields
= nr_fields
;
6507 ust_reg_chan
->ctx_fields
= fields
;
6509 ust_reg_chan
->header_type
= type
;
6511 /* Get current already assigned values. */
6512 type
= ust_reg_chan
->header_type
;
6514 * Validate that the context fields match between
6515 * registry and newcoming application.
6517 if (!match_lttng_ust_ctl_field_array(ust_reg_chan
->ctx_fields
,
6518 ust_reg_chan
->nr_ctx_fields
,
6519 fields
, nr_fields
)) {
6520 ERR("Registering application channel due to context field mismatch: pid = %d, sock = %d",
6521 app
->pid
, app
->sock
);
6527 /* Append to metadata */
6528 if (!ust_reg_chan
->metadata_dumped
) {
6529 ret_code
= ust_metadata_channel_statedump(registry
, ust_reg_chan
);
6531 ERR("Error appending channel metadata (errno = %d)", ret_code
);
6537 DBG3("UST app replying to register channel key %" PRIu64
6538 " with id %u, type = %d, ret = %d", chan_reg_key
, chan_id
, type
,
6541 ret
= lttng_ust_ctl_reply_register_channel(sock
, chan_id
, type
, ret_code
);
6543 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
6544 DBG3("UST app reply channel failed. Application died: pid = %d, sock = %d",
6545 app
->pid
, app
->sock
);
6546 } else if (ret
== -EAGAIN
) {
6547 WARN("UST app reply channel failed. Communication time out: pid = %d, sock = %d",
6548 app
->pid
, app
->sock
);
6550 ERR("UST app reply channel failed with ret %d: pid = %d, sock = %d",
6551 ret
, app
->pid
, app
->sock
);
6556 /* This channel registry registration is completed. */
6557 ust_reg_chan
->register_done
= 1;
6560 pthread_mutex_unlock(®istry
->lock
);
6568 * Add event to the UST channel registry. When the event is added to the
6569 * registry, the metadata is also created. Once done, this replies to the
6570 * application with the appropriate error code.
6572 * The session UST registry lock is acquired in the function.
6574 * On success 0 is returned else a negative value.
6576 static int add_event_ust_registry(int sock
, int sobjd
, int cobjd
, char *name
,
6577 char *sig
, size_t nr_fields
, struct lttng_ust_ctl_field
*fields
,
6578 int loglevel_value
, char *model_emf_uri
)
6581 uint32_t event_id
= 0;
6582 uint64_t chan_reg_key
;
6583 struct ust_app
*app
;
6584 struct ust_app_channel
*ua_chan
;
6585 struct ust_app_session
*ua_sess
;
6586 struct ust_registry_session
*registry
;
6590 /* Lookup application. If not found, there is a code flow error. */
6591 app
= find_app_by_notify_sock(sock
);
6593 DBG("Application socket %d is being torn down. Abort event notify",
6596 goto error_rcu_unlock
;
6599 /* Lookup channel by UST object descriptor. */
6600 ua_chan
= find_channel_by_objd(app
, cobjd
);
6602 DBG("Application channel is being torn down. Abort event notify");
6604 goto error_rcu_unlock
;
6607 assert(ua_chan
->session
);
6608 ua_sess
= ua_chan
->session
;
6610 registry
= get_session_registry(ua_sess
);
6612 DBG("Application session is being torn down. Abort event notify");
6614 goto error_rcu_unlock
;
6617 if (ua_sess
->buffer_type
== LTTNG_BUFFER_PER_UID
) {
6618 chan_reg_key
= ua_chan
->tracing_channel_id
;
6620 chan_reg_key
= ua_chan
->key
;
6623 pthread_mutex_lock(®istry
->lock
);
6626 * From this point on, this call acquires the ownership of the sig, fields
6627 * and model_emf_uri meaning any free are done inside it if needed. These
6628 * three variables MUST NOT be read/write after this.
6630 ret_code
= ust_registry_create_event(registry
, chan_reg_key
,
6631 sobjd
, cobjd
, name
, sig
, nr_fields
, fields
,
6632 loglevel_value
, model_emf_uri
, ua_sess
->buffer_type
,
6636 model_emf_uri
= NULL
;
6639 * The return value is returned to ustctl so in case of an error, the
6640 * application can be notified. In case of an error, it's important not to
6641 * return a negative error or else the application will get closed.
6643 ret
= lttng_ust_ctl_reply_register_event(sock
, event_id
, ret_code
);
6645 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
6646 DBG3("UST app reply event failed. Application died: pid = %d, sock = %d.",
6647 app
->pid
, app
->sock
);
6648 } else if (ret
== -EAGAIN
) {
6649 WARN("UST app reply event failed. Communication time out: pid = %d, sock = %d",
6650 app
->pid
, app
->sock
);
6652 ERR("UST app reply event failed with ret %d: pid = %d, sock = %d",
6653 ret
, app
->pid
, app
->sock
);
6656 * No need to wipe the create event since the application socket will
6657 * get close on error hence cleaning up everything by itself.
6662 DBG3("UST registry event %s with id %" PRId32
" added successfully",
6666 pthread_mutex_unlock(®istry
->lock
);
6671 free(model_emf_uri
);
6676 * Add enum to the UST session registry. Once done, this replies to the
6677 * application with the appropriate error code.
6679 * The session UST registry lock is acquired within this function.
6681 * On success 0 is returned else a negative value.
6683 static int add_enum_ust_registry(int sock
, int sobjd
, char *name
,
6684 struct lttng_ust_ctl_enum_entry
*entries
, size_t nr_entries
)
6686 int ret
= 0, ret_code
;
6687 struct ust_app
*app
;
6688 struct ust_app_session
*ua_sess
;
6689 struct ust_registry_session
*registry
;
6690 uint64_t enum_id
= -1ULL;
6694 /* Lookup application. If not found, there is a code flow error. */
6695 app
= find_app_by_notify_sock(sock
);
6697 /* Return an error since this is not an error */
6698 DBG("Application socket %d is being torn down. Aborting enum registration",
6702 goto error_rcu_unlock
;
6705 /* Lookup session by UST object descriptor. */
6706 ua_sess
= find_session_by_objd(app
, sobjd
);
6708 /* Return an error since this is not an error */
6709 DBG("Application session is being torn down (session not found). Aborting enum registration.");
6711 goto error_rcu_unlock
;
6714 registry
= get_session_registry(ua_sess
);
6716 DBG("Application session is being torn down (registry not found). Aborting enum registration.");
6718 goto error_rcu_unlock
;
6721 pthread_mutex_lock(®istry
->lock
);
6724 * From this point on, the callee acquires the ownership of
6725 * entries. The variable entries MUST NOT be read/written after
6728 ret_code
= ust_registry_create_or_find_enum(registry
, sobjd
, name
,
6729 entries
, nr_entries
, &enum_id
);
6733 * The return value is returned to ustctl so in case of an error, the
6734 * application can be notified. In case of an error, it's important not to
6735 * return a negative error or else the application will get closed.
6737 ret
= lttng_ust_ctl_reply_register_enum(sock
, enum_id
, ret_code
);
6739 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
6740 DBG3("UST app reply enum failed. Application died: pid = %d, sock = %d",
6741 app
->pid
, app
->sock
);
6742 } else if (ret
== -EAGAIN
) {
6743 WARN("UST app reply enum failed. Communication time out: pid = %d, sock = %d",
6744 app
->pid
, app
->sock
);
6746 ERR("UST app reply enum failed with ret %d: pid = %d, sock = %d",
6747 ret
, app
->pid
, app
->sock
);
6750 * No need to wipe the create enum since the application socket will
6751 * get close on error hence cleaning up everything by itself.
6756 DBG3("UST registry enum %s added successfully or already found", name
);
6759 pthread_mutex_unlock(®istry
->lock
);
6766 * Handle application notification through the given notify socket.
6768 * Return 0 on success or else a negative value.
6770 int ust_app_recv_notify(int sock
)
6773 enum lttng_ust_ctl_notify_cmd cmd
;
6775 DBG3("UST app receiving notify from sock %d", sock
);
6777 ret
= lttng_ust_ctl_recv_notify(sock
, &cmd
);
6779 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
6780 DBG3("UST app recv notify failed. Application died: sock = %d",
6782 } else if (ret
== -EAGAIN
) {
6783 WARN("UST app recv notify failed. Communication time out: sock = %d",
6786 ERR("UST app recv notify failed with ret %d: sock = %d",
6793 case LTTNG_UST_CTL_NOTIFY_CMD_EVENT
:
6795 int sobjd
, cobjd
, loglevel_value
;
6796 char name
[LTTNG_UST_ABI_SYM_NAME_LEN
], *sig
, *model_emf_uri
;
6798 struct lttng_ust_ctl_field
*fields
;
6800 DBG2("UST app ustctl register event received");
6802 ret
= lttng_ust_ctl_recv_register_event(sock
, &sobjd
, &cobjd
, name
,
6803 &loglevel_value
, &sig
, &nr_fields
, &fields
,
6806 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
6807 DBG3("UST app recv event failed. Application died: sock = %d",
6809 } else if (ret
== -EAGAIN
) {
6810 WARN("UST app recv event failed. Communication time out: sock = %d",
6813 ERR("UST app recv event failed with ret %d: sock = %d",
6820 * Add event to the UST registry coming from the notify socket. This
6821 * call will free if needed the sig, fields and model_emf_uri. This
6822 * code path loses the ownsership of these variables and transfer them
6823 * to the this function.
6825 ret
= add_event_ust_registry(sock
, sobjd
, cobjd
, name
, sig
, nr_fields
,
6826 fields
, loglevel_value
, model_emf_uri
);
6833 case LTTNG_UST_CTL_NOTIFY_CMD_CHANNEL
:
6837 struct lttng_ust_ctl_field
*fields
;
6839 DBG2("UST app ustctl register channel received");
6841 ret
= lttng_ust_ctl_recv_register_channel(sock
, &sobjd
, &cobjd
, &nr_fields
,
6844 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
6845 DBG3("UST app recv channel failed. Application died: sock = %d",
6847 } else if (ret
== -EAGAIN
) {
6848 WARN("UST app recv channel failed. Communication time out: sock = %d",
6851 ERR("UST app recv channel failed with ret %d: sock = %d)",
6858 * The fields ownership are transfered to this function call meaning
6859 * that if needed it will be freed. After this, it's invalid to access
6860 * fields or clean it up.
6862 ret
= reply_ust_register_channel(sock
, cobjd
, nr_fields
,
6870 case LTTNG_UST_CTL_NOTIFY_CMD_ENUM
:
6873 char name
[LTTNG_UST_ABI_SYM_NAME_LEN
];
6875 struct lttng_ust_ctl_enum_entry
*entries
;
6877 DBG2("UST app ustctl register enum received");
6879 ret
= lttng_ust_ctl_recv_register_enum(sock
, &sobjd
, name
,
6880 &entries
, &nr_entries
);
6882 if (ret
== -EPIPE
|| ret
== -LTTNG_UST_ERR_EXITING
) {
6883 DBG3("UST app recv enum failed. Application died: sock = %d",
6885 } else if (ret
== -EAGAIN
) {
6886 WARN("UST app recv enum failed. Communication time out: sock = %d",
6889 ERR("UST app recv enum failed with ret %d: sock = %d",
6895 /* Callee assumes ownership of entries */
6896 ret
= add_enum_ust_registry(sock
, sobjd
, name
,
6897 entries
, nr_entries
);
6905 /* Should NEVER happen. */
6914 * Once the notify socket hangs up, this is called. First, it tries to find the
6915 * corresponding application. On failure, the call_rcu to close the socket is
6916 * executed. If an application is found, it tries to delete it from the notify
6917 * socket hash table. Whathever the result, it proceeds to the call_rcu.
6919 * Note that an object needs to be allocated here so on ENOMEM failure, the
6920 * call RCU is not done but the rest of the cleanup is.
6922 void ust_app_notify_sock_unregister(int sock
)
6925 struct lttng_ht_iter iter
;
6926 struct ust_app
*app
;
6927 struct ust_app_notify_sock_obj
*obj
;
6933 obj
= zmalloc(sizeof(*obj
));
6936 * An ENOMEM is kind of uncool. If this strikes we continue the
6937 * procedure but the call_rcu will not be called. In this case, we
6938 * accept the fd leak rather than possibly creating an unsynchronized
6939 * state between threads.
6941 * TODO: The notify object should be created once the notify socket is
6942 * registered and stored independantely from the ust app object. The
6943 * tricky part is to synchronize the teardown of the application and
6944 * this notify object. Let's keep that in mind so we can avoid this
6945 * kind of shenanigans with ENOMEM in the teardown path.
6952 DBG("UST app notify socket unregister %d", sock
);
6955 * Lookup application by notify socket. If this fails, this means that the
6956 * hash table delete has already been done by the application
6957 * unregistration process so we can safely close the notify socket in a
6960 app
= find_app_by_notify_sock(sock
);
6965 iter
.iter
.node
= &app
->notify_sock_n
.node
;
6968 * Whatever happens here either we fail or succeed, in both cases we have
6969 * to close the socket after a grace period to continue to the call RCU
6970 * here. If the deletion is successful, the application is not visible
6971 * anymore by other threads and is it fails it means that it was already
6972 * deleted from the hash table so either way we just have to close the
6975 (void) lttng_ht_del(ust_app_ht_by_notify_sock
, &iter
);
6981 * Close socket after a grace period to avoid for the socket to be reused
6982 * before the application object is freed creating potential race between
6983 * threads trying to add unique in the global hash table.
6986 call_rcu(&obj
->head
, close_notify_sock_rcu
);
6991 * Destroy a ust app data structure and free its memory.
6993 static void ust_app_destroy(struct ust_app
*app
)
6999 call_rcu(&app
->pid_n
.head
, delete_ust_app_rcu
);
7003 * Take a snapshot for a given UST session. The snapshot is sent to the given
7006 * Returns LTTNG_OK on success or a LTTNG_ERR error code.
7008 enum lttng_error_code
ust_app_snapshot_record(
7009 const struct ltt_ust_session
*usess
,
7010 const struct consumer_output
*output
, int wait
,
7011 uint64_t nb_packets_per_stream
)
7014 enum lttng_error_code status
= LTTNG_OK
;
7015 struct lttng_ht_iter iter
;
7016 struct ust_app
*app
;
7017 char *trace_path
= NULL
;
7024 switch (usess
->buffer_type
) {
7025 case LTTNG_BUFFER_PER_UID
:
7027 struct buffer_reg_uid
*reg
;
7029 cds_list_for_each_entry(reg
, &usess
->buffer_reg_uid_list
, lnode
) {
7030 struct buffer_reg_channel
*buf_reg_chan
;
7031 struct consumer_socket
*socket
;
7032 char pathname
[PATH_MAX
];
7033 size_t consumer_path_offset
= 0;
7035 if (!reg
->registry
->reg
.ust
->metadata_key
) {
7036 /* Skip since no metadata is present */
7040 /* Get consumer socket to use to push the metadata.*/
7041 socket
= consumer_find_socket_by_bitness(reg
->bits_per_long
,
7044 status
= LTTNG_ERR_INVALID
;
7048 memset(pathname
, 0, sizeof(pathname
));
7049 ret
= snprintf(pathname
, sizeof(pathname
),
7050 DEFAULT_UST_TRACE_UID_PATH
,
7051 reg
->uid
, reg
->bits_per_long
);
7053 PERROR("snprintf snapshot path");
7054 status
= LTTNG_ERR_INVALID
;
7057 /* Free path allowed on previous iteration. */
7059 trace_path
= setup_channel_trace_path(usess
->consumer
, pathname
,
7060 &consumer_path_offset
);
7062 status
= LTTNG_ERR_INVALID
;
7065 /* Add the UST default trace dir to path. */
7066 cds_lfht_for_each_entry(reg
->registry
->channels
->ht
, &iter
.iter
,
7067 buf_reg_chan
, node
.node
) {
7068 status
= consumer_snapshot_channel(socket
,
7069 buf_reg_chan
->consumer_key
,
7070 output
, 0, usess
->uid
,
7071 usess
->gid
, &trace_path
[consumer_path_offset
], wait
,
7072 nb_packets_per_stream
);
7073 if (status
!= LTTNG_OK
) {
7077 status
= consumer_snapshot_channel(socket
,
7078 reg
->registry
->reg
.ust
->metadata_key
, output
, 1,
7079 usess
->uid
, usess
->gid
, &trace_path
[consumer_path_offset
],
7081 if (status
!= LTTNG_OK
) {
7087 case LTTNG_BUFFER_PER_PID
:
7089 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
7090 struct consumer_socket
*socket
;
7091 struct lttng_ht_iter chan_iter
;
7092 struct ust_app_channel
*ua_chan
;
7093 struct ust_app_session
*ua_sess
;
7094 struct ust_registry_session
*registry
;
7095 char pathname
[PATH_MAX
];
7096 size_t consumer_path_offset
= 0;
7098 ua_sess
= lookup_session_by_app(usess
, app
);
7100 /* Session not associated with this app. */
7104 /* Get the right consumer socket for the application. */
7105 socket
= consumer_find_socket_by_bitness(app
->bits_per_long
,
7108 status
= LTTNG_ERR_INVALID
;
7112 /* Add the UST default trace dir to path. */
7113 memset(pathname
, 0, sizeof(pathname
));
7114 ret
= snprintf(pathname
, sizeof(pathname
), "%s",
7117 status
= LTTNG_ERR_INVALID
;
7118 PERROR("snprintf snapshot path");
7121 /* Free path allowed on previous iteration. */
7123 trace_path
= setup_channel_trace_path(usess
->consumer
, pathname
,
7124 &consumer_path_offset
);
7126 status
= LTTNG_ERR_INVALID
;
7129 cds_lfht_for_each_entry(ua_sess
->channels
->ht
, &chan_iter
.iter
,
7130 ua_chan
, node
.node
) {
7131 status
= consumer_snapshot_channel(socket
,
7132 ua_chan
->key
, output
, 0,
7133 lttng_credentials_get_uid(&ua_sess
->effective_credentials
),
7134 lttng_credentials_get_gid(&ua_sess
->effective_credentials
),
7135 &trace_path
[consumer_path_offset
], wait
,
7136 nb_packets_per_stream
);
7140 case LTTNG_ERR_CHAN_NOT_FOUND
:
7147 registry
= get_session_registry(ua_sess
);
7149 DBG("Application session is being torn down. Skip application.");
7152 status
= consumer_snapshot_channel(socket
,
7153 registry
->metadata_key
, output
, 1,
7154 lttng_credentials_get_uid(&ua_sess
->effective_credentials
),
7155 lttng_credentials_get_gid(&ua_sess
->effective_credentials
),
7156 &trace_path
[consumer_path_offset
], wait
, 0);
7160 case LTTNG_ERR_CHAN_NOT_FOUND
:
7180 * Return the size taken by one more packet per stream.
7182 uint64_t ust_app_get_size_one_more_packet_per_stream(
7183 const struct ltt_ust_session
*usess
, uint64_t cur_nr_packets
)
7185 uint64_t tot_size
= 0;
7186 struct ust_app
*app
;
7187 struct lttng_ht_iter iter
;
7191 switch (usess
->buffer_type
) {
7192 case LTTNG_BUFFER_PER_UID
:
7194 struct buffer_reg_uid
*reg
;
7196 cds_list_for_each_entry(reg
, &usess
->buffer_reg_uid_list
, lnode
) {
7197 struct buffer_reg_channel
*buf_reg_chan
;
7200 cds_lfht_for_each_entry(reg
->registry
->channels
->ht
, &iter
.iter
,
7201 buf_reg_chan
, node
.node
) {
7202 if (cur_nr_packets
>= buf_reg_chan
->num_subbuf
) {
7204 * Don't take channel into account if we
7205 * already grab all its packets.
7209 tot_size
+= buf_reg_chan
->subbuf_size
* buf_reg_chan
->stream_count
;
7215 case LTTNG_BUFFER_PER_PID
:
7218 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
7219 struct ust_app_channel
*ua_chan
;
7220 struct ust_app_session
*ua_sess
;
7221 struct lttng_ht_iter chan_iter
;
7223 ua_sess
= lookup_session_by_app(usess
, app
);
7225 /* Session not associated with this app. */
7229 cds_lfht_for_each_entry(ua_sess
->channels
->ht
, &chan_iter
.iter
,
7230 ua_chan
, node
.node
) {
7231 if (cur_nr_packets
>= ua_chan
->attr
.num_subbuf
) {
7233 * Don't take channel into account if we
7234 * already grab all its packets.
7238 tot_size
+= ua_chan
->attr
.subbuf_size
* ua_chan
->streams
.count
;
7252 int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id
,
7253 struct cds_list_head
*buffer_reg_uid_list
,
7254 struct consumer_output
*consumer
, uint64_t uchan_id
,
7255 int overwrite
, uint64_t *discarded
, uint64_t *lost
)
7258 uint64_t consumer_chan_key
;
7263 ret
= buffer_reg_uid_consumer_channel_key(
7264 buffer_reg_uid_list
, uchan_id
, &consumer_chan_key
);
7272 ret
= consumer_get_lost_packets(ust_session_id
,
7273 consumer_chan_key
, consumer
, lost
);
7275 ret
= consumer_get_discarded_events(ust_session_id
,
7276 consumer_chan_key
, consumer
, discarded
);
7283 int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session
*usess
,
7284 struct ltt_ust_channel
*uchan
,
7285 struct consumer_output
*consumer
, int overwrite
,
7286 uint64_t *discarded
, uint64_t *lost
)
7289 struct lttng_ht_iter iter
;
7290 struct lttng_ht_node_str
*ua_chan_node
;
7291 struct ust_app
*app
;
7292 struct ust_app_session
*ua_sess
;
7293 struct ust_app_channel
*ua_chan
;
7300 * Iterate over every registered applications. Sum counters for
7301 * all applications containing requested session and channel.
7303 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
7304 struct lttng_ht_iter uiter
;
7306 ua_sess
= lookup_session_by_app(usess
, app
);
7307 if (ua_sess
== NULL
) {
7312 lttng_ht_lookup(ua_sess
->channels
, (void *) uchan
->name
, &uiter
);
7313 ua_chan_node
= lttng_ht_iter_get_node_str(&uiter
);
7314 /* If the session is found for the app, the channel must be there */
7315 assert(ua_chan_node
);
7317 ua_chan
= caa_container_of(ua_chan_node
, struct ust_app_channel
, node
);
7322 ret
= consumer_get_lost_packets(usess
->id
, ua_chan
->key
,
7329 uint64_t _discarded
;
7331 ret
= consumer_get_discarded_events(usess
->id
,
7332 ua_chan
->key
, consumer
, &_discarded
);
7336 (*discarded
) += _discarded
;
7345 int ust_app_regenerate_statedump(struct ltt_ust_session
*usess
,
7346 struct ust_app
*app
)
7349 struct ust_app_session
*ua_sess
;
7351 DBG("Regenerating the metadata for ust app pid %d", app
->pid
);
7355 ua_sess
= lookup_session_by_app(usess
, app
);
7356 if (ua_sess
== NULL
) {
7357 /* The session is in teardown process. Ignore and continue. */
7361 pthread_mutex_lock(&ua_sess
->lock
);
7363 if (ua_sess
->deleted
) {
7367 pthread_mutex_lock(&app
->sock_lock
);
7368 ret
= lttng_ust_ctl_regenerate_statedump(app
->sock
, ua_sess
->handle
);
7369 pthread_mutex_unlock(&app
->sock_lock
);
7372 pthread_mutex_unlock(&ua_sess
->lock
);
7376 health_code_update();
7381 * Regenerate the statedump for each app in the session.
7383 int ust_app_regenerate_statedump_all(struct ltt_ust_session
*usess
)
7386 struct lttng_ht_iter iter
;
7387 struct ust_app
*app
;
7389 DBG("Regenerating the metadata for all UST apps");
7393 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
7394 if (!app
->compatible
) {
7398 ret
= ust_app_regenerate_statedump(usess
, app
);
7400 /* Continue to the next app even on error */
7411 * Rotate all the channels of a session.
7413 * Return LTTNG_OK on success or else an LTTng error code.
7415 enum lttng_error_code
ust_app_rotate_session(struct ltt_session
*session
)
7418 enum lttng_error_code cmd_ret
= LTTNG_OK
;
7419 struct lttng_ht_iter iter
;
7420 struct ust_app
*app
= NULL
;
7421 struct ltt_ust_session
*usess
= session
->ust_session
;
7427 switch (usess
->buffer_type
) {
7428 case LTTNG_BUFFER_PER_UID
:
7430 struct buffer_reg_uid
*reg
;
7432 cds_list_for_each_entry(reg
, &usess
->buffer_reg_uid_list
, lnode
) {
7433 struct buffer_reg_channel
*buf_reg_chan
;
7434 struct consumer_socket
*socket
;
7436 /* Get consumer socket to use to push the metadata.*/
7437 socket
= consumer_find_socket_by_bitness(reg
->bits_per_long
,
7440 cmd_ret
= LTTNG_ERR_INVALID
;
7444 /* Rotate the data channels. */
7445 cds_lfht_for_each_entry(reg
->registry
->channels
->ht
, &iter
.iter
,
7446 buf_reg_chan
, node
.node
) {
7447 ret
= consumer_rotate_channel(socket
,
7448 buf_reg_chan
->consumer_key
,
7449 usess
->uid
, usess
->gid
,
7451 /* is_metadata_channel */ false);
7453 cmd_ret
= LTTNG_ERR_ROTATION_FAIL_CONSUMER
;
7459 * The metadata channel might not be present.
7461 * Consumer stream allocation can be done
7462 * asynchronously and can fail on intermediary
7463 * operations (i.e add context) and lead to data
7464 * channels created with no metadata channel.
7466 if (!reg
->registry
->reg
.ust
->metadata_key
) {
7467 /* Skip since no metadata is present. */
7471 (void) push_metadata(reg
->registry
->reg
.ust
, usess
->consumer
);
7473 ret
= consumer_rotate_channel(socket
,
7474 reg
->registry
->reg
.ust
->metadata_key
,
7475 usess
->uid
, usess
->gid
,
7477 /* is_metadata_channel */ true);
7479 cmd_ret
= LTTNG_ERR_ROTATION_FAIL_CONSUMER
;
7485 case LTTNG_BUFFER_PER_PID
:
7487 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
, pid_n
.node
) {
7488 struct consumer_socket
*socket
;
7489 struct lttng_ht_iter chan_iter
;
7490 struct ust_app_channel
*ua_chan
;
7491 struct ust_app_session
*ua_sess
;
7492 struct ust_registry_session
*registry
;
7493 bool app_reference_taken
;
7495 app_reference_taken
= ust_app_get(app
);
7496 if (!app_reference_taken
) {
7497 /* Application unregistered concurrently, skip it. */
7498 DBG("Could not get application reference as it is being torn down; skipping application");
7502 ua_sess
= lookup_session_by_app(usess
, app
);
7504 /* Session not associated with this app. */
7510 /* Get the right consumer socket for the application. */
7511 socket
= consumer_find_socket_by_bitness(app
->bits_per_long
,
7514 cmd_ret
= LTTNG_ERR_INVALID
;
7519 registry
= get_session_registry(ua_sess
);
7522 /* Rotate the data channels. */
7523 cds_lfht_for_each_entry(ua_sess
->channels
->ht
, &chan_iter
.iter
,
7524 ua_chan
, node
.node
) {
7525 ret
= consumer_rotate_channel(socket
,
7527 lttng_credentials_get_uid(&ua_sess
->effective_credentials
),
7528 lttng_credentials_get_gid(&ua_sess
->effective_credentials
),
7530 /* is_metadata_channel */ false);
7532 cmd_ret
= LTTNG_ERR_ROTATION_FAIL_CONSUMER
;
7537 /* Rotate the metadata channel. */
7538 (void) push_metadata(registry
, usess
->consumer
);
7539 ret
= consumer_rotate_channel(socket
,
7540 registry
->metadata_key
,
7541 lttng_credentials_get_uid(&ua_sess
->effective_credentials
),
7542 lttng_credentials_get_gid(&ua_sess
->effective_credentials
),
7544 /* is_metadata_channel */ true);
7546 cmd_ret
= LTTNG_ERR_ROTATION_FAIL_CONSUMER
;
7570 enum lttng_error_code
ust_app_create_channel_subdirectories(
7571 const struct ltt_ust_session
*usess
)
7573 enum lttng_error_code ret
= LTTNG_OK
;
7574 struct lttng_ht_iter iter
;
7575 enum lttng_trace_chunk_status chunk_status
;
7576 char *pathname_index
;
7579 assert(usess
->current_trace_chunk
);
7582 switch (usess
->buffer_type
) {
7583 case LTTNG_BUFFER_PER_UID
:
7585 struct buffer_reg_uid
*reg
;
7587 cds_list_for_each_entry(reg
, &usess
->buffer_reg_uid_list
, lnode
) {
7588 fmt_ret
= asprintf(&pathname_index
,
7589 DEFAULT_UST_TRACE_DIR
"/" DEFAULT_UST_TRACE_UID_PATH
"/" DEFAULT_INDEX_DIR
,
7590 reg
->uid
, reg
->bits_per_long
);
7592 ERR("Failed to format channel index directory");
7593 ret
= LTTNG_ERR_CREATE_DIR_FAIL
;
7598 * Create the index subdirectory which will take care
7599 * of implicitly creating the channel's path.
7601 chunk_status
= lttng_trace_chunk_create_subdirectory(
7602 usess
->current_trace_chunk
,
7604 free(pathname_index
);
7605 if (chunk_status
!= LTTNG_TRACE_CHUNK_STATUS_OK
) {
7606 ret
= LTTNG_ERR_CREATE_DIR_FAIL
;
7612 case LTTNG_BUFFER_PER_PID
:
7614 struct ust_app
*app
;
7617 * Create the toplevel ust/ directory in case no apps are running.
7619 chunk_status
= lttng_trace_chunk_create_subdirectory(
7620 usess
->current_trace_chunk
,
7621 DEFAULT_UST_TRACE_DIR
);
7622 if (chunk_status
!= LTTNG_TRACE_CHUNK_STATUS_OK
) {
7623 ret
= LTTNG_ERR_CREATE_DIR_FAIL
;
7627 cds_lfht_for_each_entry(ust_app_ht
->ht
, &iter
.iter
, app
,
7629 struct ust_app_session
*ua_sess
;
7630 struct ust_registry_session
*registry
;
7632 ua_sess
= lookup_session_by_app(usess
, app
);
7634 /* Session not associated with this app. */
7638 registry
= get_session_registry(ua_sess
);
7640 DBG("Application session is being torn down. Skip application.");
7644 fmt_ret
= asprintf(&pathname_index
,
7645 DEFAULT_UST_TRACE_DIR
"/%s/" DEFAULT_INDEX_DIR
,
7648 ERR("Failed to format channel index directory");
7649 ret
= LTTNG_ERR_CREATE_DIR_FAIL
;
7653 * Create the index subdirectory which will take care
7654 * of implicitly creating the channel's path.
7656 chunk_status
= lttng_trace_chunk_create_subdirectory(
7657 usess
->current_trace_chunk
,
7659 free(pathname_index
);
7660 if (chunk_status
!= LTTNG_TRACE_CHUNK_STATUS_OK
) {
7661 ret
= LTTNG_ERR_CREATE_DIR_FAIL
;
/*
 * Clear all the channels of a session.
 *
 * Return LTTNG_OK on success or else an LTTng error code.
 */
enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
{
	enum lttng_error_code cmd_ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ltt_ust_session *usess = session->ust_session;

	/* Clearing only makes sense on a stopped (inactive) session. */
	if (usess->active) {
		ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
		cmd_ret = LTTNG_ERR_FATAL;
	}

	/* The clear procedure depends on the buffer ownership model. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
		struct buffer_reg_uid *reg;

		/* One buffer registry exists per (uid, bitness) tuple. */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
			/* NOTE(review): the remaining call arguments and the socket
			 * NULL check were elided from this view of the file. */
			cmd_ret = LTTNG_ERR_INVALID;

			/* Clear the data channels. */
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					buf_reg_chan, node.node) {
				ret = consumer_clear_channel(socket,
						buf_reg_chan->consumer_key);

			/* Push pending metadata to the consumer before rotating below. */
			(void) push_metadata(reg->registry->reg.ust, usess->consumer);

			/*
			 * Clear the metadata channel.
			 * Metadata channel is not cleared per se but we still need to
			 * perform a rotation operation on it behind the scene.
			 */
			ret = consumer_clear_channel(socket,
					reg->registry->reg.ust->metadata_key);

	case LTTNG_BUFFER_PER_PID:
		/* Per-PID buffers: walk every registered application. */
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			/* Session not associated with this app. */

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->bits_per_long,
			/* NOTE(review): trailing arguments and the socket NULL check
			 * were elided from this view of the file. */
			cmd_ret = LTTNG_ERR_INVALID;

			registry = get_session_registry(ua_sess);
			/* NOTE(review): the NULL-registry check that guards this log
			 * message was elided from this view. */
			DBG("Application session is being torn down. Skip application.");

			/* Clear the data channels. */
			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				ret = consumer_clear_channel(socket, ua_chan->key);

				/* Per-PID buffer and application going away. */
				if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {

			/* Push pending metadata to the consumer before rotating below. */
			(void) push_metadata(registry, usess->consumer);

			/*
			 * Clear the metadata channel.
			 * Metadata channel is not cleared per se but we still need to
			 * perform rotation operation on it behind the scene.
			 */
			ret = consumer_clear_channel(socket, registry->metadata_key);

			/* Per-PID buffer and application going away. */
			if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {

	/* Map consumer-reported failures to LTTng error codes. */
	case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED:
		cmd_ret = LTTNG_ERR_CLEAR_RELAY_DISALLOWED;

		cmd_ret = LTTNG_ERR_CLEAR_FAIL_CONSUMER;
/*
 * This function skips the metadata channel as the begin/end timestamps of a
 * metadata packet are useless.
 *
 * Moreover, opening a packet after a "clear" will cause problems for live
 * sessions as it will introduce padding that was not part of the first trace
 * chunk. The relay daemon expects the content of the metadata stream of
 * successive metadata trace chunks to be strict supersets of one another.
 *
 * For example, flushing a packet at the beginning of the metadata stream of
 * a trace chunk resulting from a "clear" session command will cause the
 * size of the metadata stream of the new trace chunk to not match the size of
 * the metadata stream of the original chunk. This will confuse the relay
 * daemon as the same "offset" in a metadata stream will no longer point
 * to the same content.
 */
enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
{
	enum lttng_error_code ret = LTTNG_OK;
	struct lttng_ht_iter iter;
	struct ltt_ust_session *usess = session->ust_session;

	/* The traversal strategy depends on the buffer ownership model. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
		struct buffer_reg_uid *reg;

		/* One buffer registry exists per (uid, bitness) tuple. */
		cds_list_for_each_entry (
				reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *buf_reg_chan;
			struct consumer_socket *socket;

			/* Consumer socket matching this registry's bitness. */
			socket = consumer_find_socket_by_bitness(
					reg->bits_per_long, usess->consumer);
			/* NOTE(review): the socket NULL check was elided from this view. */
			ret = LTTNG_ERR_FATAL;

			/* Open a packet in every data channel of the registry. */
			cds_lfht_for_each_entry(reg->registry->channels->ht,
					&iter.iter, buf_reg_chan, node.node) {
				const int open_ret =
						consumer_open_channel_packets(
				/* NOTE(review): socket argument elided from this view. */
						buf_reg_chan->consumer_key);

				/* NOTE(review): the open_ret error check was elided. */
				ret = LTTNG_ERR_UNK;

	case LTTNG_BUFFER_PER_PID:
		struct ust_app *app;

		/* Per-PID buffers: walk every registered application. */
		cds_lfht_for_each_entry (
				ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			/* Session not associated with this app. */

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(
					app->bits_per_long, usess->consumer);
			/* NOTE(review): the socket NULL check was elided from this view. */
			ret = LTTNG_ERR_FATAL;

			registry = get_session_registry(ua_sess);
			/* NOTE(review): the NULL-registry check that guards this log
			 * message was elided from this view. */
			DBG("Application session is being torn down. Skip application.");

			/* Open a packet in every data channel of this app session. */
			cds_lfht_for_each_entry(ua_sess->channels->ht,
					&chan_iter.iter, ua_chan, node.node) {
				const int open_ret =
						consumer_open_channel_packets(

				/*
				 * Per-PID buffer and application going
				 */
				if (open_ret == -LTTNG_ERR_CHAN_NOT_FOUND) {

				ret = LTTNG_ERR_UNK;
7948 static void ust_app_release(struct urcu_ref
*ref
)
7950 struct ust_app
*app
= container_of(ref
, struct ust_app
, ref
);
7952 ust_app_unregister(app
);
7953 ust_app_destroy(app
);
7956 bool ust_app_get(struct ust_app
*app
)
7959 return urcu_ref_get_unless_zero(&app
->ref
);
7962 void ust_app_put(struct ust_app
*app
)
7968 urcu_ref_put(&app
->ref
, ust_app_release
);