Fix: snapshot path has duplicate domain subdir "ust/ust" or "kernel/kernel"
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
/*
 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
 * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 */

#define _LGPL_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <urcu/compiler.h>
#include <signal.h>

#include <common/bytecode/bytecode.h>
#include <common/compat/errno.h>
#include <common/common.h>
#include <common/hashtable/utils.h>
#include <lttng/event-rule/event-rule.h>
#include <lttng/event-rule/event-rule-internal.h>
#include <lttng/event-rule/tracepoint.h>
#include <lttng/condition/condition.h>
#include <lttng/condition/event-rule-matches-internal.h>
#include <lttng/condition/event-rule-matches.h>
#include <lttng/trigger/trigger-internal.h>
#include <common/sessiond-comm/sessiond-comm.h>

#include "buffer-registry.h"
#include "condition-internal.h"
#include "fd-limit.h"
#include "health-sessiond.h"
#include "ust-app.h"
#include "ust-consumer.h"
#include "lttng-ust-ctl.h"
#include "lttng-ust-error.h"
#include "utils.h"
#include "session.h"
#include "lttng-sessiond.h"
#include "notification-thread-commands.h"
#include "rotate.h"
#include "event.h"
#include "event-notifier-error-accounting.h"

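/*
 * Hash tables of registered traceable applications, indexed by PID, by
 * command socket and by notify socket respectively. Lookups and
 * iterations are protected by RCU.
 */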
struct lttng_ht *ust_app_ht;
struct lttng_ht *ust_app_ht_by_sock;
struct lttng_ht *ust_app_ht_by_notify_sock;

static
int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);

/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Next available session ID. Access under next_session_id_lock. */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Return the incremented value of next_channel_key.
 */
static uint64_t get_next_channel_key(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_channel_key_lock);
	ret = ++_next_channel_key;
	pthread_mutex_unlock(&next_channel_key_lock);
	return ret;
}

/*
 * Return the atomically incremented value of next_session_id.
 */
static uint64_t get_next_session_id(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_session_id_lock);
	ret = ++_next_session_id;
	pthread_mutex_unlock(&next_session_id_lock);
	return ret;
}

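/*
 * Fill a consumer channel attribute structure from a UST ABI channel
 * attribute structure; the two layouts differ, so each field is copied
 * explicitly.
 */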
static void copy_channel_attr_to_ustctl(
		struct lttng_ust_ctl_consumer_channel_attr *attr,
		struct lttng_ust_abi_channel_attr *uattr)
{
	/* Copy channel attributes since the layout is different. */
	attr->subbuf_size = uattr->subbuf_size;
	attr->num_subbuf = uattr->num_subbuf;
	attr->overwrite = uattr->overwrite;
	attr->switch_timer_interval = uattr->switch_timer_interval;
	attr->read_timer_interval = uattr->read_timer_interval;
	attr->output = uattr->output;
	attr->blocking_timeout = uattr->u.s.blocking_timeout;
}

/*
 * Match function for the hash table lookup.
 *
 * It matches an ust app event based on four attributes which are the
 * event name, the filter bytecode, the loglevel and the exclusions.
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;
	int ev_loglevel_value;

	assert(node);
	assert(_key);

	event = caa_container_of(node, struct ust_app_event, node.node);
	key = _key;
	ev_loglevel_value = event->attr.loglevel;

	/* Match the 4 elements of the key: name, filter, loglevel, exclusions */

	/* Event name */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (ev_loglevel_value != key->loglevel_type) {
		if (event->attr.loglevel_type == LTTNG_UST_ABI_LOGLEVEL_ALL
				&& key->loglevel_type == 0 &&
				ev_loglevel_value == -1) {
			/*
			 * Match is accepted. This is because on event creation, the
			 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
			 * -1 are accepted for this loglevel type since 0 is the one set by
			 * the API when receiving an enable event.
			 */
		} else {
			goto no_match;
		}
	}

	/* If only one of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exist; check length followed by the bytecode. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* If only one of the exclusions is NULL, fail. */
	if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
		goto no_match;
	}

	if (key->exclusion && event->exclusion) {
		/* Both exclusions exist; check count followed by the names. */
		if (event->exclusion->count != key->exclusion->count ||
				memcmp(event->exclusion->names, key->exclusion->names,
					event->exclusion->count * LTTNG_UST_ABI_SYM_NAME_LEN) != 0) {
			goto no_match;
		}
	}

	/* Match. */
	return 1;

no_match:
	return 0;
}

/*
 * Uniquely add an ust app event to the given ht. This uses the custom
 * ht_match_ust_app_event match function and the event name as hash.
 */
static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
		struct ust_app_event *event)
{
	struct cds_lfht_node *node_ptr;
	struct ust_app_ht_key key;
	struct lttng_ht *ht;

	assert(ua_chan);
	assert(ua_chan->events);
	assert(event);

	ht = ua_chan->events;
	key.name = event->attr.name;
	key.filter = event->filter;
	key.loglevel_type = event->attr.loglevel;
	key.exclusion = event->exclusion;

	node_ptr = cds_lfht_add_unique(ht->ht,
			ht->hash_fct(event->node.key, lttng_ht_seed),
			ht_match_ust_app_event, &key, &event->node.node);
	assert(node_ptr == &event->node.node);
}

/*
 * Close the notify socket from the given RCU head object. This MUST be called
 * through a call_rcu().
 */
static void close_notify_sock_rcu(struct rcu_head *head)
{
	int ret;
	struct ust_app_notify_sock_obj *obj =
		caa_container_of(head, struct ust_app_notify_sock_obj, head);

	/* Must have a valid fd here. */
	assert(obj->fd >= 0);

	ret = close(obj->fd);
	if (ret) {
		ERR("close notify sock %d RCU", obj->fd);
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	free(obj);
}

/*
 * Return the session registry according to the buffer type of the given
 * session.
 *
 * A buffer registry (per UID or per PID) MUST exist before calling this
 * function; NULL is returned if it cannot be found. The RCU read side
 * lock must be acquired.
 */
static struct ust_registry_session *get_session_registry(
		struct ust_app_session *ua_sess)
{
	struct ust_registry_session *registry = NULL;

	assert(ua_sess);

	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
	{
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (!reg_pid) {
			goto error;
		}
		registry = reg_pid->registry->reg.ust;
		break;
	}
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
				ua_sess->tracing_id, ua_sess->bits_per_long,
				lttng_credentials_get_uid(&ua_sess->real_credentials));
		if (!reg_uid) {
			goto error;
		}
		registry = reg_uid->registry->reg.ust;
		break;
	}
	default:
		assert(0);
	};

error:
	return registry;
}

/*
 * Delete ust context safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
		struct ust_app *app)
{
	int ret;

	assert(ua_ctx);

	if (ua_ctx->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, ua_ctx->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
					sock, ua_ctx->obj->handle, ret);
		}
		free(ua_ctx->obj);
	}
	free(ua_ctx);
}

/*
 * Delete ust app event safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
		struct ust_app *app)
{
	int ret;

	assert(ua_event);

	free(ua_event->filter);
	if (ua_event->exclusion != NULL) {
		free(ua_event->exclusion);
	}
	if (ua_event->obj != NULL) {
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, ua_event->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release event obj failed with ret %d",
					sock, ret);
		}
		free(ua_event->obj);
	}
	free(ua_event);
}

/*
 * Delayed reclaim of a ust_app_event_notifier_rule object. This MUST be called
 * through a call_rcu().
 */
static
void free_ust_app_event_notifier_rule_rcu(struct rcu_head *head)
{
	struct ust_app_event_notifier_rule *obj = caa_container_of(
			head, struct ust_app_event_notifier_rule, rcu_head);

	free(obj);
}

/*
 * Delete ust app event notifier rule safely.
 */
static void delete_ust_app_event_notifier_rule(int sock,
		struct ust_app_event_notifier_rule *ua_event_notifier_rule,
		struct ust_app *app)
{
	int ret;

	assert(ua_event_notifier_rule);

	if (ua_event_notifier_rule->exclusion != NULL) {
		free(ua_event_notifier_rule->exclusion);
	}

	if (ua_event_notifier_rule->obj != NULL) {
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, ua_event_notifier_rule->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Failed to release event notifier object: app = '%s' (ppid %d), ret = %d",
					app->name, (int) app->ppid, ret);
		}

		free(ua_event_notifier_rule->obj);
	}

	lttng_trigger_put(ua_event_notifier_rule->trigger);
	call_rcu(&ua_event_notifier_rule->rcu_head,
			free_ust_app_event_notifier_rule_rcu);
}

/*
 * Release ust data object of the given stream.
 *
 * Return 0 on success or else a negative value.
 */
static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	int ret = 0;

	assert(stream);

	if (stream->obj) {
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, stream->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release stream obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 2);
		free(stream->obj);
	}

	return ret;
}

/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
		struct ust_app *app)
{
	assert(stream);

	(void) release_ust_app_stream(sock, stream, app);
	free(stream);
}

/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_channel_rcu(struct rcu_head *head)
{
	struct ust_app_channel *ua_chan =
		caa_container_of(head, struct ust_app_channel, rcu_head);

	ht_cleanup_push(ua_chan->ctx);
	ht_cleanup_push(ua_chan->events);
	free(ua_chan);
}

/*
 * Extract the lost packet or discarded events counter when the channel is
 * being deleted and store the value in the parent channel so we can
 * access it from lttng list and at stop/destroy.
 *
 * The session list lock must be held by the caller.
 */
static
void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
{
	uint64_t discarded = 0, lost = 0;
	struct ltt_session *session;
	struct ltt_ust_channel *uchan;

	if (ua_chan->attr.type != LTTNG_UST_ABI_CHAN_PER_CPU) {
		return;
	}

	rcu_read_lock();
	session = session_find_by_id(ua_chan->session->tracing_id);
	if (!session || !session->ust_session) {
		/*
		 * Not finding the session is not an error because there are
		 * multiple ways the channels can be torn down.
		 *
		 * 1) The session daemon can initiate the destruction of the
		 *    ust app session after receiving a destroy command or
		 *    during its shutdown/teardown.
		 * 2) The application, since we are in per-pid tracing, is
		 *    unregistering and tearing down its ust app session.
		 *
		 * Both paths are protected by the session list lock which
		 * ensures that the accounting of lost packets and discarded
		 * events is done exactly once. The session is then unpublished
		 * from the session list, resulting in this condition.
		 */
		goto end;
	}

	if (ua_chan->attr.overwrite) {
		consumer_get_lost_packets(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&lost);
	} else {
		consumer_get_discarded_events(ua_chan->session->tracing_id,
				ua_chan->key, session->ust_session->consumer,
				&discarded);
	}
	uchan = trace_ust_find_channel_by_name(
			session->ust_session->domain_global.channels,
			ua_chan->name);
	if (!uchan) {
		ERR("Missing UST channel to store discarded counters");
		goto end;
	}

	uchan->per_pid_closed_app_discarded += discarded;
	uchan->per_pid_closed_app_lost += lost;

end:
	rcu_read_unlock();
	if (session) {
		session_put(session);
	}
}

/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream, app);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx, app);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event, app);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key,
					sock >= 0);
		}
		/*
		 * A negative socket can be used by the caller when
		 * cleaning-up a ua_chan in an error path. Skip the
		 * accounting in this case.
		 */
		if (sock >= 0) {
			save_per_pid_lost_discarded_counters(ua_chan);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_objd, &iter);
		assert(!ret);
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_object(sock, ua_chan->obj);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}

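/*
 * Signal the end of the registration dialogue to the given application
 * over its command socket.
 *
 * Return 0 on success or else a negative lttng-ust error code.
 */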
int ust_app_register_done(struct ust_app *app)
{
	int ret;

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_register_done(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	return ret;
}

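/*
 * Release a UST object on behalf of the given application. A NULL app is
 * accepted: the object is then released with an invalid (-1) socket, as
 * is done elsewhere in this file when cleaning up without contacting a
 * (presumably gone) application.
 */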
int ust_app_release_object(struct ust_app *app, struct lttng_ust_abi_object_data *data)
{
	int ret, sock;

	if (app) {
		pthread_mutex_lock(&app->sock_lock);
		sock = app->sock;
	} else {
		sock = -1;
	}
	ret = lttng_ust_ctl_release_object(sock, data);
	if (app) {
		pthread_mutex_unlock(&app->sock_lock);
	}
	return ret;
}

/*
 * Push metadata to consumer socket.
 *
 * RCU read-side lock must be held to guarantee existence of socket.
 * Must be called with the ust app session lock held.
 * Must be called with the registry lock held.
 *
 * On success, return the len of metadata pushed or else a negative value.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset, new_metadata_len_sent;
	ssize_t ret_val;
	uint64_t metadata_key, metadata_version;

	assert(registry);
	assert(socket);

	metadata_key = registry->metadata_key;

	/*
	 * Means that no metadata was assigned to the session. This can
	 * happen if no start has been done previously.
	 */
	if (!metadata_key) {
		return 0;
	}

	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	new_metadata_len_sent = registry->metadata_len;
	metadata_version = registry->metadata_version;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't sent out. */
	memcpy(metadata_str, registry->metadata + offset, len);

push_data:
	pthread_mutex_unlock(&registry->lock);
	/*
	 * We need to unlock the registry while we push metadata to
	 * break a circular dependency between the consumerd metadata
	 * lock and the sessiond registry lock. Indeed, pushing metadata
	 * to the consumerd awaits that it gets pushed all the way to
	 * relayd, but doing so requires grabbing the metadata lock. If
	 * a concurrent metadata request is being performed by
	 * consumerd, this can try to grab the registry lock on the
	 * sessiond while holding the metadata lock on the consumer
	 * daemon. Those push and pull schemes are performed on two
	 * different bidirectional communication sockets.
	 */
	ret = consumer_push_metadata(socket, metadata_key,
			metadata_str, len, offset, metadata_version);
	pthread_mutex_lock(&registry->lock);
	if (ret < 0) {
		/*
		 * There is an acceptable race here between the registry
		 * metadata key assignment and the creation on the
		 * consumer. The session daemon can concurrently push
		 * metadata for this registry while being created on the
		 * consumer since the metadata key of the registry is
		 * assigned *before* it is set up to avoid the consumer
		 * asking for metadata that could possibly not be found
		 * in the session daemon.
		 *
		 * The metadata will get pushed either by the session
		 * being stopped or the consumer requesting metadata if
		 * that race is triggered.
		 */
		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
			ret = 0;
		} else {
			ERR("Error pushing metadata to consumer");
		}
		ret_val = ret;
		goto error_push;
	} else {
		/*
		 * Metadata may have been concurrently pushed, since
		 * we're not holding the registry lock while pushing to
		 * consumer. This is handled by the fact that we send
		 * the metadata content, size, and the offset at which
		 * that metadata belongs. This may arrive out of order
		 * on the consumer side, and the consumer is able to
		 * deal with overlapping fragments, as long as they are
		 * contiguous starting from offset 0. We keep the
		 * largest metadata_len_sent value of the concurrent
		 * sends.
		 */
		registry->metadata_len_sent =
				max_t(size_t, registry->metadata_len_sent,
						new_metadata_len_sent);
	}
	free(metadata_str);
	return len;

end:
error:
	if (ret_val) {
		/*
		 * On error, flag the registry that the metadata is
		 * closed. We were unable to push anything and this
		 * means that either the consumer is not responding or
		 * the metadata cache has been destroyed on the
		 * consumer.
		 */
		registry->metadata_closed = 1;
	}
error_push:
	free(metadata_str);
	return ret_val;
}

/*
 * For a given application and session, push metadata to consumer.
 * The consumer socket to use is retrieved from the given consumer
 * output based on the registry's bitness.
 * RCU read-side lock must be held while calling this function,
 * therefore ensuring existence of the registry. It also ensures the
 * existence of the socket throughout this function.
 *
 * Return 0 on success else a negative error.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
static int push_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	pthread_mutex_lock(&registry->lock);
	if (registry->metadata_closed) {
		ret_val = -EPIPE;
		goto error;
	}

	/* Get consumer socket to use to push the metadata. */
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret_val = -1;
		goto error;
	}

	ret = ust_app_push_metadata(registry, socket, 0);
	if (ret < 0) {
		ret_val = ret;
		goto error;
	}
	pthread_mutex_unlock(&registry->lock);
	return 0;

error:
	pthread_mutex_unlock(&registry->lock);
	return ret_val;
}

/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be held unless the application is
 * in the destroy path.
 *
 * Do not hold the registry lock while communicating with the consumerd, because
 * doing so causes inter-process deadlocks between consumerd and sessiond with
 * the metadata request notification.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;
	uint64_t metadata_key;
	bool registry_was_already_closed;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	pthread_mutex_lock(&registry->lock);
	metadata_key = registry->metadata_key;
	registry_was_already_closed = registry->metadata_closed;
	if (metadata_key != 0) {
		/*
		 * Mark the metadata as closed. Even on error this means that
		 * the consumer is not responding or not found, so either way
		 * a second close should NOT be emitted for this registry.
		 */
		registry->metadata_closed = 1;
	}
	pthread_mutex_unlock(&registry->lock);

	if (metadata_key == 0 || registry_was_already_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to close the metadata. */
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto end;
	}

	ret = consumer_close_metadata(socket, metadata_key);
	if (ret < 0) {
		goto end;
	}

end:
	rcu_read_unlock();
	return ret;
}

/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_session_rcu(struct rcu_head *head)
{
	struct ust_app_session *ua_sess =
		caa_container_of(head, struct ust_app_session, rcu_head);

	ht_cleanup_push(ua_sess->channels);
	free(ua_sess);
}

/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * The session list lock must be held by the caller.
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	assert(!ua_sess->deleted);
	ua_sess->deleted = true;

	registry = get_session_registry(ua_sess);
	/* Registry can be null on error path during initialization. */
	if (registry) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flagged the metadata registry
		 * as closed, so don't send a close command if it is already closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			/*
			 * Registry can be null on error path during
			 * initialization.
			 */
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	if (ua_sess->handle != -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = lttng_ust_ctl_release_handle(sock, ua_sess->handle);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
		/* Remove session from application UST object descriptor. */
		iter.iter.node = &ua_sess->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_sessions_objd, &iter);
		assert(!ret);
	}

	pthread_mutex_unlock(&ua_sess->lock);

	consumer_output_put(ua_sess->consumer);

	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}

/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;
	struct lttng_ht_iter iter;
	struct ust_app_event_notifier_rule *event_notifier_rule;
	bool event_notifier_write_fd_is_open;

	/*
	 * The session list lock must be held during this function to guarantee
	 * the existence of ua_sess.
	 */
	session_lock_list();
	/* Delete ust app sessions info */
	sock = app->sock;
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/* Remove the event notifier rules associated with this app. */
	rcu_read_lock();
	cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
			&iter.iter, event_notifier_rule, node.node) {
		ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
		assert(!ret);

		delete_ust_app_event_notifier_rule(
				app->sock, event_notifier_rule, app);
	}

	rcu_read_unlock();

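	/*
	 * Defer the hash tables' destruction to the ht-cleanup thread; it
	 * cannot be performed from an RCU read-side critical section nor
	 * from the call_rcu thread (see delete_ust_app_session_rcu).
	 */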
	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_sessions_objd);
	ht_cleanup_push(app->ust_objd);
	ht_cleanup_push(app->token_to_event_notifier_rule_ht);

	/*
	 * This could be NULL if the event notifier setup failed (e.g the app
	 * was killed or the tracer does not support this feature).
	 */
	if (app->event_notifier_group.object) {
		enum lttng_error_code ret_code;
		enum event_notifier_error_accounting_status status;

		const int event_notifier_read_fd = lttng_pipe_get_readfd(
				app->event_notifier_group.event_pipe);

		ret_code = notification_thread_command_remove_tracer_event_source(
				the_notification_thread_handle,
				event_notifier_read_fd);
		if (ret_code != LTTNG_OK) {
			ERR("Failed to remove application tracer event source from notification thread");
		}

		status = event_notifier_error_accounting_unregister_app(app);
		if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
			ERR("Error unregistering app from event notifier error accounting");
		}

		lttng_ust_ctl_release_object(sock, app->event_notifier_group.object);
		free(app->event_notifier_group.object);
	}

	event_notifier_write_fd_is_open = lttng_pipe_is_write_open(
			app->event_notifier_group.event_pipe);
	lttng_pipe_destroy(app->event_notifier_group.event_pipe);
	/*
	 * Release the file descriptors reserved for the event notifier pipe.
	 * The app could be destroyed before the write end of the pipe could be
	 * passed to the application (and closed). In that case, both file
	 * descriptors must be released.
	 */
	lttng_fd_put(LTTNG_FD_APPS, event_notifier_write_fd_is_open ? 2 : 1);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
	session_unlock_list();
}

/*
 * URCU intermediate call to delete an UST app.
 */
static
void delete_ust_app_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_ulong *node =
		caa_container_of(head, struct lttng_ht_node_ulong, head);
	struct ust_app *app =
		caa_container_of(node, struct ust_app, pid_n);

	DBG3("Call RCU deleting app PID %d", app->pid);
	delete_ust_app(app);
}

/*
 * Delete the session from the application ht and delete the data structure by
 * freeing every object inside and releasing them.
 *
 * The session list lock must be held by the caller.
 */
static void destroy_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess)
{
	int ret;
	struct lttng_ht_iter iter;

	assert(app);
	assert(ua_sess);

	iter.iter.node = &ua_sess->node.node;
	ret = lttng_ht_del(app->sessions, &iter);
	if (ret) {
		/* Already scheduled for teardown. */
		goto end;
	}

	/* Once deleted, free the data structure. */
	delete_ust_app_session(app->sock, ua_sess, app);

end:
	return;
}

/*
 * Alloc new UST app session.
 */
static
struct ust_app_session *alloc_ust_app_session(void)
{
	struct ust_app_session *ua_sess;

	/* Init most of the default values by allocating and zeroing */
	ua_sess = zmalloc(sizeof(struct ust_app_session));
	if (ua_sess == NULL) {
		PERROR("malloc");
		goto error_free;
	}

	ua_sess->handle = -1;
	ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	ua_sess->metadata_attr.type = LTTNG_UST_ABI_CHAN_METADATA;
	pthread_mutex_init(&ua_sess->lock, NULL);

	return ua_sess;

error_free:
	return NULL;
}

/*
 * Alloc new UST app channel.
 */
static
struct ust_app_channel *alloc_ust_app_channel(const char *name,
		struct ust_app_session *ua_sess,
		struct lttng_ust_abi_channel_attr *attr)
{
	struct ust_app_channel *ua_chan;

	/* Init most of the default values by allocating and zeroing */
	ua_chan = zmalloc(sizeof(struct ust_app_channel));
	if (ua_chan == NULL) {
		PERROR("malloc");
		goto error;
	}

	/* Setup channel name */
	strncpy(ua_chan->name, name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->enabled = 1;
	ua_chan->handle = -1;
	ua_chan->session = ua_sess;
	ua_chan->key = get_next_channel_key();
	ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);

	CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
	CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);

	/* Copy attributes */
	if (attr) {
		/* Translate from lttng_ust_channel to lttng_ust_ctl_consumer_channel_attr. */
		ua_chan->attr.subbuf_size = attr->subbuf_size;
		ua_chan->attr.num_subbuf = attr->num_subbuf;
		ua_chan->attr.overwrite = attr->overwrite;
		ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
		ua_chan->attr.read_timer_interval = attr->read_timer_interval;
		ua_chan->attr.output = attr->output;
		ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
	}
	/* By default, the channel is a per-CPU channel. */
	ua_chan->attr.type = LTTNG_UST_ABI_CHAN_PER_CPU;

	DBG3("UST app channel %s allocated", ua_chan->name);

	return ua_chan;

error:
	return NULL;
}

/*
 * Allocate and initialize a UST app stream.
 *
 * Return newly allocated stream pointer or NULL on error.
 */
struct ust_app_stream *ust_app_alloc_stream(void)
{
	struct ust_app_stream *stream = NULL;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("zmalloc ust app stream");
		goto error;
	}

	/* Zero could be a valid value for a handle so set it to -1. */
	stream->handle = -1;

error:
	return stream;
}

/*
 * Alloc new UST app event.
 */
static
struct ust_app_event *alloc_ust_app_event(char *name,
		struct lttng_ust_abi_event *attr)
{
	struct ust_app_event *ua_event;

	/* Init most of the default values by allocating and zeroing */
	ua_event = zmalloc(sizeof(struct ust_app_event));
	if (ua_event == NULL) {
		PERROR("Failed to allocate ust_app_event structure");
		goto error;
	}

	ua_event->enabled = 1;
	strncpy(ua_event->name, name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';
	lttng_ht_node_init_str(&ua_event->node, ua_event->name);

	/* Copy attributes */
	if (attr) {
		memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
	}

	DBG3("UST app event %s allocated", ua_event->name);

	return ua_event;

error:
	return NULL;
}

/*
 * Allocate a new UST app event notifier rule.
 */
static struct ust_app_event_notifier_rule *alloc_ust_app_event_notifier_rule(
		struct lttng_trigger *trigger)
{
	enum lttng_event_rule_generate_exclusions_status
			generate_exclusion_status;
	struct ust_app_event_notifier_rule *ua_event_notifier_rule;
	struct lttng_condition *condition = NULL;
	const struct lttng_event_rule *event_rule = NULL;

	ua_event_notifier_rule = zmalloc(sizeof(struct ust_app_event_notifier_rule));
	if (ua_event_notifier_rule == NULL) {
		PERROR("Failed to allocate ust_app_event_notifier_rule structure");
		goto error;
	}

	ua_event_notifier_rule->enabled = 1;
	ua_event_notifier_rule->token = lttng_trigger_get_tracer_token(trigger);
	lttng_ht_node_init_u64(&ua_event_notifier_rule->node,
			ua_event_notifier_rule->token);

	condition = lttng_trigger_get_condition(trigger);
	assert(condition);
	assert(lttng_condition_get_type(condition) ==
			LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);

	assert(LTTNG_CONDITION_STATUS_OK ==
			lttng_condition_event_rule_matches_get_rule(
					condition, &event_rule));
	assert(event_rule);

	ua_event_notifier_rule->error_counter_index =
			lttng_condition_event_rule_matches_get_error_counter_index(condition);
	/* Acquire the event notifier's reference to the trigger. */
	lttng_trigger_get(trigger);

	ua_event_notifier_rule->trigger = trigger;
	ua_event_notifier_rule->filter = lttng_event_rule_get_filter_bytecode(event_rule);
	generate_exclusion_status = lttng_event_rule_generate_exclusions(
			event_rule, &ua_event_notifier_rule->exclusion);
	switch (generate_exclusion_status) {
	case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_OK:
	case LTTNG_EVENT_RULE_GENERATE_EXCLUSIONS_STATUS_NONE:
		break;
	default:
		/* Error occurred. */
		ERR("Failed to generate exclusions from trigger while allocating an event notifier rule");
		goto error_put_trigger;
	}

	DBG3("UST app event notifier rule allocated: token = %" PRIu64,
			ua_event_notifier_rule->token);

	return ua_event_notifier_rule;

error_put_trigger:
	lttng_trigger_put(trigger);
error:
	free(ua_event_notifier_rule);
	return NULL;
}

/*
 * Alloc new UST app context.
 */
static
struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
{
	struct ust_app_ctx *ua_ctx;

	ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
	if (ua_ctx == NULL) {
		goto error;
	}

	CDS_INIT_LIST_HEAD(&ua_ctx->list);

	if (uctx) {
		memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
		if (uctx->ctx == LTTNG_UST_ABI_CONTEXT_APP_CONTEXT) {
			char *provider_name = NULL, *ctx_name = NULL;

			provider_name = strdup(uctx->u.app_ctx.provider_name);
			ctx_name = strdup(uctx->u.app_ctx.ctx_name);
			if (!provider_name || !ctx_name) {
				free(provider_name);
				free(ctx_name);
				goto error;
			}

			ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
			ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
		}
	}

	DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
	return ua_ctx;
error:
	free(ua_ctx);
	return NULL;
}

/*
 * Create a liblttng-ust filter bytecode from the given bytecode.
 *
 * Return allocated filter or NULL on error.
 */
static struct lttng_ust_abi_filter_bytecode *create_ust_filter_bytecode_from_bytecode(
		const struct lttng_bytecode *orig_f)
{
	struct lttng_ust_abi_filter_bytecode *filter = NULL;

	/* Copy filter bytecode. */
	filter = zmalloc(sizeof(*filter) + orig_f->len);
	if (!filter) {
		PERROR("Failed to allocate lttng_ust_filter_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
		goto error;
	}

	assert(sizeof(struct lttng_bytecode) ==
			sizeof(struct lttng_ust_abi_filter_bytecode));
	memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
error:
	return filter;
}

/*
 * Create a liblttng-ust capture bytecode from the given bytecode.
 *
 * Return allocated capture bytecode or NULL on error.
 */
static struct lttng_ust_abi_capture_bytecode *
create_ust_capture_bytecode_from_bytecode(const struct lttng_bytecode *orig_f)
{
	struct lttng_ust_abi_capture_bytecode *capture = NULL;

	/* Copy capture bytecode. */
	capture = zmalloc(sizeof(*capture) + orig_f->len);
	if (!capture) {
		PERROR("Failed to allocate lttng_ust_abi_capture_bytecode: bytecode len = %" PRIu32 " bytes", orig_f->len);
		goto error;
	}

	assert(sizeof(struct lttng_bytecode) ==
			sizeof(struct lttng_ust_abi_capture_bytecode));
	memcpy(capture, orig_f, sizeof(*capture) + orig_f->len);
error:
	return capture;
}

/*
 * Find an ust_app using the sock and return it. RCU read side lock must be
 * held before calling this helper function.
 */
struct ust_app *ust_app_find_by_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, sock_n);

error:
	return NULL;
}

/*
 * Find an ust_app using the notify sock and return it. RCU read side lock must
 * be held before calling this helper function.
 */
static struct ust_app *find_app_by_notify_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
			&iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by notify sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, notify_sock_n);

error:
	return NULL;
}

/*
 * Look up an ust app event based on the event name, filter bytecode,
 * loglevel and exclusions.
 *
 * Return an ust_app_event object or NULL on error.
 */
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
		const char *name, const struct lttng_bytecode *filter,
		int loglevel_value,
		const struct lttng_event_exclusion *exclusion)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ust_app_event *event = NULL;
	struct ust_app_ht_key key;

	assert(name);
	assert(ht);

	/* Setup key for event lookup. */
	key.name = name;
	key.filter = filter;
	key.loglevel_type = loglevel_value;
	/* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
	key.exclusion = exclusion;

	/* Lookup using the event name as hash and a custom match fct. */
	cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
			ht_match_ust_app_event, &key, &iter.iter);
	node = lttng_ht_iter_get_node_str(&iter);
	if (node == NULL) {
		goto end;
	}

	event = caa_container_of(node, struct ust_app_event, node);

end:
	return event;
}

/*
 * Look-up an event notifier rule based on its token id.
 *
 * Must be called with the RCU read lock held.
 * Return an ust_app_event_notifier_rule object or NULL on error.
 */
static struct ust_app_event_notifier_rule *find_ust_app_event_notifier_rule(
		struct lttng_ht *ht, uint64_t token)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct ust_app_event_notifier_rule *event_notifier_rule = NULL;

	assert(ht);

	lttng_ht_lookup(ht, &token, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node == NULL) {
		DBG2("UST app event notifier rule token not found: token = %" PRIu64,
				token);
		goto end;
	}

	event_notifier_rule = caa_container_of(
			node, struct ust_app_event_notifier_rule, node);
end:
	return event_notifier_rule;
}

/*
 * Create the channel context on the tracer.
 *
 * Called with UST app session lock held.
 */
static
int create_ust_channel_context(struct ust_app_channel *ua_chan,
		struct ust_app_ctx *ua_ctx, struct ust_app *app)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_add_context(app->sock, &ua_ctx->ctx,
			ua_chan->obj, &ua_ctx->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app create channel context failed for app (pid: %d) "
					"with ret %d", app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app add context failed. Application is dead.");
		}
		goto error;
	}

	ua_ctx->handle = ua_ctx->obj->handle;

	DBG2("UST app context handle %d created successfully for channel %s",
			ua_ctx->handle, ua_chan->name);

error:
	health_code_update();
	return ret;
}

/*
 * Set the filter on the tracer.
 */
static int set_ust_object_filter(struct ust_app *app,
		const struct lttng_bytecode *bytecode,
		struct lttng_ust_abi_object_data *ust_object)
{
	int ret;
	struct lttng_ust_abi_filter_bytecode *ust_bytecode = NULL;

	health_code_update();

	ust_bytecode = create_ust_filter_bytecode_from_bytecode(bytecode);
	if (!ust_bytecode) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_set_filter(app->sock, ust_bytecode,
			ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app set object filter failed: object = %p of app pid = %d, ret = %d",
					ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("Failed to set UST app object filter. Application is dead.");
		}
		goto error;
	}

	DBG2("UST filter successfully set: object = %p", ust_object);

error:
	health_code_update();
	free(ust_bytecode);
	return ret;
}

/*
 * Set a capture bytecode for the passed object.
 * The sequence number enforces the ordering at runtime and on reception of
 * the captured payloads.
 */
static int set_ust_capture(struct ust_app *app,
		const struct lttng_bytecode *bytecode,
		unsigned int capture_seqnum,
		struct lttng_ust_abi_object_data *ust_object)
{
	int ret;
	struct lttng_ust_abi_capture_bytecode *ust_bytecode = NULL;

	health_code_update();

	ust_bytecode = create_ust_capture_bytecode_from_bytecode(bytecode);
	if (!ust_bytecode) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}

	/*
	 * Set the sequence number to ensure the capture of fields is ordered.
	 */
	ust_bytecode->seqnum = capture_seqnum;

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_set_capture(app->sock, ust_bytecode,
			ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app set object capture failed: object = %p of app pid = %d, ret = %d",
					ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("Failed to set UST app object capture. Application is dead.");
		}

		goto error;
	}

	DBG2("UST capture successfully set: object = %p", ust_object);

error:
	health_code_update();
	free(ust_bytecode);
	return ret;
}

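/*
 * Create a liblttng-ust event exclusion structure from the given exclusion
 * list. The caller is responsible for freeing the returned structure.
 *
 * Return the allocated exclusion or NULL on allocation error.
 */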
static
struct lttng_ust_abi_event_exclusion *create_ust_exclusion_from_exclusion(
		const struct lttng_event_exclusion *exclusion)
{
	struct lttng_ust_abi_event_exclusion *ust_exclusion = NULL;
	size_t exclusion_alloc_size = sizeof(struct lttng_ust_abi_event_exclusion) +
			LTTNG_UST_ABI_SYM_NAME_LEN * exclusion->count;

	ust_exclusion = zmalloc(exclusion_alloc_size);
	if (!ust_exclusion) {
		PERROR("malloc");
		goto end;
	}

	assert(sizeof(struct lttng_event_exclusion) ==
			sizeof(struct lttng_ust_abi_event_exclusion));
	memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
end:
	return ust_exclusion;
}

/*
 * Set event exclusions on the tracer.
 */
static int set_ust_object_exclusions(struct ust_app *app,
		const struct lttng_event_exclusion *exclusions,
		struct lttng_ust_abi_object_data *ust_object)
{
	int ret;
	struct lttng_ust_abi_event_exclusion *ust_exclusions = NULL;

	assert(exclusions && exclusions->count > 0);

	health_code_update();

	ust_exclusions = create_ust_exclusion_from_exclusion(
			exclusions);
	if (!ust_exclusions) {
		ret = -LTTNG_ERR_NOMEM;
		goto error;
	}
	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_set_exclusion(app->sock, ust_exclusions, ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Failed to set UST app exclusions for object %p of app (pid: %d) "
					"with ret %d", ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("Failed to set UST app object exclusions. Application is dead.");
		}
		goto error;
	}

	DBG2("UST exclusions set successfully for object %p", ust_object);

error:
	health_code_update();
	free(ust_exclusions);
	return ret;
}

/*
 * Disable the specified object on the UST tracer for the UST session.
 */
static int disable_ust_object(struct ust_app *app,
		struct lttng_ust_abi_object_data *object)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_disable(app->sock, object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Failed to disable UST app object %p app (pid: %d) with ret %d",
					object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("Failed to disable UST app object. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app object %p disabled successfully for app (pid: %d)",
			object, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Disable the specified channel on the UST tracer for the UST session.
 */
static int disable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_disable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app channel %s disable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_chan->name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app disable channel failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app channel %s disabled successfully for app (pid: %d)",
			ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Enable the specified channel on the UST tracer for the UST session.
 */
static int enable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_enable(app->sock, ua_chan->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app channel %s enable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_chan->name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app enable channel failed. Application is dead.");
		}
		goto error;
	}

	ua_chan->enabled = 1;

	DBG2("UST app channel %s enabled successfully for app (pid: %d)",
			ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Enable the specified object on the UST tracer for the UST session.
 */
static int enable_ust_object(
		struct ust_app *app, struct lttng_ust_abi_object_data *ust_object)
{
	int ret;

	health_code_update();

	pthread_mutex_lock(&app->sock_lock);
	ret = lttng_ust_ctl_enable(app->sock, ust_object);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app enable failed for object %p app (pid: %d) with ret %d",
					ust_object, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("Failed to enable UST app object. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app object %p enabled successfully for app (pid: %d)",
			ust_object, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Send channel and stream buffers to the application.
 *
 * Return 0 on success. On error, a negative value is returned.
 */
static int send_channel_pid_to_ust(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_app_stream *stream, *stmp;

	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	health_code_update();

	DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
			app->sock);

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN;	/* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = -ENOTCONN;	/* Caused by app exiting. */
			goto error;
		} else if (ret < 0) {
			goto error;
		}
		/* We don't need the stream anymore once sent to the tracer. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream, app);
	}
	/* Flag the channel as sent to the application. */
	ua_chan->is_sent = 1;

error:
	health_code_update();
	return ret;
}
1924
1925/*
1926 * Create the specified event onto the UST tracer for a UST session.
1927 *
1928 * Should be called with session mutex held.
1929 */
1930static
1931int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
1932 struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
1933{
1934 int ret = 0;
1935
1936 health_code_update();
1937
1938 /* Create UST event on tracer */
1939 pthread_mutex_lock(&app->sock_lock);
1940 ret = lttng_ust_ctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
1941 &ua_event->obj);
1942 pthread_mutex_unlock(&app->sock_lock);
1943 if (ret < 0) {
1944 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1945 ERR("Error ustctl create event %s for app pid: %d "
1946 "with ret %d",
1947 ua_event->attr.name, app->pid, ret);
1948 } else {
1949 /*
1950 * This is normal behavior, an application can die during the
1951 * creation process. Don't report an error so the execution can
1952 * continue normally.
1953 */
1954 ret = 0;
1955 DBG3("UST app create event failed. Application is dead.");
1956 }
1957 goto error;
1958 }
1959
1960 ua_event->handle = ua_event->obj->handle;
1961
1962 DBG2("UST app event %s created successfully for pid:%d object: %p",
1963 ua_event->attr.name, app->pid, ua_event->obj);
1964
1965 health_code_update();
1966
1967 /* Set filter if one is present. */
1968 if (ua_event->filter) {
1969 ret = set_ust_object_filter(app, ua_event->filter, ua_event->obj);
1970 if (ret < 0) {
1971 goto error;
1972 }
1973 }
1974
1975 /* Set exclusions for the event */
1976 if (ua_event->exclusion) {
1977 ret = set_ust_object_exclusions(app, ua_event->exclusion, ua_event->obj);
1978 if (ret < 0) {
1979 goto error;
1980 }
1981 }
1982
1983 /* The event is created disabled; enable it on the tracer if needed. */
1984 if (ua_event->enabled) {
1985 /*
1986 * We need to explicitly enable the event, since it is
1987 * disabled at creation.
1988 */
1989 ret = enable_ust_object(app, ua_event->obj);
1990 if (ret < 0) {
1991 /*
1992 * If we hit an EPERM, something is wrong with our enable call. If
1993 * we get an EEXIST, there is a problem on the tracer side since we
1994 * just created it.
1995 */
1996 switch (ret) {
1997 case -LTTNG_UST_ERR_PERM:
1998 /* Code flow problem */
1999 abort();
2000 case -LTTNG_UST_ERR_EXIST:
2001 /* It's OK for our use case. */
2002 ret = 0;
2003 break;
2004 default:
2005 break;
2006 }
2007 goto error;
2008 }
2009 }
2010
2011error:
2012 health_code_update();
2013 return ret;
2014}
2015
2016static int init_ust_event_notifier_from_event_rule(
2017 const struct lttng_event_rule *rule,
2018 struct lttng_ust_abi_event_notifier *event_notifier)
2019{
2020 enum lttng_event_rule_status status;
2021 enum lttng_ust_abi_loglevel_type ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
2022 int loglevel = -1, ret = 0;
2023 const char *pattern;
2024
2025 /* For now, only LTTNG_EVENT_RULE_TYPE_TRACEPOINT is supported. */
2026 assert(lttng_event_rule_get_type(rule) ==
2027 LTTNG_EVENT_RULE_TYPE_TRACEPOINT);
2028
2029 memset(event_notifier, 0, sizeof(*event_notifier));
2030
2031 if (lttng_event_rule_targets_agent_domain(rule)) {
2032 /*
2033 * Special event for agents.
2034 * The actual meat of the event is in the filter that will be
2035 * attached later on.
2036 * Set the default values for the agent event.
2037 */
2038 pattern = event_get_default_agent_ust_name(
2039 lttng_event_rule_get_domain_type(rule));
2040 loglevel = 0;
2041 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
2042 } else {
2043 const struct lttng_log_level_rule *log_level_rule;
2044
2045 status = lttng_event_rule_tracepoint_get_pattern(rule, &pattern);
2046 if (status != LTTNG_EVENT_RULE_STATUS_OK) {
2047 /* At this point, this is a fatal error. */
2048 abort();
2049 }
2050
2051 status = lttng_event_rule_tracepoint_get_log_level_rule(
2052 rule, &log_level_rule);
2053 if (status == LTTNG_EVENT_RULE_STATUS_UNSET) {
2054 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;
2055 } else if (status == LTTNG_EVENT_RULE_STATUS_OK) {
2056 enum lttng_log_level_rule_status llr_status;
2057
2058 switch (lttng_log_level_rule_get_type(log_level_rule)) {
2059 case LTTNG_LOG_LEVEL_RULE_TYPE_EXACTLY:
2060 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_SINGLE;
2061 llr_status = lttng_log_level_rule_exactly_get_level(
2062 log_level_rule, &loglevel);
2063 break;
2064 case LTTNG_LOG_LEVEL_RULE_TYPE_AT_LEAST_AS_SEVERE_AS:
2065 ust_loglevel_type = LTTNG_UST_ABI_LOGLEVEL_RANGE;
2066 llr_status = lttng_log_level_rule_at_least_as_severe_as_get_level(
2067 log_level_rule, &loglevel);
2068 break;
2069 default:
2070 abort();
2071 }
2072
2073 assert(llr_status == LTTNG_LOG_LEVEL_RULE_STATUS_OK);
2074 } else {
2075 /* At this point this is a fatal error. */
2076 abort();
2077 }
2078 }
2079
2080 event_notifier->event.instrumentation = LTTNG_UST_ABI_TRACEPOINT;
2081 ret = lttng_strncpy(event_notifier->event.name, pattern,
2082 LTTNG_UST_ABI_SYM_NAME_LEN - 1);
2083 if (ret) {
2084 ERR("Failed to copy event rule pattern to notifier: pattern = '%s'",
2085 pattern);
2086 goto end;
2087 }
2088
2089 event_notifier->event.loglevel_type = ust_loglevel_type;
2090 event_notifier->event.loglevel = loglevel;
2091end:
2092 return ret;
2093}
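/*
 * Recap of the log level mapping performed above, as a hedged standalone
 * sketch (hypothetical helper, not part of the call graph): an "exactly"
 * rule maps to a single loglevel, "at least as severe as" maps to a
 * range, and an unset rule falls back to "all".
 */
static enum lttng_ust_abi_loglevel_type log_level_rule_type_to_ust(
 enum lttng_log_level_rule_type type)
{
 switch (type) {
 case LTTNG_LOG_LEVEL_RULE_TYPE_EXACTLY:
 return LTTNG_UST_ABI_LOGLEVEL_SINGLE;
 case LTTNG_LOG_LEVEL_RULE_TYPE_AT_LEAST_AS_SEVERE_AS:
 return LTTNG_UST_ABI_LOGLEVEL_RANGE;
 default:
 return LTTNG_UST_ABI_LOGLEVEL_ALL;
 }
}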
2094
2095/*
2096 * Create the specified event notifier against the user space tracer of a
2097 * given application.
2098 */
2099static int create_ust_event_notifier(struct ust_app *app,
2100 struct ust_app_event_notifier_rule *ua_event_notifier_rule)
2101{
2102 int ret = 0;
2103 enum lttng_condition_status condition_status;
2104 const struct lttng_condition *condition = NULL;
2105 struct lttng_ust_abi_event_notifier event_notifier;
2106 const struct lttng_event_rule *event_rule = NULL;
2107 unsigned int capture_bytecode_count = 0, i;
2108 enum lttng_condition_status cond_status;
2109
2110 health_code_update();
2111 assert(app->event_notifier_group.object);
2112
2113 condition = lttng_trigger_get_const_condition(
2114 ua_event_notifier_rule->trigger);
2115 assert(condition);
2116 assert(lttng_condition_get_type(condition) ==
2117 LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
2118
2119 condition_status = lttng_condition_event_rule_matches_get_rule(
2120 condition, &event_rule);
2121 assert(condition_status == LTTNG_CONDITION_STATUS_OK);
2122
2123 assert(event_rule);
2124 assert(lttng_event_rule_get_type(event_rule) == LTTNG_EVENT_RULE_TYPE_TRACEPOINT);
2125
2126 init_ust_event_notifier_from_event_rule(event_rule, &event_notifier);
2127 event_notifier.event.token = ua_event_notifier_rule->token;
2128 event_notifier.error_counter_index = ua_event_notifier_rule->error_counter_index;
2129
2130 /* Create UST event notifier against the tracer. */
2131 pthread_mutex_lock(&app->sock_lock);
2132 ret = lttng_ust_ctl_create_event_notifier(app->sock, &event_notifier,
2133 app->event_notifier_group.object,
2134 &ua_event_notifier_rule->obj);
2135 pthread_mutex_unlock(&app->sock_lock);
2136 if (ret < 0) {
2137 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2138 ERR("Error ustctl create event notifier: name = '%s', app = '%s' (ppid: %d), ret = %d",
2139 event_notifier.event.name, app->name,
2140 app->ppid, ret);
2141 } else {
2142 /*
2143 * This is normal behavior, an application can die
2144 * during the creation process. Don't report an error so
2145 * the execution can continue normally.
2146 */
2147 ret = 0;
2148 DBG3("UST app create event notifier failed (application is dead): app = '%s' (ppid = %d)",
2149 app->name, app->ppid);
2150 }
2151
2152 goto error;
2153 }
2154
2155 ua_event_notifier_rule->handle = ua_event_notifier_rule->obj->handle;
2156
2157 DBG2("UST app event notifier %s created successfully: app = '%s' (ppid: %d), object: %p",
2158 event_notifier.event.name, app->name, app->ppid,
2159 ua_event_notifier_rule->obj);
2160
2161 health_code_update();
2162
2163 /* Set filter if one is present. */
2164 if (ua_event_notifier_rule->filter) {
2165 ret = set_ust_object_filter(app, ua_event_notifier_rule->filter,
2166 ua_event_notifier_rule->obj);
2167 if (ret < 0) {
2168 goto error;
2169 }
2170 }
2171
2172 /* Set exclusions for the event. */
2173 if (ua_event_notifier_rule->exclusion) {
2174 ret = set_ust_object_exclusions(app,
2175 ua_event_notifier_rule->exclusion,
2176 ua_event_notifier_rule->obj);
2177 if (ret < 0) {
2178 goto error;
2179 }
2180 }
2181
2182 /* Set the capture bytecodes. */
2183 cond_status = lttng_condition_event_rule_matches_get_capture_descriptor_count(
2184 condition, &capture_bytecode_count);
2185 assert(cond_status == LTTNG_CONDITION_STATUS_OK);
2186
2187 for (i = 0; i < capture_bytecode_count; i++) {
2188 const struct lttng_bytecode *capture_bytecode =
2189 lttng_condition_event_rule_matches_get_capture_bytecode_at_index(
2190 condition, i);
2191
2192 ret = set_ust_capture(app, capture_bytecode, i,
2193 ua_event_notifier_rule->obj);
2194 if (ret < 0) {
2195 goto error;
2196 }
2197 }
2198
2199 /*
2200 * We now need to explicitly enable the event, since it
2201 * is disabled at creation.
2202 */
2203 ret = enable_ust_object(app, ua_event_notifier_rule->obj);
2204 if (ret < 0) {
2205 /*
2206 * If we hit an EPERM, something is wrong with our enable call.
2207 * If we get an EEXIST, there is a problem on the tracer side
2208 * since we just created it.
2209 */
2210 switch (ret) {
2211 case -LTTNG_UST_ERR_PERM:
2212 /* Code flow problem. */
2213 abort();
2214 case -LTTNG_UST_ERR_EXIST:
2215 /* It's OK for our use case. */
2216 ret = 0;
2217 break;
2218 default:
2219 break;
2220 }
2221
2222 goto error;
2223 }
2224
2225 ua_event_notifier_rule->enabled = true;
2226
2227error:
2228 health_code_update();
2229 return ret;
2230}
2231
2232/*
2233 * Copy data between a UST app event and an LTT event.
2234 */
2235static void shadow_copy_event(struct ust_app_event *ua_event,
2236 struct ltt_ust_event *uevent)
2237{
2238 size_t exclusion_alloc_size;
2239
2240 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
2241 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
2242
2243 ua_event->enabled = uevent->enabled;
2244
2245 /* Copy event attributes */
2246 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
2247
2248 /* Copy filter bytecode */
2249 if (uevent->filter) {
2250 ua_event->filter = lttng_bytecode_copy(uevent->filter);
2251 /* Filter might be NULL here in case of ENOMEM. */
2252 }
2253
2254 /* Copy exclusion data */
2255 if (uevent->exclusion) {
2256 exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
2257 LTTNG_UST_ABI_SYM_NAME_LEN * uevent->exclusion->count;
2258 ua_event->exclusion = zmalloc(exclusion_alloc_size);
2259 if (ua_event->exclusion == NULL) {
2260 PERROR("malloc");
2261 } else {
2262 memcpy(ua_event->exclusion, uevent->exclusion,
2263 exclusion_alloc_size);
2264 }
2265 }
2266}
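/*
 * Sizing note for the exclusion copy above, assuming the flexible-array
 * layout implied by the computation: the allocation is the fixed
 * lttng_event_exclusion header plus LTTNG_UST_ABI_SYM_NAME_LEN bytes per
 * excluded name. The same computation as a sketch (hypothetical helper):
 */
static inline size_t event_exclusion_alloc_size(unsigned int count)
{
 return sizeof(struct lttng_event_exclusion) +
 (size_t) LTTNG_UST_ABI_SYM_NAME_LEN * count;
}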
2267
2268/*
2269 * Copy data between a UST app channel and an LTT channel.
2270 */
2271static void shadow_copy_channel(struct ust_app_channel *ua_chan,
2272 struct ltt_ust_channel *uchan)
2273{
2274 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
2275
2276 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
2277 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
2278
2279 ua_chan->tracefile_size = uchan->tracefile_size;
2280 ua_chan->tracefile_count = uchan->tracefile_count;
2281
2282 /* Copy event attributes since the layout is different. */
2283 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
2284 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
2285 ua_chan->attr.overwrite = uchan->attr.overwrite;
2286 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
2287 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
2288 ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
2289 ua_chan->attr.output = uchan->attr.output;
2290 ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;
2291
2292 /*
2293 * Note that the attribute channel type is not set since the channel on the
2294 * tracing registry side does not have this information.
2295 */
2296
2297 ua_chan->enabled = uchan->enabled;
2298 ua_chan->tracing_channel_id = uchan->id;
2299
2300 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
2301}
2302
2303/*
2304 * Copy data between a UST app session and a regular LTT session.
2305 */
2306static void shadow_copy_session(struct ust_app_session *ua_sess,
2307 struct ltt_ust_session *usess, struct ust_app *app)
2308{
2309 struct tm *timeinfo;
2310 char datetime[16];
2311 int ret;
2312 char tmp_shm_path[PATH_MAX];
2313
2314 timeinfo = localtime(&app->registration_time);
2315 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
2316
2317 DBG2("Shadow copy of session handle %d", ua_sess->handle);
2318
2319 ua_sess->tracing_id = usess->id;
2320 ua_sess->id = get_next_session_id();
2321 LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.uid, app->uid);
2322 LTTNG_OPTIONAL_SET(&ua_sess->real_credentials.gid, app->gid);
2323 LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.uid, usess->uid);
2324 LTTNG_OPTIONAL_SET(&ua_sess->effective_credentials.gid, usess->gid);
2325 ua_sess->buffer_type = usess->buffer_type;
2326 ua_sess->bits_per_long = app->bits_per_long;
2327
2328 /* Only one consumer object per session is possible. */
2329 consumer_output_get(usess->consumer);
2330 ua_sess->consumer = usess->consumer;
2331
2332 ua_sess->output_traces = usess->output_traces;
2333 ua_sess->live_timer_interval = usess->live_timer_interval;
2334 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
2335 &usess->metadata_attr);
2336
2337 switch (ua_sess->buffer_type) {
2338 case LTTNG_BUFFER_PER_PID:
2339 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
2340 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
2341 datetime);
2342 break;
2343 case LTTNG_BUFFER_PER_UID:
2344 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
2345 DEFAULT_UST_TRACE_UID_PATH,
2346 lttng_credentials_get_uid(&ua_sess->real_credentials),
2347 app->bits_per_long);
2348 break;
2349 default:
2350 assert(0);
2351 goto error;
2352 }
2353 if (ret < 0) {
2354 PERROR("snprintf UST shadow copy session");
2355 assert(0);
2356 goto error;
2357 }
2358
2359 strncpy(ua_sess->root_shm_path, usess->root_shm_path,
2360 sizeof(ua_sess->root_shm_path));
2361 ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
2362 strncpy(ua_sess->shm_path, usess->shm_path,
2363 sizeof(ua_sess->shm_path));
2364 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
2365 if (ua_sess->shm_path[0]) {
2366 switch (ua_sess->buffer_type) {
2367 case LTTNG_BUFFER_PER_PID:
2368 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
2369 "/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
2370 app->name, app->pid, datetime);
2371 break;
2372 case LTTNG_BUFFER_PER_UID:
2373 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
2374 "/" DEFAULT_UST_TRACE_UID_PATH,
2375 app->uid, app->bits_per_long);
2376 break;
2377 default:
2378 assert(0);
2379 goto error;
2380 }
2381 if (ret < 0) {
2382 PERROR("snprintf UST shadow copy session");
2383 assert(0);
2384 goto error;
2385 }
2386 strncat(ua_sess->shm_path, tmp_shm_path,
2387 sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
2388 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
2389 }
2390 return;
2391
2392error:
2393 consumer_output_put(ua_sess->consumer);
2394}
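/*
 * Illustrative result of the path construction above for a hypothetical
 * app "myapp" (pid 1234, uid 1000, 64-bit, registered 2021-03-09
 * 12:00:00), assuming DEFAULT_UST_TRACE_PID_PATH and
 * DEFAULT_UST_TRACE_UID_PATH expand to "ust/pid" and "ust/uid/%d/%u-bit"
 * respectively:
 *
 *   LTTNG_BUFFER_PER_PID: "ust/pid/myapp-1234-20210309-120000"
 *   LTTNG_BUFFER_PER_UID: "ust/uid/1000/64-bit"
 *
 * The domain subdirectory ("ust") appears once in the prefix; appending
 * it again elsewhere would produce the duplicated "ust/ust" paths that
 * this change addresses.
 */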
2395
2396/*
2397 * Session lookup wrapper.
2398 */
2399static
2400void __lookup_session_by_app(const struct ltt_ust_session *usess,
2401 struct ust_app *app, struct lttng_ht_iter *iter)
2402{
2403 /* Get right UST app session from app */
2404 lttng_ht_lookup(app->sessions, &usess->id, iter);
2405}
2406
2407/*
2408 * Return ust app session from the app session hashtable using the UST session
2409 * id.
2410 */
2411static struct ust_app_session *lookup_session_by_app(
2412 const struct ltt_ust_session *usess, struct ust_app *app)
2413{
2414 struct lttng_ht_iter iter;
2415 struct lttng_ht_node_u64 *node;
2416
2417 __lookup_session_by_app(usess, app, &iter);
2418 node = lttng_ht_iter_get_node_u64(&iter);
2419 if (node == NULL) {
2420 goto error;
2421 }
2422
2423 return caa_container_of(node, struct ust_app_session, node);
2424
2425error:
2426 return NULL;
2427}
2428
2429/*
2430 * Setup buffer registry per PID for the given session and application. If none
2431 * is found, a new one is created, added to the global registry and
2432 * initialized. If regp is valid, it's set with the newly created object.
2433 *
2434 * Return 0 on success or else a negative value.
2435 */
2436static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
2437 struct ust_app *app, struct buffer_reg_pid **regp)
2438{
2439 int ret = 0;
2440 struct buffer_reg_pid *reg_pid;
2441
2442 assert(ua_sess);
2443 assert(app);
2444
2445 rcu_read_lock();
2446
2447 reg_pid = buffer_reg_pid_find(ua_sess->id);
2448 if (!reg_pid) {
2449 /*
2450 * This is the create channel path, meaning that if no registry is
2451 * available, we have to create one for this session.
2452 */
2453 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
2454 ua_sess->root_shm_path, ua_sess->shm_path);
2455 if (ret < 0) {
2456 goto error;
2457 }
2458 } else {
2459 goto end;
2460 }
2461
2462 /* Initialize registry. */
2463 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
2464 app->bits_per_long, app->uint8_t_alignment,
2465 app->uint16_t_alignment, app->uint32_t_alignment,
2466 app->uint64_t_alignment, app->long_alignment,
2467 app->byte_order, app->version.major, app->version.minor,
2468 reg_pid->root_shm_path, reg_pid->shm_path,
2469 lttng_credentials_get_uid(&ua_sess->effective_credentials),
2470 lttng_credentials_get_gid(&ua_sess->effective_credentials),
2471 ua_sess->tracing_id,
2472 app->uid);
2473 if (ret < 0) {
2474 /*
2475 * reg_pid->registry->reg.ust is NULL upon error, so we need to
2476 * destroy the buffer registry, because it is always expected
2477 * that if the buffer registry can be found, its ust registry is
2478 * non-NULL.
2479 */
2480 buffer_reg_pid_destroy(reg_pid);
2481 goto error;
2482 }
2483
2484 buffer_reg_pid_add(reg_pid);
2485
2486 DBG3("UST app buffer registry per PID created successfully");
2487
2488end:
2489 if (regp) {
2490 *regp = reg_pid;
2491 }
2492error:
2493 rcu_read_unlock();
2494 return ret;
2495}
2496
2497/*
2498 * Setup buffer registry per UID for the given session and application. If none
2499 * is found, a new one is created, added to the global registry and
2500 * initialized. If regp is valid, it's set with the newly created object.
2501 *
2502 * Return 0 on success or else a negative value.
2503 */
2504static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
2505 struct ust_app_session *ua_sess,
2506 struct ust_app *app, struct buffer_reg_uid **regp)
2507{
2508 int ret = 0;
2509 struct buffer_reg_uid *reg_uid;
2510
2511 assert(usess);
2512 assert(app);
2513
2514 rcu_read_lock();
2515
2516 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2517 if (!reg_uid) {
2518 /*
2519 * This is the create channel path, meaning that if no registry is
2520 * available, we have to create one for this session.
2521 */
2522 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
2523 LTTNG_DOMAIN_UST, &reg_uid,
2524 ua_sess->root_shm_path, ua_sess->shm_path);
2525 if (ret < 0) {
2526 goto error;
2527 }
2528 } else {
2529 goto end;
2530 }
2531
2532 /* Initialize registry. */
2533 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
2534 app->bits_per_long, app->uint8_t_alignment,
2535 app->uint16_t_alignment, app->uint32_t_alignment,
2536 app->uint64_t_alignment, app->long_alignment,
2537 app->byte_order, app->version.major,
2538 app->version.minor, reg_uid->root_shm_path,
2539 reg_uid->shm_path, usess->uid, usess->gid,
2540 ua_sess->tracing_id, app->uid);
2541 if (ret < 0) {
2542 /*
2543 * reg_uid->registry->reg.ust is NULL upon error, so we need to
2544 * destroy the buffer registry, because it is always expected
2545 * that if the buffer registry can be found, its ust registry is
2546 * non-NULL.
2547 */
2548 buffer_reg_uid_destroy(reg_uid, NULL);
2549 goto error;
2550 }
2551 /* Add node to teardown list of the session. */
2552 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
2553
2554 buffer_reg_uid_add(reg_uid);
2555
2556 DBG3("UST app buffer registry per UID created successfully");
2557end:
2558 if (regp) {
2559 *regp = reg_uid;
2560 }
2561error:
2562 rcu_read_unlock();
2563 return ret;
2564}
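/*
 * Keying recap for the two setup helpers above: per-PID registries are
 * looked up by the app session id alone, while per-UID registries are
 * keyed by (session id, bitness, uid), which lets 32-bit and 64-bit
 * applications of the same user keep separate buffer sets. A hedged
 * lookup sketch (hypothetical helper):
 */
static struct buffer_reg_uid *lookup_uid_registry_sketch(
 const struct ltt_ust_session *usess, const struct ust_app *app)
{
 /* Same triple as used by setup_buffer_reg_uid() above. */
 return buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
}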
2565
2566/*
2567 * Create a session on the tracer side for the given app.
2568 *
2569 * On success, ua_sess_ptr is populated with the session pointer or else left
2570 * untouched. If the session was created, is_created is set to 1. On error,
2571 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
2572 * be NULL.
2573 *
2574 * Returns 0 on success or else a negative code, either -ENOMEM or
2575 * -ENOTCONN, the default code when lttng_ust_ctl_create_session fails.
2576 */
2577static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
2578 struct ust_app *app, struct ust_app_session **ua_sess_ptr,
2579 int *is_created)
2580{
2581 int ret, created = 0;
2582 struct ust_app_session *ua_sess;
2583
2584 assert(usess);
2585 assert(app);
2586 assert(ua_sess_ptr);
2587
2588 health_code_update();
2589
2590 ua_sess = lookup_session_by_app(usess, app);
2591 if (ua_sess == NULL) {
2592 DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
2593 app->pid, usess->id);
2594 ua_sess = alloc_ust_app_session();
2595 if (ua_sess == NULL) {
2596 /* Only malloc can fail here, so something is really wrong. */
2597 ret = -ENOMEM;
2598 goto error;
2599 }
2600 shadow_copy_session(ua_sess, usess, app);
2601 created = 1;
2602 }
2603
2604 switch (usess->buffer_type) {
2605 case LTTNG_BUFFER_PER_PID:
2606 /* Init local registry. */
2607 ret = setup_buffer_reg_pid(ua_sess, app, NULL);
2608 if (ret < 0) {
2609 delete_ust_app_session(-1, ua_sess, app);
2610 goto error;
2611 }
2612 break;
2613 case LTTNG_BUFFER_PER_UID:
2614 /* Look for a global registry. If none exists, create one. */
2615 ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
2616 if (ret < 0) {
2617 delete_ust_app_session(-1, ua_sess, app);
2618 goto error;
2619 }
2620 break;
2621 default:
2622 assert(0);
2623 ret = -EINVAL;
2624 goto error;
2625 }
2626
2627 health_code_update();
2628
2629 if (ua_sess->handle == -1) {
2630 pthread_mutex_lock(&app->sock_lock);
2631 ret = lttng_ust_ctl_create_session(app->sock);
2632 pthread_mutex_unlock(&app->sock_lock);
2633 if (ret < 0) {
2634 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2635 ERR("Creating session for app pid %d with ret %d",
2636 app->pid, ret);
2637 } else {
2638 DBG("UST app creating session failed. Application is dead");
2639 /*
2640 * This is normal behavior, an application can die during the
2641 * creation process. Don't report an error so the execution can
2642 * continue normally. This will get flagged ENOTCONN and the
2643 * caller will handle it.
2644 */
2645 ret = 0;
2646 }
2647 delete_ust_app_session(-1, ua_sess, app);
2648 if (ret != -ENOMEM) {
2649 /*
2650 * The tracer is probably gone or hit an internal error, so let's
2651 * behave as if it will soon unregister or is no longer usable.
2652 */
2653 ret = -ENOTCONN;
2654 }
2655 goto error;
2656 }
2657
2658 ua_sess->handle = ret;
2659
2660 /* Add ust app session to app's HT */
2661 lttng_ht_node_init_u64(&ua_sess->node,
2662 ua_sess->tracing_id);
2663 lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
2664 lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
2665 lttng_ht_add_unique_ulong(app->ust_sessions_objd,
2666 &ua_sess->ust_objd_node);
2667
2668 DBG2("UST app session created successfully with handle %d", ret);
2669 }
2670
2671 *ua_sess_ptr = ua_sess;
2672 if (is_created) {
2673 *is_created = created;
2674 }
2675
2676 /* Everything went well. */
2677 ret = 0;
2678
2679error:
2680 health_code_update();
2681 return ret;
2682}
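/*
 * Hedged caller sketch for the helper above (hypothetical function, not
 * part of this file's control flow): the -ENOTCONN convention lets
 * callers treat a concurrently-exiting application as a non-event.
 */
static int sync_app_session_sketch(struct ltt_ust_session *usess,
 struct ust_app *app)
{
 struct ust_app_session *ua_sess;
 int is_created, ret;

 ret = find_or_create_ust_app_session(usess, app, &ua_sess, &is_created);
 if (ret == -ENOTCONN) {
 /* The application exited; nothing to clean up on our side. */
 ret = 0;
 }
 return ret;
}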
2683
2684/*
2685 * Match function for a hash table lookup of ust_app_ctx.
2686 *
2687 * It matches an ust app context based on the context type and, in the case
2688 * of perf counters, their name.
2689 */
2690static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2691{
2692 struct ust_app_ctx *ctx;
2693 const struct lttng_ust_context_attr *key;
2694
2695 assert(node);
2696 assert(_key);
2697
2698 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2699 key = _key;
2700
2701 /* Context type */
2702 if (ctx->ctx.ctx != key->ctx) {
2703 goto no_match;
2704 }
2705
2706 switch(key->ctx) {
2707 case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
2708 if (strncmp(key->u.perf_counter.name,
2709 ctx->ctx.u.perf_counter.name,
2710 sizeof(key->u.perf_counter.name))) {
2711 goto no_match;
2712 }
2713 break;
2714 case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
2715 if (strcmp(key->u.app_ctx.provider_name,
2716 ctx->ctx.u.app_ctx.provider_name) ||
2717 strcmp(key->u.app_ctx.ctx_name,
2718 ctx->ctx.u.app_ctx.ctx_name)) {
2719 goto no_match;
2720 }
2721 break;
2722 default:
2723 break;
2724 }
2725
2726 /* Match. */
2727 return 1;
2728
2729no_match:
2730 return 0;
2731}
2732
2733/*
2734 * Lookup for an ust app context from an lttng_ust_context.
2735 *
2736 * Must be called while holding RCU read side lock.
2737 * Return a ust_app_ctx object or NULL if not found.
2738 */
2739static
2740struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
2741 struct lttng_ust_context_attr *uctx)
2742{
2743 struct lttng_ht_iter iter;
2744 struct lttng_ht_node_ulong *node;
2745 struct ust_app_ctx *app_ctx = NULL;
2746
2747 assert(uctx);
2748 assert(ht);
2749
2750 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2751 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2752 ht_match_ust_app_ctx, uctx, &iter.iter);
2753 node = lttng_ht_iter_get_node_ulong(&iter);
2754 if (!node) {
2755 goto end;
2756 }
2757
2758 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2759
2760end:
2761 return app_ctx;
2762}
2763
2764/*
2765 * Create a context for the channel on the tracer.
2766 *
2767 * Called with UST app session lock held and a RCU read side lock.
2768 */
2769static
2770int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
2771 struct lttng_ust_context_attr *uctx,
2772 struct ust_app *app)
2773{
2774 int ret = 0;
2775 struct ust_app_ctx *ua_ctx;
2776
2777 DBG2("UST app adding context to channel %s", ua_chan->name);
2778
2779 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2780 if (ua_ctx) {
2781 ret = -EEXIST;
2782 goto error;
2783 }
2784
2785 ua_ctx = alloc_ust_app_ctx(uctx);
2786 if (ua_ctx == NULL) {
2787 /* malloc failed */
2788 ret = -ENOMEM;
2789 goto error;
2790 }
2791
2792 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
2793 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
2794 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
2795
2796 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2797 if (ret < 0) {
2798 goto error;
2799 }
2800
2801error:
2802 return ret;
2803}
2804
2805/*
2806 * Enable on the tracer side a ust app event for the session and channel.
2807 *
2808 * Called with UST app session lock held.
2809 */
2810static
2811int enable_ust_app_event(struct ust_app_session *ua_sess,
2812 struct ust_app_event *ua_event, struct ust_app *app)
2813{
2814 int ret;
2815
2816 ret = enable_ust_object(app, ua_event->obj);
2817 if (ret < 0) {
2818 goto error;
2819 }
2820
2821 ua_event->enabled = 1;
2822
2823error:
2824 return ret;
2825}
2826
2827/*
2828 * Disable on the tracer side a ust app event for the session and channel.
2829 */
2830static int disable_ust_app_event(struct ust_app_session *ua_sess,
2831 struct ust_app_event *ua_event, struct ust_app *app)
2832{
2833 int ret;
2834
2835 ret = disable_ust_object(app, ua_event->obj);
2836 if (ret < 0) {
2837 goto error;
2838 }
2839
2840 ua_event->enabled = 0;
2841
2842error:
2843 return ret;
2844}
2845
2846/*
2847 * Lookup ust app channel for session and disable it on the tracer side.
2848 */
2849static
2850int disable_ust_app_channel(struct ust_app_session *ua_sess,
2851 struct ust_app_channel *ua_chan, struct ust_app *app)
2852{
2853 int ret;
2854
2855 ret = disable_ust_channel(app, ua_sess, ua_chan);
2856 if (ret < 0) {
2857 goto error;
2858 }
2859
2860 ua_chan->enabled = 0;
2861
2862error:
2863 return ret;
2864}
2865
2866/*
2867 * Lookup ust app channel for session and enable it on the tracer side. This
2868 * MUST be called with a RCU read side lock acquired.
2869 */
2870static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2871 struct ltt_ust_channel *uchan, struct ust_app *app)
2872{
2873 int ret = 0;
2874 struct lttng_ht_iter iter;
2875 struct lttng_ht_node_str *ua_chan_node;
2876 struct ust_app_channel *ua_chan;
2877
2878 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2879 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2880 if (ua_chan_node == NULL) {
2881 DBG2("Unable to find channel %s in ust session id %" PRIu64,
2882 uchan->name, ua_sess->tracing_id);
2883 goto error;
2884 }
2885
2886 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2887
2888 ret = enable_ust_channel(app, ua_sess, ua_chan);
2889 if (ret < 0) {
2890 goto error;
2891 }
2892
2893error:
2894 return ret;
2895}
2896
2897/*
2898 * Ask the consumer to create a channel and get it if successful.
2899 *
2900 * Called with UST app session lock held.
2901 *
2902 * Return 0 on success or else a negative value.
2903 */
2904static int do_consumer_create_channel(struct ltt_ust_session *usess,
2905 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
2906 int bitness, struct ust_registry_session *registry,
2907 uint64_t trace_archive_id)
2908{
2909 int ret;
2910 unsigned int nb_fd = 0;
2911 struct consumer_socket *socket;
2912
2913 assert(usess);
2914 assert(ua_sess);
2915 assert(ua_chan);
2916 assert(registry);
2917
2918 rcu_read_lock();
2919 health_code_update();
2920
2921 /* Get the right consumer socket for the application. */
2922 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
2923 if (!socket) {
2924 ret = -EINVAL;
2925 goto error;
2926 }
2927
2928 health_code_update();
2929
2930 /* Need one fd for the channel. */
2931 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2932 if (ret < 0) {
2933 ERR("Exhausted number of available FD upon create channel");
2934 goto error;
2935 }
2936
2937 /*
2938 * Ask the consumer to create the channel. The consumer will return the
2939 * number of streams we have to expect.
2940 */
2941 ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
2942 registry, usess->current_trace_chunk);
2943 if (ret < 0) {
2944 goto error_ask;
2945 }
2946
2947 /*
2948 * Compute the number of fds needed before receiving them. It must be 2
2949 * per stream (DEFAULT_UST_STREAM_FD_NUM).
2950 */
2951 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
2952
2953 /* Reserve the number of file descriptors we need. */
2954 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
2955 if (ret < 0) {
2956 ERR("Exhausted number of available FD upon create channel");
2957 goto error_fd_get_stream;
2958 }
2959
2960 health_code_update();
2961
2962 /*
2963 * Now get the channel from the consumer. This call will populate the stream
2964 * list of that channel and set the ust objects.
2965 */
2966 if (usess->consumer->enabled) {
2967 ret = ust_consumer_get_channel(socket, ua_chan);
2968 if (ret < 0) {
2969 goto error_destroy;
2970 }
2971 }
2972
2973 rcu_read_unlock();
2974 return 0;
2975
2976error_destroy:
2977 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
2978error_fd_get_stream:
2979 /*
2980 * Initiate a channel destruction on the consumer since we had an error
2981 * handling it on our side. The return value is of no importance since we
2982 * already have a ret value set by the previous error that we need to
2983 * return.
2984 */
2985 (void) ust_consumer_destroy_channel(socket, ua_chan);
2986error_ask:
2987 lttng_fd_put(LTTNG_FD_APPS, 1);
2988error:
2989 health_code_update();
2990 rcu_read_unlock();
2991 return ret;
2992}
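/*
 * File descriptor budget of the function above, as a worked sketch
 * (hypothetical helper): one fd is charged for the channel itself, then
 * DEFAULT_UST_STREAM_FD_NUM (2) per expected stream. For example, a
 * channel announcing 4 streams charges 1 + 2 * 4 = 9 fds to
 * LTTNG_FD_APPS.
 */
static unsigned int channel_fd_budget_sketch(unsigned int expected_stream_count)
{
 return 1 + DEFAULT_UST_STREAM_FD_NUM * expected_stream_count;
}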
2993
2994/*
2995 * Duplicate the ust data object of the ust app stream and save it in the
2996 * buffer registry stream.
2997 *
2998 * Return 0 on success or else a negative value.
2999 */
3000static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
3001 struct ust_app_stream *stream)
3002{
3003 int ret;
3004
3005 assert(reg_stream);
3006 assert(stream);
3007
3008 /* Reserve the number of file descriptors we need. */
3009 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
3010 if (ret < 0) {
3011 ERR("Exhausted number of available FD upon duplicate stream");
3012 goto error;
3013 }
3014
3015 /* Duplicate object for stream once the original is in the registry. */
3016 ret = lttng_ust_ctl_duplicate_ust_object_data(&stream->obj,
3017 reg_stream->obj.ust);
3018 if (ret < 0) {
3019 ERR("Duplicate stream obj from %p to %p failed with ret %d",
3020 reg_stream->obj.ust, stream->obj, ret);
3021 lttng_fd_put(LTTNG_FD_APPS, 2);
3022 goto error;
3023 }
3024 stream->handle = stream->obj->handle;
3025
3026error:
3027 return ret;
3028}
3029
3030/*
3031 * Duplicate the ust data object of the ust app channel and save it in the
3032 * buffer registry channel.
3033 *
3034 * Return 0 on success or else a negative value.
3035 */
3036static int duplicate_channel_object(struct buffer_reg_channel *buf_reg_chan,
3037 struct ust_app_channel *ua_chan)
3038{
3039 int ret;
3040
3041 assert(buf_reg_chan);
3042 assert(ua_chan);
3043
3044 /* Need one fd for the channel. */
3045 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3046 if (ret < 0) {
3047 ERR("Exhausted number of available FD upon duplicate channel");
3048 goto error_fd_get;
3049 }
3050
3051 /* Duplicate object for the channel once the original is in the registry. */
3052 ret = lttng_ust_ctl_duplicate_ust_object_data(&ua_chan->obj, buf_reg_chan->obj.ust);
3053 if (ret < 0) {
3054 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
3055 buf_reg_chan->obj.ust, ua_chan->obj, ret);
3056 goto error;
3057 }
3058 ua_chan->handle = ua_chan->obj->handle;
3059
3060 return 0;
3061
3062error:
3063 lttng_fd_put(LTTNG_FD_APPS, 1);
3064error_fd_get:
3065 return ret;
3066}
3067
3068/*
3069 * For a given channel buffer registry, setup all streams of the given ust
3070 * application channel.
3071 *
3072 * Return 0 on success or else a negative value.
3073 */
3074static int setup_buffer_reg_streams(struct buffer_reg_channel *buf_reg_chan,
3075 struct ust_app_channel *ua_chan,
3076 struct ust_app *app)
3077{
3078 int ret = 0;
3079 struct ust_app_stream *stream, *stmp;
3080
3081 assert(buf_reg_chan);
3082 assert(ua_chan);
3083
3084 DBG2("UST app setup buffer registry stream");
3085
3086 /* Send all streams to application. */
3087 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
3088 struct buffer_reg_stream *reg_stream;
3089
3090 ret = buffer_reg_stream_create(&reg_stream);
3091 if (ret < 0) {
3092 goto error;
3093 }
3094
3095 /*
3096 * Keep original pointer and nullify it in the stream so the delete
3097 * stream call does not release the object.
3098 */
3099 reg_stream->obj.ust = stream->obj;
3100 stream->obj = NULL;
3101 buffer_reg_stream_add(reg_stream, buf_reg_chan);
3102
3103 /* We don't need the streams anymore. */
3104 cds_list_del(&stream->list);
3105 delete_ust_app_stream(-1, stream, app);
3106 }
3107
3108error:
3109 return ret;
3110}
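/*
 * The loop above moves ownership of each ust object into the registry by
 * stealing the pointer and nullifying the app-side stream, so the later
 * delete call cannot release it. The same hand-rolled move, as a sketch
 * (hypothetical helper):
 */
static inline void move_ust_object_sketch(struct lttng_ust_abi_object_data **dst,
 struct lttng_ust_abi_object_data **src)
{
 *dst = *src;
 *src = NULL;
}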
3111
3112/*
3113 * Create a buffer registry channel for the given session registry and
3114 * application channel object. If regp pointer is valid, it's set with the
3115 * created object. Note that it is NOT added to the session
3116 * registry hash table.
3117 *
3118 * Return 0 on success else a negative value.
3119 */
3120static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
3121 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
3122{
3123 int ret;
3124 struct buffer_reg_channel *buf_reg_chan = NULL;
3125
3126 assert(reg_sess);
3127 assert(ua_chan);
3128
3129 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
3130
3131 /* Create buffer registry channel. */
3132 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &buf_reg_chan);
3133 if (ret < 0) {
3134 goto error_create;
3135 }
3136 assert(buf_reg_chan);
3137 buf_reg_chan->consumer_key = ua_chan->key;
3138 buf_reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
3139 buf_reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
3140
3141 /* Create and add a channel registry to session. */
3142 ret = ust_registry_channel_add(reg_sess->reg.ust,
3143 ua_chan->tracing_channel_id);
3144 if (ret < 0) {
3145 goto error;
3146 }
3147 buffer_reg_channel_add(reg_sess, buf_reg_chan);
3148
3149 if (regp) {
3150 *regp = buf_reg_chan;
3151 }
3152
3153 return 0;
3154
3155error:
3156 /* Safe because the registry channel object was not added to any HT. */
3157 buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
3158error_create:
3159 return ret;
3160}
3161
3162/*
3163 * Setup buffer registry channel for the given session registry and application
3164 * channel object. If regp pointer is valid, it's set with the created object.
3165 *
3166 * Return 0 on success else a negative value.
3167 */
3168static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
3169 struct ust_app_channel *ua_chan, struct buffer_reg_channel *buf_reg_chan,
3170 struct ust_app *app)
3171{
3172 int ret;
3173
3174 assert(reg_sess);
3175 assert(buf_reg_chan);
3176 assert(ua_chan);
3177 assert(ua_chan->obj);
3178
3179 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
3180
3181 /* Setup all streams for the registry. */
3182 ret = setup_buffer_reg_streams(buf_reg_chan, ua_chan, app);
3183 if (ret < 0) {
3184 goto error;
3185 }
3186
3187 buf_reg_chan->obj.ust = ua_chan->obj;
3188 ua_chan->obj = NULL;
3189
3190 return 0;
3191
3192error:
3193 buffer_reg_channel_remove(reg_sess, buf_reg_chan);
3194 buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
3195 return ret;
3196}
3197
3198/*
3199 * Send buffer registry channel to the application.
3200 *
3201 * Return 0 on success else a negative value.
3202 */
3203static int send_channel_uid_to_ust(struct buffer_reg_channel *buf_reg_chan,
3204 struct ust_app *app, struct ust_app_session *ua_sess,
3205 struct ust_app_channel *ua_chan)
3206{
3207 int ret;
3208 struct buffer_reg_stream *reg_stream;
3209
3210 assert(buf_reg_chan);
3211 assert(app);
3212 assert(ua_sess);
3213 assert(ua_chan);
3214
3215 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
3216
3217 ret = duplicate_channel_object(buf_reg_chan, ua_chan);
3218 if (ret < 0) {
3219 goto error;
3220 }
3221
3222 /* Send channel to the application. */
3223 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
3224 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
3225 ret = -ENOTCONN; /* Caused by app exiting. */
3226 goto error;
3227 } else if (ret < 0) {
3228 goto error;
3229 }
3230
3231 health_code_update();
3232
3233 /* Send all streams to application. */
3234 pthread_mutex_lock(&buf_reg_chan->stream_list_lock);
3235 cds_list_for_each_entry(reg_stream, &buf_reg_chan->streams, lnode) {
3236 struct ust_app_stream stream;
3237
3238 ret = duplicate_stream_object(reg_stream, &stream);
3239 if (ret < 0) {
3240 goto error_stream_unlock;
3241 }
3242
3243 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
3244 if (ret < 0) {
3245 (void) release_ust_app_stream(-1, &stream, app);
3246 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
3247 ret = -ENOTCONN; /* Caused by app exiting. */
3248 }
3249 goto error_stream_unlock;
3250 }
3251
3252 /*
3253 * The return value is not important here. This function will output an
3254 * error if needed.
3255 */
3256 (void) release_ust_app_stream(-1, &stream, app);
3257 }
3258 ua_chan->is_sent = 1;
3259
3260error_stream_unlock:
3261 pthread_mutex_unlock(&buf_reg_chan->stream_list_lock);
3262error:
3263 return ret;
3264}
3265
3266/*
3267 * Create and send to the application the created buffers with per UID buffers.
3268 *
3269 * This MUST be called with a RCU read side lock acquired.
3270 * The session list lock and the session's lock must be acquired.
3271 *
3272 * Return 0 on success else a negative value.
3273 */
3274static int create_channel_per_uid(struct ust_app *app,
3275 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3276 struct ust_app_channel *ua_chan)
3277{
3278 int ret;
3279 struct buffer_reg_uid *reg_uid;
3280 struct buffer_reg_channel *buf_reg_chan;
3281 struct ltt_session *session = NULL;
3282 enum lttng_error_code notification_ret;
3283 struct ust_registry_channel *ust_reg_chan;
3284
3285 assert(app);
3286 assert(usess);
3287 assert(ua_sess);
3288 assert(ua_chan);
3289
3290 DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
3291
3292 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
3293 /*
3294 * The session creation handles the creation of this global registry
3295 * object. If none can be found, there is a code flow problem or a
3296 * teardown race.
3297 */
3298 assert(reg_uid);
3299
3300 buf_reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
3301 reg_uid);
3302 if (buf_reg_chan) {
3303 goto send_channel;
3304 }
3305
3306 /* Create the buffer registry channel object. */
3307 ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &buf_reg_chan);
3308 if (ret < 0) {
3309 ERR("Error creating the UST channel \"%s\" registry instance",
3310 ua_chan->name);
3311 goto error;
3312 }
3313
3314 session = session_find_by_id(ua_sess->tracing_id);
3315 assert(session);
3316 assert(pthread_mutex_trylock(&session->lock));
3317 assert(session_trylock_list());
3318
3319 /*
3320 * Create the buffers on the consumer side. This call populates the
3321 * ust app channel object with all streams and data objects.
3322 */
3323 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
3324 app->bits_per_long, reg_uid->registry->reg.ust,
3325 session->most_recent_chunk_id.value);
3326 if (ret < 0) {
3327 ERR("Error creating UST channel \"%s\" on the consumer daemon",
3328 ua_chan->name);
3329
3330 /*
3331 * Let's remove the previously created buffer registry channel so
3332 * it's not visible anymore in the session registry.
3333 */
3334 ust_registry_channel_del_free(reg_uid->registry->reg.ust,
3335 ua_chan->tracing_channel_id, false);
3336 buffer_reg_channel_remove(reg_uid->registry, buf_reg_chan);
3337 buffer_reg_channel_destroy(buf_reg_chan, LTTNG_DOMAIN_UST);
3338 goto error;
3339 }
3340
3341 /*
3342 * Set up the streams and add them to the session registry.
3343 */
3344 ret = setup_buffer_reg_channel(reg_uid->registry,
3345 ua_chan, buf_reg_chan, app);
3346 if (ret < 0) {
3347 ERR("Error setting up UST channel \"%s\"", ua_chan->name);
3348 goto error;
3349 }
3350
3351 /* Notify the notification subsystem of the channel's creation. */
3352 pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
3353 ust_reg_chan = ust_registry_channel_find(reg_uid->registry->reg.ust,
3354 ua_chan->tracing_channel_id);
3355 assert(ust_reg_chan);
3356 ust_reg_chan->consumer_key = ua_chan->key;
3357 ust_reg_chan = NULL;
3358 pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);
3359
3360 notification_ret = notification_thread_command_add_channel(
3361 the_notification_thread_handle, session->name,
3362 lttng_credentials_get_uid(
3363 &ua_sess->effective_credentials),
3364 lttng_credentials_get_gid(
3365 &ua_sess->effective_credentials),
3366 ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST,
3367 ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
3368 if (notification_ret != LTTNG_OK) {
3369 ret = - (int) notification_ret;
3370 ERR("Failed to add channel to notification thread");
3371 goto error;
3372 }
3373
3374send_channel:
3375 /* Send buffers to the application. */
3376 ret = send_channel_uid_to_ust(buf_reg_chan, app, ua_sess, ua_chan);
3377 if (ret < 0) {
3378 if (ret != -ENOTCONN) {
3379 ERR("Error sending channel to application");
3380 }
3381 goto error;
3382 }
3383
3384error:
3385 if (session) {
3386 session_put(session);
3387 }
3388 return ret;
3389}
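/*
 * Per-UID sharing in practice (hypothetical scenario): the first 64-bit
 * app of uid 1000 reaching this function finds no registry channel, so it
 * creates the consumer buffers and registers them; a second 64-bit app of
 * the same uid finds the registry channel and jumps straight to
 * send_channel, sharing the existing buffer set instead of creating one.
 */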
3390
3391/*
3392 * Create and send to the application the created buffers with per PID buffers.
3393 *
3394 * Called with UST app session lock held.
3395 * The session list lock and the session's lock must be acquired.
3396 *
3397 * Return 0 on success else a negative value.
3398 */
3399static int create_channel_per_pid(struct ust_app *app,
3400 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3401 struct ust_app_channel *ua_chan)
3402{
3403 int ret;
3404 struct ust_registry_session *registry;
3405 enum lttng_error_code cmd_ret;
3406 struct ltt_session *session = NULL;
3407 uint64_t chan_reg_key;
3408 struct ust_registry_channel *ust_reg_chan;
3409
3410 assert(app);
3411 assert(usess);
3412 assert(ua_sess);
3413 assert(ua_chan);
3414
3415 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
3416
3417 rcu_read_lock();
3418
3419 registry = get_session_registry(ua_sess);
3420 /* The UST app session lock is held, registry shall not be null. */
3421 assert(registry);
3422
3423 /* Create and add a new channel registry to session. */
3424 ret = ust_registry_channel_add(registry, ua_chan->key);
3425 if (ret < 0) {
3426 ERR("Error creating the UST channel \"%s\" registry instance",
3427 ua_chan->name);
3428 goto error;
3429 }
3430
3431 session = session_find_by_id(ua_sess->tracing_id);
3432 assert(session);
3433
3434 assert(pthread_mutex_trylock(&session->lock));
3435 assert(session_trylock_list());
3436
3437 /* Create and get channel on the consumer side. */
3438 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
3439 app->bits_per_long, registry,
3440 session->most_recent_chunk_id.value);
3441 if (ret < 0) {
3442 ERR("Error creating UST channel \"%s\" on the consumer daemon",
3443 ua_chan->name);
3444 goto error_remove_from_registry;
3445 }
3446
3447 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
3448 if (ret < 0) {
3449 if (ret != -ENOTCONN) {
3450 ERR("Error sending channel to application");
3451 }
3452 goto error_remove_from_registry;
3453 }
3454
3455 chan_reg_key = ua_chan->key;
3456 pthread_mutex_lock(&registry->lock);
3457 ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
3458 assert(ust_reg_chan);
3459 ust_reg_chan->consumer_key = ua_chan->key;
3460 pthread_mutex_unlock(&registry->lock);
3461
3462 cmd_ret = notification_thread_command_add_channel(
3463 the_notification_thread_handle, session->name,
3464 lttng_credentials_get_uid(
3465 &ua_sess->effective_credentials),
3466 lttng_credentials_get_gid(
3467 &ua_sess->effective_credentials),
3468 ua_chan->name, ua_chan->key, LTTNG_DOMAIN_UST,
3469 ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
3470 if (cmd_ret != LTTNG_OK) {
3471 ret = - (int) cmd_ret;
3472 ERR("Failed to add channel to notification thread");
3473 goto error_remove_from_registry;
3474 }
3475
3476error_remove_from_registry:
3477 if (ret) {
3478 ust_registry_channel_del_free(registry, ua_chan->key, false);
3479 }
3480error:
3481 rcu_read_unlock();
3482 if (session) {
3483 session_put(session);
3484 }
3485 return ret;
3486}
3487
3488/*
3489 * From an already allocated ust app channel, create the channel buffers if
3490 * needed and send them to the application. This MUST be called with a RCU read
3491 * side lock acquired.
3492 *
3493 * Called with UST app session lock held.
3494 *
3495 * Return 0 on success or else a negative value. Returns -ENOTCONN if
3496 * the application exited concurrently.
3497 */
3498static int ust_app_channel_send(struct ust_app *app,
3499 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3500 struct ust_app_channel *ua_chan)
3501{
3502 int ret;
3503
3504 assert(app);
3505 assert(usess);
3506 assert(usess->active);
3507 assert(ua_sess);
3508 assert(ua_chan);
3509
3510 /* Handle buffer type before sending the channel to the application. */
3511 switch (usess->buffer_type) {
3512 case LTTNG_BUFFER_PER_UID:
3513 {
3514 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
3515 if (ret < 0) {
3516 goto error;
3517 }
3518 break;
3519 }
3520 case LTTNG_BUFFER_PER_PID:
3521 {
3522 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
3523 if (ret < 0) {
3524 goto error;
3525 }
3526 break;
3527 }
3528 default:
3529 assert(0);
3530 ret = -EINVAL;
3531 goto error;
3532 }
3533
3534 /* Initialize ust objd object using the received handle and add it. */
3535 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
3536 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
3537
3538 /* If channel is not enabled, disable it on the tracer */
3539 if (!ua_chan->enabled) {
3540 ret = disable_ust_channel(app, ua_sess, ua_chan);
3541 if (ret < 0) {
3542 goto error;
3543 }
3544 }
3545
3546error:
3547 return ret;
3548}
3549
3550/*
3551 * Create UST app channel and return it through ua_chanp if not NULL.
3552 *
3553 * Called with UST app session lock and RCU read-side lock held.
3554 *
3555 * Return 0 on success or else a negative value.
3556 */
3557static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
3558 struct ltt_ust_channel *uchan,
3559 enum lttng_ust_abi_chan_type type, struct ltt_ust_session *usess,
3560 struct ust_app_channel **ua_chanp)
3561{
3562 int ret = 0;
3563 struct lttng_ht_iter iter;
3564 struct lttng_ht_node_str *ua_chan_node;
3565 struct ust_app_channel *ua_chan;
3566
3567 /* Lookup channel in the ust app session */
3568 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
3569 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
3570 if (ua_chan_node != NULL) {
3571 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3572 goto end;
3573 }
3574
3575 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
3576 if (ua_chan == NULL) {
3577 /* Only malloc can fail here */
3578 ret = -ENOMEM;
3579 goto error;
3580 }
3581 shadow_copy_channel(ua_chan, uchan);
3582
3583 /* Set channel type. */
3584 ua_chan->attr.type = type;
3585
3586 /* Add the channel to the session's channel hash table. */
3587 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
3588end:
3589 if (ua_chanp) {
3590 *ua_chanp = ua_chan;
3591 }
3592
3593 /* Everything went well. */
3594 return 0;
3595
3596error:
3597 return ret;
3598}
3599
3600/*
3601 * Create UST app event and create it on the tracer side.
3602 *
3603 * Must be called with the RCU read side lock held.
3604 * Called with ust app session mutex held.
3605 */
3606static
3607int create_ust_app_event(struct ust_app_session *ua_sess,
3608 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
3609 struct ust_app *app)
3610{
3611 int ret = 0;
3612 struct ust_app_event *ua_event;
3613
3614 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
3615 if (ua_event == NULL) {
3616 /* Only failure mode of alloc_ust_app_event(). */
3617 ret = -ENOMEM;
3618 goto end;
3619 }
3620 shadow_copy_event(ua_event, uevent);
3621
3622 /* Create it on the tracer side */
3623 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
3624 if (ret < 0) {
3625 /*
3626 * The event was not found previously, so it should not exist on
3627 * the tracer. If the application reports that the event existed,
3628 * it means there is a bug in the sessiond or lttng-ust
3629 * (or corruption, etc.)
3630 */
3631 if (ret == -LTTNG_UST_ERR_EXIST) {
3632 ERR("Tracer for application reported that an event being created already existed: "
3633 "event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d",
3634 uevent->attr.name,
3635 app->pid, app->ppid, app->uid,
3636 app->gid);
3637 }
3638 goto error;
3639 }
3640
3641 add_unique_ust_app_event(ua_chan, ua_event);
3642
3643 DBG2("UST app create event completed: app = '%s' (ppid: %d)",
3644 app->name, app->ppid);
3645
3646end:
3647 return ret;
3648
3649error:
3650 /* The caller already holds the RCU read side lock. */
3651 delete_ust_app_event(-1, ua_event, app);
3652 return ret;
3653}
3654
3655/*
3656 * Create UST app event notifier rule and create it on the tracer side.
3657 *
3658 * Must be called with the RCU read side lock held.
3659 * Called with ust app session mutex held.
3660 */
3661static
3662int create_ust_app_event_notifier_rule(struct lttng_trigger *trigger,
3663 struct ust_app *app)
3664{
3665 int ret = 0;
3666 struct ust_app_event_notifier_rule *ua_event_notifier_rule;
3667
3668 ua_event_notifier_rule = alloc_ust_app_event_notifier_rule(trigger);
3669 if (ua_event_notifier_rule == NULL) {
3670 ret = -ENOMEM;
3671 goto end;
3672 }
3673
3674 /* Create it on the tracer side. */
3675 ret = create_ust_event_notifier(app, ua_event_notifier_rule);
3676 if (ret < 0) {
3677 /*
3678 * The event notifier was not found previously, so it should not
3679 * exist on the tracer. If the application reports that it existed,
3680 * it means there is a bug in the sessiond or lttng-ust
3681 * (or corruption, etc.)
3682 */
3683 if (ret == -LTTNG_UST_ERR_EXIST) {
3684 ERR("Tracer for application reported that an event notifier being created already exists: "
3685 "token = \"%" PRIu64 "\", pid = %d, ppid = %d, uid = %d, gid = %d",
3686 lttng_trigger_get_tracer_token(trigger),
3687 app->pid, app->ppid, app->uid,
3688 app->gid);
3689 }
3690 goto error;
3691 }
3692
3693 lttng_ht_add_unique_u64(app->token_to_event_notifier_rule_ht,
3694 &ua_event_notifier_rule->node);
3695
3696 DBG2("UST app create token event rule completed: app = '%s' (ppid: %d), token = %" PRIu64,
3697 app->name, app->ppid, lttng_trigger_get_tracer_token(trigger));
3698
3699 goto end;
3700
3701error:
3702 /* The RCU read side lock is already being held by the caller. */
3703 delete_ust_app_event_notifier_rule(-1, ua_event_notifier_rule, app);
3704end:
3705 return ret;
3706}
3707
3708/*
3709 * Create UST metadata and open it on the tracer side.
3710 *
3711 * Called with UST app session lock held and RCU read side lock.
3712 */
3713static int create_ust_app_metadata(struct ust_app_session *ua_sess,
3714 struct ust_app *app, struct consumer_output *consumer)
3715{
3716 int ret = 0;
3717 struct ust_app_channel *metadata;
3718 struct consumer_socket *socket;
3719 struct ust_registry_session *registry;
3720 struct ltt_session *session = NULL;
3721
3722 assert(ua_sess);
3723 assert(app);
3724 assert(consumer);
3725
3726 registry = get_session_registry(ua_sess);
3727 /* The UST app session lock is held; the registry shall not be null. */
3728 assert(registry);
3729
3730 pthread_mutex_lock(&registry->lock);
3731
3732 /* Metadata already exists for this registry or it was closed previously */
3733 if (registry->metadata_key || registry->metadata_closed) {
3734 ret = 0;
3735 goto error;
3736 }
3737
3738 /* Allocate UST metadata */
3739 metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
3740 if (!metadata) {
3741 /* malloc() failed */
3742 ret = -ENOMEM;
3743 goto error;
3744 }
3745
3746 memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));
3747
3748 /* Need one fd for the channel. */
3749 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3750 if (ret < 0) {
3751 ERR("Exhausted number of available FD upon create metadata");
3752 goto error;
3753 }
3754
3755 /* Get the right consumer socket for the application. */
3756 socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
3757 if (!socket) {
3758 ret = -EINVAL;
3759 goto error_consumer;
3760 }
3761
3762 /*
3763 * Keep metadata key so we can identify it on the consumer side. Assign it
3764 * to the registry *before* we ask the consumer so we avoid the race where
3765 * the consumer requests the metadata before the ask_channel call on our
3766 * side has returned.
3767 */
3768 registry->metadata_key = metadata->key;
3769
3770 session = session_find_by_id(ua_sess->tracing_id);
3771 assert(session);
3772
3773 assert(pthread_mutex_trylock(&session->lock));
3774 assert(session_trylock_list());
3775
3776 /*
3777 * Ask the consumer to create the metadata channel. The metadata object
3778 * will be created by the consumer and kept there. However, the stream is
3779 * never added or monitored until we do a first push metadata to the
3780 * consumer.
3781 */
3782 ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
3783 registry, session->current_trace_chunk);
3784 if (ret < 0) {
3785 /* Nullify the metadata key so we don't try to close it later on. */
3786 registry->metadata_key = 0;
3787 goto error_consumer;
3788 }
3789
3790 /*
3791 * The setup command will make the metadata stream be sent to the relayd,
3792 * if applicable, and to the thread managing the metadata. This is important
3793 * because after this point, if an error occurs, the only way the stream
3794 * can be deleted is to be monitored in the consumer.
3795 */
3796 ret = consumer_setup_metadata(socket, metadata->key);
3797 if (ret < 0) {
3798 /* Nullify the metadata key so we don't try to close it later on. */
3799 registry->metadata_key = 0;
3800 goto error_consumer;
3801 }
3802
3803 DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
3804 metadata->key, app->pid);
3805
3806error_consumer:
3807 lttng_fd_put(LTTNG_FD_APPS, 1);
3808 delete_ust_app_channel(-1, metadata, app);
3809error:
3810 pthread_mutex_unlock(&registry->lock);
3811 if (session) {
3812 session_put(session);
3813 }
3814 return ret;
3815}
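
/*
 * Locking sketch for create_ust_app_metadata(), inferred from the
 * assertions above (illustrative only, not a prescribed call sequence):
 *
 *   session_lock_list();
 *   session_lock(session);
 *   pthread_mutex_lock(&ua_sess->lock);
 *   rcu_read_lock();
 *   ret = create_ust_app_metadata(ua_sess, app, consumer);
 *   ...release in reverse order...
 */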
3816
3817/*
3818 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
3819 * acquired before calling this function.
3820 */
3821struct ust_app *ust_app_find_by_pid(pid_t pid)
3822{
3823 struct ust_app *app = NULL;
3824 struct lttng_ht_node_ulong *node;
3825 struct lttng_ht_iter iter;
3826
3827 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
3828 node = lttng_ht_iter_get_node_ulong(&iter);
3829 if (node == NULL) {
3830 DBG2("UST app no found with pid %d", pid);
3831 goto error;
3832 }
3833
3834 DBG2("Found UST app by pid %d", pid);
3835
3836 app = caa_container_of(node, struct ust_app, pid_n);
3837
3838error:
3839 return app;
3840}
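
/*
 * Usage sketch (illustrative): the returned pointer is only protected
 * by RCU, so both the lookup and any dereference must sit inside an
 * RCU read-side critical section:
 *
 *   rcu_read_lock();
 *   app = ust_app_find_by_pid(pid);
 *   if (app) {
 *           ... use app ...
 *   }
 *   rcu_read_unlock();
 */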
3841
3842/*
3843 * Allocate and initialize a UST app object using the registration information and
3844 * the command socket. This is called when the command socket connects to the
3845 * session daemon.
3846 *
3847 * The object is returned on success or else NULL.
3848 */
3849struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
3850{
3851 int ret;
3852 struct ust_app *lta = NULL;
3853 struct lttng_pipe *event_notifier_event_source_pipe = NULL;
3854
3855 assert(msg);
3856 assert(sock >= 0);
3857
3858 DBG3("UST app creating application for socket %d", sock);
3859
3860 if ((msg->bits_per_long == 64 &&
3861 (uatomic_read(&the_ust_consumerd64_fd) ==
3862 -EINVAL)) ||
3863 (msg->bits_per_long == 32 &&
3864 (uatomic_read(&the_ust_consumerd32_fd) ==
3865 -EINVAL))) {
3866 ERR("Registration failed: application \"%s\" (pid: %d) has "
3867 "%d-bit long, but no consumerd for this size is available.\n",
3868 msg->name, msg->pid, msg->bits_per_long);
3869 goto error;
3870 }
3871
3872 /*
3873 * Reserve the two file descriptors of the event source pipe. The write
3874 * end will be closed once it is passed to the application, at which
3875 * point a single 'put' will be performed.
3876 */
3877 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
3878 if (ret) {
3879 ERR("Failed to reserve two file descriptors for the event source pipe while creating a new application instance: app = '%s' (ppid: %d)",
3880 msg->name, (int) msg->ppid);
3881 goto error;
3882 }
3883
3884 event_notifier_event_source_pipe = lttng_pipe_open(FD_CLOEXEC);
3885 if (!event_notifier_event_source_pipe) {
3886 PERROR("Failed to open application event source pipe: '%s' (ppid = %d)",
3887 msg->name, msg->ppid);
3888 goto error;
3889 }
3890
3891 lta = zmalloc(sizeof(struct ust_app));
3892 if (lta == NULL) {
3893 PERROR("malloc");
3894 goto error_free_pipe;
3895 }
3896
3897 lta->event_notifier_group.event_pipe = event_notifier_event_source_pipe;
3898
3899 lta->ppid = msg->ppid;
3900 lta->uid = msg->uid;
3901 lta->gid = msg->gid;
3902
3903 lta->bits_per_long = msg->bits_per_long;
3904 lta->uint8_t_alignment = msg->uint8_t_alignment;
3905 lta->uint16_t_alignment = msg->uint16_t_alignment;
3906 lta->uint32_t_alignment = msg->uint32_t_alignment;
3907 lta->uint64_t_alignment = msg->uint64_t_alignment;
3908 lta->long_alignment = msg->long_alignment;
3909 lta->byte_order = msg->byte_order;
3910
3911 lta->v_major = msg->major;
3912 lta->v_minor = msg->minor;
3913 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3914 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3915 lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3916 lta->notify_sock = -1;
3917 lta->token_to_event_notifier_rule_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3918
3919 /* Copy name and make sure it's NULL terminated. */
3920 strncpy(lta->name, msg->name, sizeof(lta->name));
3921 lta->name[UST_APP_PROCNAME_LEN] = '\0';
3922
3923 /*
3924 * Before this can be called, when receiving the registration information,
3925 * the application compatibility is checked. So, at this point, the
3926 * application can work with this session daemon.
3927 */
3928 lta->compatible = 1;
3929
3930 lta->pid = msg->pid;
3931 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
3932 lta->sock = sock;
3933 pthread_mutex_init(&lta->sock_lock, NULL);
3934 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
3935
3936 CDS_INIT_LIST_HEAD(&lta->teardown_head);
3937 return lta;
3938
3939error_free_pipe:
3940 lttng_pipe_destroy(event_notifier_event_source_pipe);
3941 lttng_fd_put(LTTNG_FD_APPS, 2);
3942error:
3943 return NULL;
3944}
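
/*
 * Minimal registration sketch (illustrative; the exact sequence lives
 * in the registration threads): the object returned by ust_app_create()
 * is only published once its notify socket is known, since
 * ust_app_add() asserts notify_sock >= 0:
 *
 *   app = ust_app_create(&msg, sock);
 *   if (app) {
 *           app->notify_sock = notify_sock;
 *           ust_app_add(app);
 *   }
 */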
3945
3946/*
3947 * For a given application object, add it to every hash table.
3948 */
3949void ust_app_add(struct ust_app *app)
3950{
3951 assert(app);
3952 assert(app->notify_sock >= 0);
3953
3954 app->registration_time = time(NULL);
3955
3956 rcu_read_lock();
3957
3958 /*
3959 * On a re-registration, we want to kick out the previous registration of
3960 * that pid.
3961 */
3962 lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
3963
3964 /*
3965 * The socket _should_ be unique until _we_ call close. So, an add_unique
3966 * is used for ust_app_ht_by_sock, which triggers an assert failure if the
3967 * entry is already in the table.
3968 */
3969 lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
3970
3971 /* Add application to the notify socket hash table. */
3972 lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
3973 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
3974
3975 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
3976 "notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
3977 app->gid, app->sock, app->name, app->notify_sock, app->v_major,
3978 app->v_minor);
3979
3980 rcu_read_unlock();
3981}
3982
3983/*
3984 * Set the application version into the object.
3985 *
3986 * Return 0 on success, else a negative value: either an errno code or an
3987 * LTTng-UST error code.
3988 */
3989int ust_app_version(struct ust_app *app)
3990{
3991 int ret;
3992
3993 assert(app);
3994
3995 pthread_mutex_lock(&app->sock_lock);
3996 ret = lttng_ust_ctl_tracer_version(app->sock, &app->version);
3997 pthread_mutex_unlock(&app->sock_lock);
3998 if (ret < 0) {
3999 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4000 ERR("UST app %d version failed with ret %d", app->sock, ret);
4001 } else {
4002 DBG3("UST app %d version failed. Application is dead", app->sock);
4003 }
4004 }
4005
4006 return ret;
4007}
4008
4009/*
4010 * Setup the base event notifier group.
4011 *
4012 * Return 0 on success, else a negative value: either an errno code or an
4013 * LTTng-UST error code.
4014 */
4015int ust_app_setup_event_notifier_group(struct ust_app *app)
4016{
4017 int ret;
4018 int event_pipe_write_fd;
4019 struct lttng_ust_abi_object_data *event_notifier_group = NULL;
4020 enum lttng_error_code lttng_ret;
4021 enum event_notifier_error_accounting_status event_notifier_error_accounting_status;
4022
4023 assert(app);
4024
4025 /* Get the write side of the pipe. */
4026 event_pipe_write_fd = lttng_pipe_get_writefd(
4027 app->event_notifier_group.event_pipe);
4028
4029 pthread_mutex_lock(&app->sock_lock);
4030 ret = lttng_ust_ctl_create_event_notifier_group(app->sock,
4031 event_pipe_write_fd, &event_notifier_group);
4032 pthread_mutex_unlock(&app->sock_lock);
4033 if (ret < 0) {
4034 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4035 ERR("Failed to create application event notifier group: ret = %d, app socket fd = %d, event_pipe_write_fd = %d",
4036 ret, app->sock, event_pipe_write_fd);
4037 } else {
4038 DBG("Failed to create application event notifier group (application is dead): app socket fd = %d",
4039 app->sock);
4040 }
4041
4042 goto error;
4043 }
4044
4045 ret = lttng_pipe_write_close(app->event_notifier_group.event_pipe);
4046 if (ret) {
4047 ERR("Failed to close write end of the application's event source pipe: app = '%s' (ppid = %d)",
4048 app->name, app->ppid);
4049 goto error;
4050 }
4051
4052 /*
4053 * Release the file descriptor that was reserved for the write-end of
4054 * the pipe.
4055 */
4056 lttng_fd_put(LTTNG_FD_APPS, 1);
4057
4058 lttng_ret = notification_thread_command_add_tracer_event_source(
4059 the_notification_thread_handle,
4060 lttng_pipe_get_readfd(
4061 app->event_notifier_group.event_pipe),
4062 LTTNG_DOMAIN_UST);
4063 if (lttng_ret != LTTNG_OK) {
4064 ERR("Failed to add tracer event source to notification thread");
4065 ret = -1;
4066 goto error;
4067 }
4068
4069 /* Assign handle only when the complete setup is valid. */
4070 app->event_notifier_group.object = event_notifier_group;
4071
4072 event_notifier_error_accounting_status =
4073 event_notifier_error_accounting_register_app(app);
4074 if (event_notifier_error_accounting_status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
4075 if (event_notifier_error_accounting_status == EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_APP_DEAD) {
4076 DBG3("Failed to setup event notifier error accounting (application is dead): app socket fd = %d",
4077 app->sock);
4078 ret = 0;
4079 goto error_accounting;
4080 }
4081
4082 ERR("Failed to setup event notifier error accounting for app");
4083 ret = -1;
4084 goto error_accounting;
4085 }
4086
4087 return ret;
4088
4089error_accounting:
4090 lttng_ret = notification_thread_command_remove_tracer_event_source(
4091 the_notification_thread_handle,
4092 lttng_pipe_get_readfd(
4093 app->event_notifier_group.event_pipe));
4094 if (lttng_ret != LTTNG_OK) {
4095 ERR("Failed to remove application tracer event source from notification thread");
4096 }
4097
4098error:
4099 lttng_ust_ctl_release_object(app->sock, app->event_notifier_group.object);
4100 free(app->event_notifier_group.object);
4101 app->event_notifier_group.object = NULL;
4102 return ret;
4103}
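
/*
 * File descriptor accounting sketch for the event source pipe
 * (illustrative): two FDs are reserved in ust_app_create(); one 'put'
 * is done above once the write end has been passed to the application
 * and closed, and the remaining 'put' is assumed to happen when the
 * pipe is destroyed during application teardown:
 *
 *   lttng_fd_get(LTTNG_FD_APPS, 2);  // ust_app_create()
 *   lttng_fd_put(LTTNG_FD_APPS, 1);  // write end closed (above)
 *   lttng_fd_put(LTTNG_FD_APPS, 1);  // pipe destroyed on teardown (assumed)
 */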
4104
4105/*
4106 * Unregister app by removing it from the global traceable app list and freeing
4107 * the data struct.
4108 *
4109 * The socket is already closed at this point, so there is no need to close it.
4110 */
4111void ust_app_unregister(int sock)
4112{
4113 struct ust_app *lta;
4114 struct lttng_ht_node_ulong *node;
4115 struct lttng_ht_iter ust_app_sock_iter;
4116 struct lttng_ht_iter iter;
4117 struct ust_app_session *ua_sess;
4118 int ret;
4119
4120 rcu_read_lock();
4121
4122 /* Get the node reference for a call_rcu */
4123 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
4124 node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
4125 assert(node);
4126
4127 lta = caa_container_of(node, struct ust_app, sock_n);
4128 DBG("PID %d unregistering with sock %d", lta->pid, sock);
4129
4130 /*
4131 * For per-PID buffers, perform "push metadata" and flush all
4132 * application streams before removing app from hash tables,
4133 * ensuring proper behavior of data_pending check.
4134 * Remove sessions so they are not visible during deletion.
4135 */
4136 cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
4137 node.node) {
4138 struct ust_registry_session *registry;
4139
4140 ret = lttng_ht_del(lta->sessions, &iter);
4141 if (ret) {
4142 /* The session was already removed and is scheduled for teardown. */
4143 continue;
4144 }
4145
4146 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
4147 (void) ust_app_flush_app_session(lta, ua_sess);
4148 }
4149
4150 /*
4151 * Add session to list for teardown. This is safe since at this point we
4152 * are the only one using this list.
4153 */
4154 pthread_mutex_lock(&ua_sess->lock);
4155
4156 if (ua_sess->deleted) {
4157 pthread_mutex_unlock(&ua_sess->lock);
4158 continue;
4159 }
4160
4161 /*
4162 * Normally, this is done in the delete session process which is
4163 * executed in the call rcu below. However, upon unregistration we can't
4164 * afford to wait for the grace period before pushing data or else the
4165 * data pending feature can race between the unregistration and stop
4166 * command, where the data pending command is sent *before* the grace
4167 * period has ended.
4168 *
4169 * The close metadata below nullifies the metadata pointer in the
4170 * session so the delete session will NOT push/close a second time.
4171 */
4172 registry = get_session_registry(ua_sess);
4173 if (registry) {
4174 /* Push metadata for application before freeing the application. */
4175 (void) push_metadata(registry, ua_sess->consumer);
4176
4177 /*
4178 * Don't ask to close metadata for global per UID buffers. Close
4179 * metadata only when the trace session is destroyed in this case. Also,
4180 * the previous push metadata could have flagged the metadata registry to
4181 * close, so don't send a close command if it is already closed.
4182 */
4183 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
4184 /* And ask to close it for this session registry. */
4185 (void) close_metadata(registry, ua_sess->consumer);
4186 }
4187 }
4188 cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
4189
4190 pthread_mutex_unlock(&ua_sess->lock);
4191 }
4192
4193 /* Remove application from socket hash table */
4194 ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
4195 assert(!ret);
4196
4197 /*
4198 * Remove application from notify hash table. The thread handling the
4199 * notify socket could have deleted the node, so ignore any error because
4200 * either way it's valid. The close of that socket is handled by the
4201 * apps_notify_thread.
4202 */
4203 iter.iter.node = &lta->notify_sock_n.node;
4204 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
4205
4206 /*
4207 * Ignore the return value since the node might have been removed earlier
4208 * by an add-replace during app registration, because the PID can be
4209 * reassigned by the OS.
4210 */
4211 iter.iter.node = &lta->pid_n.node;
4212 ret = lttng_ht_del(ust_app_ht, &iter);
4213 if (ret) {
4214 DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
4215 lta->pid);
4216 }
4217
4218 /* Free memory */
4219 call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
4220
4221 rcu_read_unlock();
4222 return;
4223}
4224
4225/*
4226 * Fill the events array with the names of all events of all registered apps.
4227 */
4228int ust_app_list_events(struct lttng_event **events)
4229{
4230 int ret, handle;
4231 size_t nbmem, count = 0;
4232 struct lttng_ht_iter iter;
4233 struct ust_app *app;
4234 struct lttng_event *tmp_event;
4235
4236 nbmem = UST_APP_EVENT_LIST_SIZE;
4237 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
4238 if (tmp_event == NULL) {
4239 PERROR("zmalloc ust app events");
4240 ret = -ENOMEM;
4241 goto error;
4242 }
4243
4244 rcu_read_lock();
4245
4246 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4247 struct lttng_ust_abi_tracepoint_iter uiter;
4248
4249 health_code_update();
4250
4251 if (!app->compatible) {
4252 /*
4253 * TODO: In time, we should notify the caller of this error by
4254 * telling them that this is a version error.
4255 */
4256 continue;
4257 }
4258 pthread_mutex_lock(&app->sock_lock);
4259 handle = lttng_ust_ctl_tracepoint_list(app->sock);
4260 if (handle < 0) {
4261 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
4262 ERR("UST app list events getting handle failed for app pid %d",
4263 app->pid);
4264 }
4265 pthread_mutex_unlock(&app->sock_lock);
4266 continue;
4267 }
4268
4269 while ((ret = lttng_ust_ctl_tracepoint_list_get(app->sock, handle,
4270 &uiter)) != -LTTNG_UST_ERR_NOENT) {
4271 /* Handle ustctl error. */
4272 if (ret < 0) {
4273 int release_ret;
4274
4275 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4276 ERR("UST app tp list get failed for app %d with ret %d",
4277 app->sock, ret);
4278 } else {
4279 DBG3("UST app tp list get failed. Application is dead");
4280 /*
4281 * This is normal behavior, an application can die during the
4282 * creation process. Don't report an error so the execution can
4283 * continue normally.
4284 */
4285 break;
4286 }
4287 free(tmp_event);
4288 release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
4289 if (release_ret < 0 &&
4290 release_ret != -LTTNG_UST_ERR_EXITING &&
4291 release_ret != -EPIPE) {
4292 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4293 }
4294 pthread_mutex_unlock(&app->sock_lock);
4295 goto rcu_error;
4296 }
4297
4298 health_code_update();
4299 if (count >= nbmem) {
4300 /* In case the realloc fails, we free the memory */
4301 struct lttng_event *new_tmp_event;
4302 size_t new_nbmem;
4303
4304 new_nbmem = nbmem << 1;
4305 DBG2("Reallocating event list from %zu to %zu entries",
4306 nbmem, new_nbmem);
4307 new_tmp_event = realloc(tmp_event,
4308 new_nbmem * sizeof(struct lttng_event));
4309 if (new_tmp_event == NULL) {
4310 int release_ret;
4311
4312 PERROR("realloc ust app events");
4313 free(tmp_event);
4314 ret = -ENOMEM;
4315 release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
4316 if (release_ret < 0 &&
4317 release_ret != -LTTNG_UST_ERR_EXITING &&
4318 release_ret != -EPIPE) {
4319 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4320 }
4321 pthread_mutex_unlock(&app->sock_lock);
4322 goto rcu_error;
4323 }
4324 /* Zero the new memory */
4325 memset(new_tmp_event + nbmem, 0,
4326 (new_nbmem - nbmem) * sizeof(struct lttng_event));
4327 nbmem = new_nbmem;
4328 tmp_event = new_tmp_event;
4329 }
4330 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_ABI_SYM_NAME_LEN);
4331 tmp_event[count].loglevel = uiter.loglevel;
4332 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_ABI_TRACEPOINT;
4333 tmp_event[count].pid = app->pid;
4334 tmp_event[count].enabled = -1;
4335 count++;
4336 }
4337 ret = lttng_ust_ctl_release_handle(app->sock, handle);
4338 pthread_mutex_unlock(&app->sock_lock);
4339 if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4340 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
4341 }
4342 }
4343
4344 ret = count;
4345 *events = tmp_event;
4346
4347 DBG2("UST app list events done (%zu events)", count);
4348
4349rcu_error:
4350 rcu_read_unlock();
4351error:
4352 health_code_update();
4353 return ret;
4354}
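
/*
 * Caller sketch (illustrative): on success, the return value is the
 * event count and *events points to an array the caller owns and must
 * free:
 *
 *   struct lttng_event *events;
 *   int nb_events = ust_app_list_events(&events);
 *
 *   if (nb_events >= 0) {
 *           for (int i = 0; i < nb_events; i++) {
 *                   ... events[i].name, events[i].pid ...
 *           }
 *           free(events);
 *   }
 */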
4355
4356/*
4357 * Fill the fields array with all event fields of all registered apps.
4358 */
4359int ust_app_list_event_fields(struct lttng_event_field **fields)
4360{
4361 int ret, handle;
4362 size_t nbmem, count = 0;
4363 struct lttng_ht_iter iter;
4364 struct ust_app *app;
4365 struct lttng_event_field *tmp_event;
4366
4367 nbmem = UST_APP_EVENT_LIST_SIZE;
4368 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
4369 if (tmp_event == NULL) {
4370 PERROR("zmalloc ust app event fields");
4371 ret = -ENOMEM;
4372 goto error;
4373 }
4374
4375 rcu_read_lock();
4376
4377 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4378 struct lttng_ust_abi_field_iter uiter;
4379
4380 health_code_update();
4381
4382 if (!app->compatible) {
4383 /*
4384 * TODO: In time, we should notify the caller of this error by
4385 * telling them that this is a version error.
4386 */
4387 continue;
4388 }
4389 pthread_mutex_lock(&app->sock_lock);
4390 handle = lttng_ust_ctl_tracepoint_field_list(app->sock);
4391 if (handle < 0) {
4392 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
4393 ERR("UST app list field getting handle failed for app pid %d",
4394 app->pid);
4395 }
4396 pthread_mutex_unlock(&app->sock_lock);
4397 continue;
4398 }
4399
4400 while ((ret = lttng_ust_ctl_tracepoint_field_list_get(app->sock, handle,
4401 &uiter)) != -LTTNG_UST_ERR_NOENT) {
4402 /* Handle ustctl error. */
4403 if (ret < 0) {
4404 int release_ret;
4405
4406 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
4407 ERR("UST app tp list field failed for app %d with ret %d",
4408 app->sock, ret);
4409 } else {
4410 DBG3("UST app tp list field failed. Application is dead");
4411 /*
4412 * This is normal behavior, an application can die during the
4413 * creation process. Don't report an error so the execution can
4414 * continue normally. Move on to the next application.
4415 */
4416 break;
4417 }
4418 free(tmp_event);
4419 release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
4420 pthread_mutex_unlock(&app->sock_lock);
4421 if (release_ret < 0 &&
4422 release_ret != -LTTNG_UST_ERR_EXITING &&
4423 release_ret != -EPIPE) {
4424 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4425 }
4426 goto rcu_error;
4427 }
4428
4429 health_code_update();
4430 if (count >= nbmem) {
4431 /* In case the realloc fails, we free the memory */
4432 struct lttng_event_field *new_tmp_event;
4433 size_t new_nbmem;
4434
4435 new_nbmem = nbmem << 1;
4436 DBG2("Reallocating event field list from %zu to %zu entries",
4437 nbmem, new_nbmem);
4438 new_tmp_event = realloc(tmp_event,
4439 new_nbmem * sizeof(struct lttng_event_field));
4440 if (new_tmp_event == NULL) {
4441 int release_ret;
4442
4443 PERROR("realloc ust app event fields");
4444 free(tmp_event);
4445 ret = -ENOMEM;
4446 release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
4447 pthread_mutex_unlock(&app->sock_lock);
4448 if (release_ret < 0 &&
4449 release_ret != -LTTNG_UST_ERR_EXITING &&
4450 release_ret != -EPIPE) {
4451 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
4452 }
4453 goto rcu_error;
4454 }
4455 /* Zero the new memory */
4456 memset(new_tmp_event + nbmem, 0,
4457 (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
4458 nbmem = new_nbmem;
4459 tmp_event = new_tmp_event;
4460 }
4461
4462 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
4463 /* Mapping between these enums matches 1 to 1. */
4464 tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
4465 tmp_event[count].nowrite = uiter.nowrite;
4466
4467 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_ABI_SYM_NAME_LEN);
4468 tmp_event[count].event.loglevel = uiter.loglevel;
4469 tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
4470 tmp_event[count].event.pid = app->pid;
4471 tmp_event[count].event.enabled = -1;
4472 count++;
4473 }
4474 ret = lttng_ust_ctl_release_handle(app->sock, handle);
4475 pthread_mutex_unlock(&app->sock_lock);
4476 if (ret < 0 &&
4477 ret != -LTTNG_UST_ERR_EXITING &&
4478 ret != -EPIPE) {
4479 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
4480 }
4481 }
4482
4483 ret = count;
4484 *fields = tmp_event;
4485
4486 DBG2("UST app list event fields done (%zu events)", count);
4487
4488rcu_error:
4489 rcu_read_unlock();
4490error:
4491 health_code_update();
4492 return ret;
4493}
4494
4495/*
4496 * Free and clean all traceable apps of the global list.
4497 *
4498 * Should _NOT_ be called with RCU read-side lock held.
4499 */
4500void ust_app_clean_list(void)
4501{
4502 int ret;
4503 struct ust_app *app;
4504 struct lttng_ht_iter iter;
4505
4506 DBG2("UST app cleaning registered apps hash table");
4507
4508 rcu_read_lock();
4509
4510 /* Cleanup notify socket hash table */
4511 if (ust_app_ht_by_notify_sock) {
4512 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
4513 notify_sock_n.node) {
4514 /*
4515 * Assert that all notifiers are gone as all triggers
4516 * are unregistered prior to this clean-up.
4517 */
4518 assert(lttng_ht_get_count(app->token_to_event_notifier_rule_ht) == 0);
4519
4520 ust_app_notify_sock_unregister(app->notify_sock);
4521 }
4522 }
4523
4524 if (ust_app_ht) {
4525 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4526 ret = lttng_ht_del(ust_app_ht, &iter);
4527 assert(!ret);
4528 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
4529 }
4530 }
4531
4532 /* Cleanup socket hash table */
4533 if (ust_app_ht_by_sock) {
4534 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
4535 sock_n.node) {
4536 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
4537 assert(!ret);
4538 }
4539 }
4540
4541 rcu_read_unlock();
4542
4543 /* Destroy is done only when the ht is empty */
4544 if (ust_app_ht) {
4545 ht_cleanup_push(ust_app_ht);
4546 }
4547 if (ust_app_ht_by_sock) {
4548 ht_cleanup_push(ust_app_ht_by_sock);
4549 }
4550 if (ust_app_ht_by_notify_sock) {
4551 ht_cleanup_push(ust_app_ht_by_notify_sock);
4552 }
4553}
4554
4555/*
4556 * Init UST app hash tables.
4557 */
4558int ust_app_ht_alloc(void)
4559{
4560 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4561 if (!ust_app_ht) {
4562 return -1;
4563 }
4564 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4565 if (!ust_app_ht_by_sock) {
4566 return -1;
4567 }
4568 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
4569 if (!ust_app_ht_by_notify_sock) {
4570 return -1;
4571 }
4572 return 0;
4573}
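
/*
 * Pairing sketch (illustrative): the tables allocated here are torn
 * down by ust_app_clean_list(), so a typical lifetime is:
 *
 *   if (ust_app_ht_alloc() < 0) {
 *           ... abort sessiond startup ...
 *   }
 *   ...
 *   ust_app_clean_list();
 */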
4574
4575/*
4576 * For a specific UST session, disable the channel for all registered apps.
4577 */
4578int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
4579 struct ltt_ust_channel *uchan)
4580{
4581 int ret = 0;
4582 struct lttng_ht_iter iter;
4583 struct lttng_ht_node_str *ua_chan_node;
4584 struct ust_app *app;
4585 struct ust_app_session *ua_sess;
4586 struct ust_app_channel *ua_chan;
4587
4588 assert(usess->active);
4589 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
4590 uchan->name, usess->id);
4591
4592 rcu_read_lock();
4593
4594 /* For every registered applications */
4595 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4596 struct lttng_ht_iter uiter;
4597 if (!app->compatible) {
4598 /*
4599 * TODO: In time, we should notify the caller of this error by
4600 * telling them that this is a version error.
4601 */
4602 continue;
4603 }
4604 ua_sess = lookup_session_by_app(usess, app);
4605 if (ua_sess == NULL) {
4606 continue;
4607 }
4608
4609 /* Get channel */
4610 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4611 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4612 /* If the session is found for the app, the channel must be there */
4613 assert(ua_chan_node);
4614
4615 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4616 /* The channel must not be already disabled */
4617 assert(ua_chan->enabled == 1);
4618
4619 /* Disable channel onto application */
4620 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
4621 if (ret < 0) {
4622 /* XXX: We might want to report this error at some point... */
4623 continue;
4624 }
4625 }
4626
4627 rcu_read_unlock();
4628 return ret;
4629}
4630
4631/*
4632 * For a specific UST session, enable the channel for all registered apps.
4633 */
4634int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
4635 struct ltt_ust_channel *uchan)
4636{
4637 int ret = 0;
4638 struct lttng_ht_iter iter;
4639 struct ust_app *app;
4640 struct ust_app_session *ua_sess;
4641
4642 assert(usess->active);
4643 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
4644 uchan->name, usess->id);
4645
4646 rcu_read_lock();
4647
4648 /* For every registered applications */
4649 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4650 if (!app->compatible) {
4651 /*
4652 * TODO: In time, we should notify the caller of this error by
4653 * telling them that this is a version error.
4654 */
4655 continue;
4656 }
4657 ua_sess = lookup_session_by_app(usess, app);
4658 if (ua_sess == NULL) {
4659 continue;
4660 }
4661
4662 /* Enable channel onto application */
4663 ret = enable_ust_app_channel(ua_sess, uchan, app);
4664 if (ret < 0) {
4665 /* XXX: We might want to report this error at some point... */
4666 continue;
4667 }
4668 }
4669
4670 rcu_read_unlock();
4671 return ret;
4672}
4673
4674/*
4675 * Disable an event in a channel and for a specific session.
4676 */
4677int ust_app_disable_event_glb(struct ltt_ust_session *usess,
4678 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4679{
4680 int ret = 0;
4681 struct lttng_ht_iter iter, uiter;
4682 struct lttng_ht_node_str *ua_chan_node;
4683 struct ust_app *app;
4684 struct ust_app_session *ua_sess;
4685 struct ust_app_channel *ua_chan;
4686 struct ust_app_event *ua_event;
4687
4688 assert(usess->active);
4689 DBG("UST app disabling event %s for all apps in channel "
4690 "%s for session id %" PRIu64,
4691 uevent->attr.name, uchan->name, usess->id);
4692
4693 rcu_read_lock();
4694
4695 /* For all registered applications */
4696 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4697 if (!app->compatible) {
4698 /*
4699 * TODO: In time, we should notify the caller of this error by
4700 * telling them that this is a version error.
4701 */
4702 continue;
4703 }
4704 ua_sess = lookup_session_by_app(usess, app);
4705 if (ua_sess == NULL) {
4706 /* Next app */
4707 continue;
4708 }
4709
4710 /* Lookup channel in the ust app session */
4711 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4712 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4713 if (ua_chan_node == NULL) {
4714 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
4715 "Skipping", uchan->name, usess->id, app->pid);
4716 continue;
4717 }
4718 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4719
4720 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4721 uevent->filter, uevent->attr.loglevel,
4722 uevent->exclusion);
4723 if (ua_event == NULL) {
4724 DBG2("Event %s not found in channel %s for app pid %d."
4725 "Skipping", uevent->attr.name, uchan->name, app->pid);
4726 continue;
4727 }
4728
4729 ret = disable_ust_app_event(ua_sess, ua_event, app);
4730 if (ret < 0) {
4731 /* XXX: Report error someday... */
4732 continue;
4733 }
4734 }
4735
4736 rcu_read_unlock();
4737 return ret;
4738}
4739
4740/* The ua_sess lock must be held by the caller. */
4741static
4742int ust_app_channel_create(struct ltt_ust_session *usess,
4743 struct ust_app_session *ua_sess,
4744 struct ltt_ust_channel *uchan, struct ust_app *app,
4745 struct ust_app_channel **_ua_chan)
4746{
4747 int ret = 0;
4748 struct ust_app_channel *ua_chan = NULL;
4749
4750 assert(ua_sess);
4751 ASSERT_LOCKED(ua_sess->lock);
4752
4753 if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
4754 sizeof(uchan->name))) {
4755 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
4756 &uchan->attr);
4757 ret = 0;
4758 } else {
4759 struct ltt_ust_context *uctx = NULL;
4760
4761 /*
4762 * Create channel onto application and synchronize its
4763 * configuration.
4764 */
4765 ret = ust_app_channel_allocate(ua_sess, uchan,
4766 LTTNG_UST_ABI_CHAN_PER_CPU, usess,
4767 &ua_chan);
4768 if (ret < 0) {
4769 goto error;
4770 }
4771
4772 ret = ust_app_channel_send(app, usess,
4773 ua_sess, ua_chan);
4774 if (ret) {
4775 goto error;
4776 }
4777
4778 /* Add contexts. */
4779 cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
4780 ret = create_ust_app_channel_context(ua_chan,
4781 &uctx->ctx, app);
4782 if (ret) {
4783 goto error;
4784 }
4785 }
4786 }
4787
4788error:
4789 if (ret < 0) {
4790 switch (ret) {
4791 case -ENOTCONN:
4792 /*
4793 * The application's socket is not valid. Either a bad socket
4794 * or a timeout on it. We can't inform the caller that, for a
4795 * specific app, the session failed, so let's continue here.
4796 */
4797 ret = 0; /* Not an error. */
4798 break;
4799 case -ENOMEM:
4800 default:
4801 break;
4802 }
4803 }
4804
4805 if (ret == 0 && _ua_chan) {
4806 /*
4807 * Only return the application's channel on success. Note
4808 * that the channel can still be part of the application's
4809 * channel hashtable on error.
4810 */
4811 *_ua_chan = ua_chan;
4812 }
4813 return ret;
4814}
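
/*
 * Caller sketch (illustrative): a broken application socket (-ENOTCONN)
 * is deliberately reported as success, so a caller must treat a NULL
 * output channel as "nothing to do" rather than as an error:
 *
 *   ret = ust_app_channel_create(usess, ua_sess, uchan, app, &ua_chan);
 *   if (ret) {
 *           ... hard error, e.g. -ENOMEM ...
 *   } else if (!ua_chan) {
 *           ... application gone or metadata channel; skip ...
 *   }
 */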
4815
4816/*
4817 * Enable event for a specific session and channel on the tracer.
4818 */
4819int ust_app_enable_event_glb(struct ltt_ust_session *usess,
4820 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4821{
4822 int ret = 0;
4823 struct lttng_ht_iter iter, uiter;
4824 struct lttng_ht_node_str *ua_chan_node;
4825 struct ust_app *app;
4826 struct ust_app_session *ua_sess;
4827 struct ust_app_channel *ua_chan;
4828 struct ust_app_event *ua_event;
4829
4830 assert(usess->active);
4831 DBG("UST app enabling event %s for all apps for session id %" PRIu64,
4832 uevent->attr.name, usess->id);
4833
4834 /*
4835 * NOTE: At this point, this function is called only if the session and
4836 * channel passed are already created for all apps and enabled on the
4837 * tracer as well.
4838 */
4839
4840 rcu_read_lock();
4841
4842 /* For all registered applications */
4843 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4844 if (!app->compatible) {
4845 /*
4846 * TODO: In time, we should notify the caller of this error by
4847 * telling them that this is a version error.
4848 */
4849 continue;
4850 }
4851 ua_sess = lookup_session_by_app(usess, app);
4852 if (!ua_sess) {
4853 /* The application has problem or is probably dead. */
4854 continue;
4855 }
4856
4857 pthread_mutex_lock(&ua_sess->lock);
4858
4859 if (ua_sess->deleted) {
4860 pthread_mutex_unlock(&ua_sess->lock);
4861 continue;
4862 }
4863
4864 /* Lookup channel in the ust app session */
4865 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4866 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4867 /*
4868 * It is possible that the channel cannot be found if
4869 * the channel/event creation occurs concurrently with
4870 * an application exit.
4871 */
4872 if (!ua_chan_node) {
4873 pthread_mutex_unlock(&ua_sess->lock);
4874 continue;
4875 }
4876
4877 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4878
4879 /* Get event node */
4880 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4881 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
4882 if (ua_event == NULL) {
4883 DBG3("UST app enable event %s not found for app PID %d."
4884 "Skipping app", uevent->attr.name, app->pid);
4885 goto next_app;
4886 }
4887
4888 ret = enable_ust_app_event(ua_sess, ua_event, app);
4889 if (ret < 0) {
4890 pthread_mutex_unlock(&ua_sess->lock);
4891 goto error;
4892 }
4893 next_app:
4894 pthread_mutex_unlock(&ua_sess->lock);
4895 }
4896
4897error:
4898 rcu_read_unlock();
4899 return ret;
4900}
4901
4902/*
4903 * For a specific existing UST session and UST channel, creates the event for
4904 * all registered apps.
4905 */
4906int ust_app_create_event_glb(struct ltt_ust_session *usess,
4907 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4908{
4909 int ret = 0;
4910 struct lttng_ht_iter iter, uiter;
4911 struct lttng_ht_node_str *ua_chan_node;
4912 struct ust_app *app;
4913 struct ust_app_session *ua_sess;
4914 struct ust_app_channel *ua_chan;
4915
4916 assert(usess->active);
4917 DBG("UST app creating event %s for all apps for session id %" PRIu64,
4918 uevent->attr.name, usess->id);
4919
4920 rcu_read_lock();
4921
4922 /* For all registered applications */
4923 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4924 if (!app->compatible) {
4925 /*
4926 * TODO: In time, we should notify the caller of this error by
4927 * telling them that this is a version error.
4928 */
4929 continue;
4930 }
4931 ua_sess = lookup_session_by_app(usess, app);
4932 if (!ua_sess) {
4933 /* The application has problem or is probably dead. */
4934 continue;
4935 }
4936
4937 pthread_mutex_lock(&ua_sess->lock);
4938
4939 if (ua_sess->deleted) {
4940 pthread_mutex_unlock(&ua_sess->lock);
4941 continue;
4942 }
4943
4944 /* Lookup channel in the ust app session */
4945 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4946 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4947 /* If the channel is not found, there is a code flow error */
4948 assert(ua_chan_node);
4949
4950 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4951
4952 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
4953 pthread_mutex_unlock(&ua_sess->lock);
4954 if (ret < 0) {
4955 if (ret != -LTTNG_UST_ERR_EXIST) {
4956 /* Possible value at this point: -ENOMEM. If so, we stop! */
4957 break;
4958 }
4959 DBG2("UST app event %s already exist on app PID %d",
4960 uevent->attr.name, app->pid);
4961 continue;
4962 }
4963 }
4964
4965 rcu_read_unlock();
4966 return ret;
4967}
4968
4969/*
4970 * Start tracing for a specific UST session and app.
4971 *
4972 * Called with UST app session lock held.
4974 */
4975static
4976int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
4977{
4978 int ret = 0;
4979 struct ust_app_session *ua_sess;
4980
4981 DBG("Starting tracing for ust app pid %d", app->pid);
4982
4983 rcu_read_lock();
4984
4985 if (!app->compatible) {
4986 goto end;
4987 }
4988
4989 ua_sess = lookup_session_by_app(usess, app);
4990 if (ua_sess == NULL) {
4991 /* The session is in teardown process. Ignore and continue. */
4992 goto end;
4993 }
4994
4995 pthread_mutex_lock(&ua_sess->lock);
4996
4997 if (ua_sess->deleted) {
4998 pthread_mutex_unlock(&ua_sess->lock);
4999 goto end;
5000 }
5001
5002 if (ua_sess->enabled) {
5003 pthread_mutex_unlock(&ua_sess->lock);
5004 goto end;
5005 }
5006
5007 /* Upon restart, we skip the setup, already done */
5008 if (ua_sess->started) {
5009 goto skip_setup;
5010 }
5011
5012 health_code_update();
5013
5014skip_setup:
5015 /* This starts the UST tracing */
5016 pthread_mutex_lock(&app->sock_lock);
5017 ret = lttng_ust_ctl_start_session(app->sock, ua_sess->handle);
5018 pthread_mutex_unlock(&app->sock_lock);
5019 if (ret < 0) {
5020 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5021 ERR("Error starting tracing for app pid: %d (ret: %d)",
5022 app->pid, ret);
5023 } else {
5024 DBG("UST app start session failed. Application is dead.");
5025 /*
5026 * This is normal behavior, an application can die during the
5027 * creation process. Don't report an error so the execution can
5028 * continue normally.
5029 */
5030 pthread_mutex_unlock(&ua_sess->lock);
5031 goto end;
5032 }
5033 goto error_unlock;
5034 }
5035
5036 /* Indicate that the session has been started once */
5037 ua_sess->started = 1;
5038 ua_sess->enabled = 1;
5039
5040 pthread_mutex_unlock(&ua_sess->lock);
5041
5042 health_code_update();
5043
5044 /* Quiescent wait after starting trace */
5045 pthread_mutex_lock(&app->sock_lock);
5046 ret = lttng_ust_ctl_wait_quiescent(app->sock);
5047 pthread_mutex_unlock(&app->sock_lock);
5048 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5049 ERR("UST app wait quiescent failed for app pid %d ret %d",
5050 app->pid, ret);
5051 }
5052
5053end:
5054 rcu_read_unlock();
5055 health_code_update();
5056 return 0;
5057
5058error_unlock:
5059 pthread_mutex_unlock(&ua_sess->lock);
5060 rcu_read_unlock();
5061 health_code_update();
5062 return -1;
5063}
5064
5065/*
5066 * Stop tracing for a specific UST session and app.
5067 */
5068static
5069int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
5070{
5071 int ret = 0;
5072 struct ust_app_session *ua_sess;
5073 struct ust_registry_session *registry;
5074
5075 DBG("Stopping tracing for ust app pid %d", app->pid);
5076
5077 rcu_read_lock();
5078
5079 if (!app->compatible) {
5080 goto end_no_session;
5081 }
5082
5083 ua_sess = lookup_session_by_app(usess, app);
5084 if (ua_sess == NULL) {
5085 goto end_no_session;
5086 }
5087
5088 pthread_mutex_lock(&ua_sess->lock);
5089
5090 if (ua_sess->deleted) {
5091 pthread_mutex_unlock(&ua_sess->lock);
5092 goto end_no_session;
5093 }
5094
5095 /*
5096 * If started = 0, it means that stop trace has been called for a session
5097 * that was never started. It's possible since we can have a failed start
5098 * from either the application manager thread or the command thread. Simply
5099 * indicate that this is a stop error.
5100 */
5101 if (!ua_sess->started) {
5102 goto error_rcu_unlock;
5103 }
5104
5105 health_code_update();
5106
5107 /* This inhibits UST tracing */
5108 pthread_mutex_lock(&app->sock_lock);
5109 ret = lttng_ust_ctl_stop_session(app->sock, ua_sess->handle);
5110 pthread_mutex_unlock(&app->sock_lock);
5111 if (ret < 0) {
5112 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5113 ERR("Error stopping tracing for app pid: %d (ret: %d)",
5114 app->pid, ret);
5115 } else {
5116 DBG("UST app stop session failed. Application is dead.");
5117 /*
5118 * This is normal behavior, an application can die during the
5119 * creation process. Don't report an error so the execution can
5120 * continue normally.
5121 */
5122 goto end_unlock;
5123 }
5124 goto error_rcu_unlock;
5125 }
5126
5127 health_code_update();
5128 ua_sess->enabled = 0;
5129
5130 /* Quiescent wait after stopping trace */
5131 pthread_mutex_lock(&app->sock_lock);
5132 ret = lttng_ust_ctl_wait_quiescent(app->sock);
5133 pthread_mutex_unlock(&app->sock_lock);
5134 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5135 ERR("UST app wait quiescent failed for app pid %d ret %d",
5136 app->pid, ret);
5137 }
5138
5139 health_code_update();
5140
5141 registry = get_session_registry(ua_sess);
5142
5143 /* The UST app session is held; the registry shall not be null. */
5144 assert(registry);
5145
5146 /* Push metadata for application before freeing the application. */
5147 (void) push_metadata(registry, ua_sess->consumer);
5148
5149end_unlock:
5150 pthread_mutex_unlock(&ua_sess->lock);
5151end_no_session:
5152 rcu_read_unlock();
5153 health_code_update();
5154 return 0;
5155
5156error_rcu_unlock:
5157 pthread_mutex_unlock(&ua_sess->lock);
5158 rcu_read_unlock();
5159 health_code_update();
5160 return -1;
5161}
5162
5163static
5164int ust_app_flush_app_session(struct ust_app *app,
5165 struct ust_app_session *ua_sess)
5166{
5167 int ret, retval = 0;
5168 struct lttng_ht_iter iter;
5169 struct ust_app_channel *ua_chan;
5170 struct consumer_socket *socket;
5171
5172 DBG("Flushing app session buffers for ust app pid %d", app->pid);
5173
5174 rcu_read_lock();
5175
5176 if (!app->compatible) {
5177 goto end_not_compatible;
5178 }
5179
5180 pthread_mutex_lock(&ua_sess->lock);
5181
5182 if (ua_sess->deleted) {
5183 goto end_deleted;
5184 }
5185
5186 health_code_update();
5187
5188 /* Find the consumer socket used to flush the app's buffers. */
5189 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5190 ua_sess->consumer);
5191
5192 /* Flush buffers and push metadata. */
5193 switch (ua_sess->buffer_type) {
5194 case LTTNG_BUFFER_PER_PID:
5195 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
5196 node.node) {
5197 health_code_update();
5198 ret = consumer_flush_channel(socket, ua_chan->key);
5199 if (ret) {
5200 ERR("Error flushing consumer channel");
5201 retval = -1;
5202 continue;
5203 }
5204 }
5205 break;
5206 case LTTNG_BUFFER_PER_UID:
5207 default:
5208 assert(0);
5209 break;
5210 }
5211
5212 health_code_update();
5213
5214end_deleted:
5215 pthread_mutex_unlock(&ua_sess->lock);
5216
5217end_not_compatible:
5218 rcu_read_unlock();
5219 health_code_update();
5220 return retval;
5221}
5222
5223/*
5224 * Flush buffers for all applications for a specific UST session.
5225 * Called with UST session lock held.
5226 */
5227static
5228int ust_app_flush_session(struct ltt_ust_session *usess)
5230{
5231 int ret = 0;
5232
5233 DBG("Flushing session buffers for all ust apps");
5234
5235 rcu_read_lock();
5236
5237 /* Flush buffers and push metadata. */
5238 switch (usess->buffer_type) {
5239 case LTTNG_BUFFER_PER_UID:
5240 {
5241 struct buffer_reg_uid *reg;
5242 struct lttng_ht_iter iter;
5243
5244 /* Flush all per-UID buffers associated with that session. */
5245 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5246 struct ust_registry_session *ust_session_reg;
5247 struct buffer_reg_channel *buf_reg_chan;
5248 struct consumer_socket *socket;
5249
5250 /* Get the consumer socket to use to push the metadata. */
5251 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
5252 usess->consumer);
5253 if (!socket) {
5254 /* Ignore request if no consumer is found for the session. */
5255 continue;
5256 }
5257
5258 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5259 buf_reg_chan, node.node) {
5260 /*
5261 * The following call will print error values so the return
5262 * code is of little importance because whatever happens, we
5263 * have to try them all.
5264 */
5265 (void) consumer_flush_channel(socket, buf_reg_chan->consumer_key);
5266 }
5267
5268 ust_session_reg = reg->registry->reg.ust;
5269 /* Push metadata. */
5270 (void) push_metadata(ust_session_reg, usess->consumer);
5271 }
5272 break;
5273 }
5274 case LTTNG_BUFFER_PER_PID:
5275 {
5276 struct ust_app_session *ua_sess;
5277 struct lttng_ht_iter iter;
5278 struct ust_app *app;
5279
5280 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5281 ua_sess = lookup_session_by_app(usess, app);
5282 if (ua_sess == NULL) {
5283 continue;
5284 }
5285 (void) ust_app_flush_app_session(app, ua_sess);
5286 }
5287 break;
5288 }
5289 default:
5290 ret = -1;
5291 assert(0);
5292 break;
5293 }
5294
5295 rcu_read_unlock();
5296 health_code_update();
5297 return ret;
5298}
5299
5300static
5301int ust_app_clear_quiescent_app_session(struct ust_app *app,
5302 struct ust_app_session *ua_sess)
5303{
5304 int ret = 0;
5305 struct lttng_ht_iter iter;
5306 struct ust_app_channel *ua_chan;
5307 struct consumer_socket *socket;
5308
5309 DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
5310
5311 rcu_read_lock();
5312
5313 if (!app->compatible) {
5314 goto end_not_compatible;
5315 }
5316
5317 pthread_mutex_lock(&ua_sess->lock);
5318
5319 if (ua_sess->deleted) {
5320 goto end_unlock;
5321 }
5322
5323 health_code_update();
5324
5325 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5326 ua_sess->consumer);
5327 if (!socket) {
5328 ERR("Failed to find consumer (%" PRIu32 ") socket",
5329 app->bits_per_long);
5330 ret = -1;
5331 goto end_unlock;
5332 }
5333
5334 /* Clear quiescent state. */
5335 switch (ua_sess->buffer_type) {
5336 case LTTNG_BUFFER_PER_PID:
5337 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
5338 ua_chan, node.node) {
5339 health_code_update();
5340 ret = consumer_clear_quiescent_channel(socket,
5341 ua_chan->key);
5342 if (ret) {
5343 ERR("Error clearing quiescent state for consumer channel");
5344 ret = -1;
5345 continue;
5346 }
5347 }
5348 break;
5349 case LTTNG_BUFFER_PER_UID:
5350 default:
5351 assert(0);
5352 ret = -1;
5353 break;
5354 }
5355
5356 health_code_update();
5357
5358end_unlock:
5359 pthread_mutex_unlock(&ua_sess->lock);
5360
5361end_not_compatible:
5362 rcu_read_unlock();
5363 health_code_update();
5364 return ret;
5365}
5366
5367/*
5368 * Clear quiescent state in each stream for all applications for a
5369 * specific UST session.
5370 * Called with UST session lock held.
5371 */
5372static
5373int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
5375{
5376 int ret = 0;
5377
5378 DBG("Clearing stream quiescent state for all ust apps");
5379
5380 rcu_read_lock();
5381
5382 switch (usess->buffer_type) {
5383 case LTTNG_BUFFER_PER_UID:
5384 {
5385 struct lttng_ht_iter iter;
5386 struct buffer_reg_uid *reg;
5387
5388 /*
5389 * Clear the quiescent state of all per-UID buffers associated with
5390 * that session.
5391 */
5392 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5393 struct consumer_socket *socket;
5394 struct buffer_reg_channel *buf_reg_chan;
5395
5396 /* Get the associated consumer socket. */
5397 socket = consumer_find_socket_by_bitness(
5398 reg->bits_per_long, usess->consumer);
5399 if (!socket) {
5400 /*
5401 * Ignore request if no consumer is found for
5402 * the session.
5403 */
5404 continue;
5405 }
5406
5407 cds_lfht_for_each_entry(reg->registry->channels->ht,
5408 &iter.iter, buf_reg_chan, node.node) {
5409 /*
5410 * The following call will print error values so
5411 * the return code is of little importance
5412 * because whatever happens, we have to try them
5413 * all.
5414 */
5415 (void) consumer_clear_quiescent_channel(socket,
5416 buf_reg_chan->consumer_key);
5417 }
5418 }
5419 break;
5420 }
5421 case LTTNG_BUFFER_PER_PID:
5422 {
5423 struct ust_app_session *ua_sess;
5424 struct lttng_ht_iter iter;
5425 struct ust_app *app;
5426
5427 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
5428 pid_n.node) {
5429 ua_sess = lookup_session_by_app(usess, app);
5430 if (ua_sess == NULL) {
5431 continue;
5432 }
5433 (void) ust_app_clear_quiescent_app_session(app,
5434 ua_sess);
5435 }
5436 break;
5437 }
5438 default:
5439 ret = -1;
5440 assert(0);
5441 break;
5442 }
5443
5444 rcu_read_unlock();
5445 health_code_update();
5446 return ret;
5447}
5448
5449/*
5450 * Destroy a specific UST session in apps.
5451 */
5452static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
5453{
5454 int ret;
5455 struct ust_app_session *ua_sess;
5456 struct lttng_ht_iter iter;
5457 struct lttng_ht_node_u64 *node;
5458
5459 DBG("Destroy tracing for ust app pid %d", app->pid);
5460
5461 rcu_read_lock();
5462
5463 if (!app->compatible) {
5464 goto end;
5465 }
5466
5467 __lookup_session_by_app(usess, app, &iter);
5468 node = lttng_ht_iter_get_node_u64(&iter);
5469 if (node == NULL) {
5470 /* Session is being or is deleted. */
5471 goto end;
5472 }
5473 ua_sess = caa_container_of(node, struct ust_app_session, node);
5474
5475 health_code_update();
5476 destroy_app_session(app, ua_sess);
5477
5478 health_code_update();
5479
5480 /* Quiescent wait after stopping trace */
5481 pthread_mutex_lock(&app->sock_lock);
5482 ret = lttng_ust_ctl_wait_quiescent(app->sock);
5483 pthread_mutex_unlock(&app->sock_lock);
5484 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5485 ERR("UST app wait quiescent failed for app pid %d ret %d",
5486 app->pid, ret);
5487 }
5488end:
5489 rcu_read_unlock();
5490 health_code_update();
5491 return 0;
5492}
5493
5494/*
5495 * Start tracing for the UST session.
5496 */
5497int ust_app_start_trace_all(struct ltt_ust_session *usess)
5498{
5499 struct lttng_ht_iter iter;
5500 struct ust_app *app;
5501
5502 DBG("Starting all UST traces");
5503
5504 /*
5505 * Even though the start trace might fail, flag this session active so
5506 * other applications coming in are started by default.
5507 */
5508 usess->active = 1;
5509
5510 rcu_read_lock();
5511
5512 /*
5513 * In a start-stop-start use-case, we need to clear the quiescent state
5514 * of each channel set by the prior stop command, thus ensuring that a
5515 * following stop or destroy is sure to grab a timestamp_end near those
5516 * operations, even if the packet is empty.
5517 */
5518 (void) ust_app_clear_quiescent_session(usess);
5519
5520 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5521 ust_app_global_update(usess, app);
5522 }
5523
5524 rcu_read_unlock();
5525
5526 return 0;
5527}
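
/*
 * Timeline sketch for the quiescent-state clearing above (illustrative):
 *
 *   start -> tracing
 *   stop  -> channels flagged quiescent
 *   start -> ust_app_clear_quiescent_session() clears the flag
 *   stop  -> a timestamp_end is captured near the stop operation,
 *            even if the current packet is empty
 */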
5528
5529/*
5530 * Stop tracing for the UST session.
5531 * Called with UST session lock held.
5532 */
5533int ust_app_stop_trace_all(struct ltt_ust_session *usess)
5534{
5535 int ret = 0;
5536 struct lttng_ht_iter iter;
5537 struct ust_app *app;
5538
5539 DBG("Stopping all UST traces");
5540
5541 /*
5542 * Even though the stop trace might fail, flag this session inactive so
5543 * other applications coming in are not started by default.
5544 */
5545 usess->active = 0;
5546
5547 rcu_read_lock();
5548
5549 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5550 ret = ust_app_stop_trace(usess, app);
5551 if (ret < 0) {
5552 /* Continue to the next app even on error */
5553 continue;
5554 }
5555 }
5556
5557 (void) ust_app_flush_session(usess);
5558
5559 rcu_read_unlock();
5560
5561 return 0;
5562}
5563
5564/*
5565 * Destroy the app UST session of every registered app.
5566 */
5567int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
5568{
5569 int ret = 0;
5570 struct lttng_ht_iter iter;
5571 struct ust_app *app;
5572
5573 DBG("Destroy all UST traces");
5574
5575 rcu_read_lock();
5576
5577 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5578 ret = destroy_trace(usess, app);
5579 if (ret < 0) {
5580 /* Continue to the next app even on error */
5581 continue;
5582 }
5583 }
5584
5585 rcu_read_unlock();
5586
5587 return 0;
5588}
5589
5590/* The ua_sess lock must be held by the caller. */
5591static
5592int find_or_create_ust_app_channel(
5593 struct ltt_ust_session *usess,
5594 struct ust_app_session *ua_sess,
5595 struct ust_app *app,
5596 struct ltt_ust_channel *uchan,
5597 struct ust_app_channel **ua_chan)
5598{
5599 int ret = 0;
5600 struct lttng_ht_iter iter;
5601 struct lttng_ht_node_str *ua_chan_node;
5602
5603 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
5604 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
5605 if (ua_chan_node) {
5606 *ua_chan = caa_container_of(ua_chan_node,
5607 struct ust_app_channel, node);
5608 goto end;
5609 }
5610
5611 ret = ust_app_channel_create(usess, ua_sess, uchan, app, ua_chan);
5612 if (ret) {
5613 goto end;
5614 }
5615end:
5616 return ret;
5617}
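
/*
 * Note with sketch (illustrative): for the metadata channel,
 * ust_app_channel_create() reports success without producing an
 * application channel, so *ua_chan can legitimately be NULL; the
 * synchronization loop below accounts for this:
 *
 *   ret = find_or_create_ust_app_channel(usess, ua_sess, app, uchan,
 *           &ua_chan);
 *   if (!ret && !ua_chan) {
 *           ... metadata channel; no per-event work to do ...
 *   }
 */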
5618
5619static
5620int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan,
5621 struct ltt_ust_event *uevent, struct ust_app_session *ua_sess,
5622 struct ust_app *app)
5623{
5624 int ret = 0;
5625 struct ust_app_event *ua_event = NULL;
5626
5627 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
5628 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
5629 if (!ua_event) {
5630 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
5631 if (ret < 0) {
5632 goto end;
5633 }
5634 } else {
5635 if (ua_event->enabled != uevent->enabled) {
5636 ret = uevent->enabled ?
5637 enable_ust_app_event(ua_sess, ua_event, app) :
5638 disable_ust_app_event(ua_sess, ua_event, app);
5639 }
5640 }
5641
5642end:
5643 return ret;
5644}
5645
5646/* Called with RCU read-side lock held. */
5647static
5648void ust_app_synchronize_event_notifier_rules(struct ust_app *app)
5649{
5650 int ret = 0;
5651 enum lttng_error_code ret_code;
5652 enum lttng_trigger_status t_status;
5653 struct lttng_ht_iter app_trigger_iter;
5654 struct lttng_triggers *triggers = NULL;
5655 struct ust_app_event_notifier_rule *event_notifier_rule;
5656 unsigned int count, i;
5657
5658 /*
5659 * Currently, registering or unregistering a trigger with an
5660 * event rule condition causes a full synchronization of the event
5661 * notifiers.
5662 *
5663 * The first step attempts to add an event notifier for all registered
5664 * triggers that apply to the user space tracers. Then, the
5665 * application's event notifier rules are all checked against the list
5666 * of registered triggers. Any event notifier that doesn't have a
5667 * matching trigger can be assumed to have been disabled.
5668 *
5669 * All of this is inefficient, but is put in place to get the feature
5670 * rolling as it is simpler at this moment. It will be optimized Soon™
5671 * to allow the state of enabled event notifiers to be synchronized in
5672 * a piece-wise way.
5673 */
5674
5675 /* Get all triggers using uid 0 (root) */
5676 ret_code = notification_thread_command_list_triggers(
5677 the_notification_thread_handle, 0, &triggers);
5678 if (ret_code != LTTNG_OK) {
5679 goto end;
5680 }
5681
5682 assert(triggers);
5683
5684 t_status = lttng_triggers_get_count(triggers, &count);
5685 if (t_status != LTTNG_TRIGGER_STATUS_OK) {
5686 goto end;
5687 }
5688
5689 for (i = 0; i < count; i++) {
5690 struct lttng_condition *condition;
5691 struct lttng_event_rule *event_rule;
5692 struct lttng_trigger *trigger;
5693 const struct ust_app_event_notifier_rule *looked_up_event_notifier_rule;
5694 enum lttng_condition_status condition_status;
5695 uint64_t token;
5696
5697 trigger = lttng_triggers_borrow_mutable_at_index(triggers, i);
5698 assert(trigger);
5699
5700 token = lttng_trigger_get_tracer_token(trigger);
5701 condition = lttng_trigger_get_condition(trigger);
5702
5703 if (lttng_condition_get_type(condition) !=
5704 LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES) {
5705 /* Does not apply */
5706 continue;
5707 }
5708
5709 condition_status =
5710 lttng_condition_event_rule_matches_borrow_rule_mutable(
5711 condition, &event_rule);
5712 assert(condition_status == LTTNG_CONDITION_STATUS_OK);
5713
5714 if (lttng_event_rule_get_domain_type(event_rule) == LTTNG_DOMAIN_KERNEL) {
5715 /* Skip kernel related triggers. */
5716 continue;
5717 }
5718
5719 /*
5720 * Find or create the associated token event rule. The caller
5721 * holds the RCU read lock, so this is safe to call without
5722 * explicitly acquiring it here.
5723 */
5724 looked_up_event_notifier_rule = find_ust_app_event_notifier_rule(
5725 app->token_to_event_notifier_rule_ht, token);
5726 if (!looked_up_event_notifier_rule) {
5727 ret = create_ust_app_event_notifier_rule(trigger, app);
5728 if (ret < 0) {
5729 goto end;
5730 }
5731 }
5732 }
5733
5734 rcu_read_lock();
5735 /* Remove all unknown event sources from the app. */
5736 cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
5737 &app_trigger_iter.iter, event_notifier_rule,
5738 node.node) {
5739 const uint64_t app_token = event_notifier_rule->token;
5740 bool found = false;
5741
5742 /*
5743 * Check if the app event trigger still exists on the
5744 * notification side.
5745 */
5746 for (i = 0; i < count; i++) {
5747 uint64_t notification_thread_token;
5748 const struct lttng_trigger *trigger =
5749 lttng_triggers_get_at_index(
5750 triggers, i);
5751
5752 assert(trigger);
5753
5754 notification_thread_token =
5755 lttng_trigger_get_tracer_token(trigger);
5756
5757 if (notification_thread_token == app_token) {
5758 found = true;
5759 break;
5760 }
5761 }
5762
5763 if (found) {
5764 /* Still valid. */
5765 continue;
5766 }
5767
5768 /*
5769 * This trigger was unregistered, disable it on the tracer's
5770 * side.
5771 */
5772 ret = lttng_ht_del(app->token_to_event_notifier_rule_ht,
5773 &app_trigger_iter);
5774 assert(ret == 0);
5775
5776 /* Callee logs errors. */
5777 (void) disable_ust_object(app, event_notifier_rule->obj);
5778
5779 delete_ust_app_event_notifier_rule(
5780 app->sock, event_notifier_rule, app);
5781 }
5782
5783 rcu_read_unlock();
5784
5785end:
5786 lttng_triggers_destroy(triggers);
5787 return;
5788}
5789
5790/*
5791 * RCU read lock must be held by the caller.
5792 */
5793static
5794void ust_app_synchronize_all_channels(struct ltt_ust_session *usess,
5795 struct ust_app_session *ua_sess,
5796 struct ust_app *app)
5797{
5798 int ret = 0;
5799 struct cds_lfht_iter uchan_iter;
5800 struct ltt_ust_channel *uchan;
5801
5802 assert(usess);
5803 assert(ua_sess);
5804 assert(app);
5805
5806 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &uchan_iter,
5807 uchan, node.node) {
5808 struct ust_app_channel *ua_chan;
5809 struct cds_lfht_iter uevent_iter;
5810 struct ltt_ust_event *uevent;
5811
5812 /*
5813 * Search for a matching ust_app_channel. If none is found,
5814 * create it. Creating the channel will cause the ua_chan
5815 * structure to be allocated, the channel buffers to be
5816 * allocated (if necessary) and sent to the application, and
5817 * all enabled contexts will be added to the channel.
5818 */
5819 ret = find_or_create_ust_app_channel(usess, ua_sess,
5820 app, uchan, &ua_chan);
5821 if (ret) {
5822 /* Tracer is probably gone or ENOMEM. */
5823 goto end;
5824 }
5825
5826 if (!ua_chan) {
5827 /* ua_chan will be NULL for the metadata channel */
5828 continue;
5829 }
5830
5831 cds_lfht_for_each_entry(uchan->events->ht, &uevent_iter, uevent,
5832 node.node) {
5833 ret = ust_app_channel_synchronize_event(ua_chan,
5834 uevent, ua_sess, app);
5835 if (ret) {
5836 goto end;
5837 }
5838 }
5839
5840 if (ua_chan->enabled != uchan->enabled) {
5841 ret = uchan->enabled ?
5842 enable_ust_app_channel(ua_sess, uchan, app) :
5843 disable_ust_app_channel(ua_sess, ua_chan, app);
5844 if (ret) {
5845 goto end;
5846 }
5847 }
5848 }
5849end:
5850 return;
5851}
5852
5853/*
5854 * The caller must ensure that the application is compatible and is tracked
5855 * by the process attribute trackers.
5856 */
5857static
5858void ust_app_synchronize(struct ltt_ust_session *usess,
5859 struct ust_app *app)
5860{
5861 int ret = 0;
5862 struct ust_app_session *ua_sess = NULL;
5863
5864 /*
5865 * The application's configuration should only be synchronized for
5866 * active sessions.
5867 */
5868 assert(usess->active);
5869
5870 ret = find_or_create_ust_app_session(usess, app, &ua_sess, NULL);
5871 if (ret < 0) {
5872 /* Tracer is probably gone or ENOMEM. */
5873 goto error;
5874 }
5875 assert(ua_sess);
5876
5877 pthread_mutex_lock(&ua_sess->lock);
5878 if (ua_sess->deleted) {
5879 pthread_mutex_unlock(&ua_sess->lock);
5880 goto end;
5881 }
5882
5883 rcu_read_lock();
5884
5885 ust_app_synchronize_all_channels(usess, ua_sess, app);
5886
5887 /*
5888 * Create the metadata for the application. This returns gracefully
5889 * if metadata was already created for the session.
5890 *
5891 * The metadata channel must be created after the data channels as the
5892 * consumer daemon assumes this ordering. When interacting with a relay
5893 * daemon, the consumer will use this assumption to send the
5894 * "STREAMS_SENT" message to the relay daemon.
5895 */
5896 ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
5897 if (ret < 0) {
5898 goto error_unlock;
5899 }
5900
5901 rcu_read_unlock();
5902
5903end:
5904 pthread_mutex_unlock(&ua_sess->lock);
5905 /* Everything went well at this point. */
5906 return;
5907
5908error_unlock:
5909 rcu_read_unlock();
5910 pthread_mutex_unlock(&ua_sess->lock);
5911error:
5912 if (ua_sess) {
5913 destroy_app_session(app, ua_sess);
5914 }
5915 return;
5916}
5917
5918static
5919void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
5920{
5921 struct ust_app_session *ua_sess;
5922
5923 ua_sess = lookup_session_by_app(usess, app);
5924 if (ua_sess == NULL) {
5925 return;
5926 }
5927 destroy_app_session(app, ua_sess);
5928}
5929
5930/*
5931 * Add channels/events from UST global domain to registered apps at sock.
5932 *
5933 * Called with session lock held.
5934 * Called with RCU read-side lock held.
5935 */
5936void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
5937{
5938 assert(usess);
5939 assert(usess->active);
5940
5941 DBG2("UST app global update for app sock %d for session id %" PRIu64,
5942 app->sock, usess->id);
5943
5944 if (!app->compatible) {
5945 return;
5946 }
5947 if (trace_ust_id_tracker_lookup(LTTNG_PROCESS_ATTR_VIRTUAL_PROCESS_ID,
5948 usess, app->pid) &&
5949 trace_ust_id_tracker_lookup(
5950 LTTNG_PROCESS_ATTR_VIRTUAL_USER_ID,
5951 usess, app->uid) &&
5952 trace_ust_id_tracker_lookup(
5953 LTTNG_PROCESS_ATTR_VIRTUAL_GROUP_ID,
5954 usess, app->gid)) {
5955 /*
5956 * Synchronize the application's internal tracing configuration
5957 * and start tracing.
5958 */
5959 ust_app_synchronize(usess, app);
5960 ust_app_start_trace(usess, app);
5961 } else {
5962 ust_app_global_destroy(usess, app);
5963 }
5964}
5965
5966/*
5967 * Add all event notifiers to an application.
5968 *
5969 * Called with session lock held.
5970 * Called with RCU read-side lock held.
5971 */
5972void ust_app_global_update_event_notifier_rules(struct ust_app *app)
5973{
5974 DBG2("UST application global event notifier rules update: app = '%s' (ppid: %d)",
5975 app->name, app->ppid);
5976
5977 if (!app->compatible) {
5978 return;
5979 }
5980
5981 if (app->event_notifier_group.object == NULL) {
5982 WARN("UST app global update of event notifiers for app skipped since communication handle is null: app = '%s' (ppid: %d)",
5983 app->name, app->ppid);
5984 return;
5985 }
5986
5987 ust_app_synchronize_event_notifier_rules(app);
5988}
5989
5990/*
5991 * Called with session lock held.
5992 */
5993void ust_app_global_update_all(struct ltt_ust_session *usess)
5994{
5995 struct lttng_ht_iter iter;
5996 struct ust_app *app;
5997
5998 rcu_read_lock();
5999 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6000 ust_app_global_update(usess, app);
6001 }
6002 rcu_read_unlock();
6003}
6004
6005void ust_app_global_update_all_event_notifier_rules(void)
6006{
6007 struct lttng_ht_iter iter;
6008 struct ust_app *app;
6009
6010 rcu_read_lock();
6011 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6012 ust_app_global_update_event_notifier_rules(app);
6013 }
6014
6015 rcu_read_unlock();
6016}
6017
6018/*
6019 * Add context to a specific channel for global UST domain.
6020 */
6021int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
6022 struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
6023{
6024 int ret = 0;
6025 struct lttng_ht_node_str *ua_chan_node;
6026 struct lttng_ht_iter iter, uiter;
6027 struct ust_app_channel *ua_chan = NULL;
6028 struct ust_app_session *ua_sess;
6029 struct ust_app *app;
6030
6031 assert(usess->active);
6032
6033 rcu_read_lock();
6034 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6035 if (!app->compatible) {
6036 /*
6037 * TODO: In time, we should notify the caller of this error by
6038 * telling them that this is a version error.
6039 */
6040 continue;
6041 }
6042 ua_sess = lookup_session_by_app(usess, app);
6043 if (ua_sess == NULL) {
6044 continue;
6045 }
6046
6047 pthread_mutex_lock(&ua_sess->lock);
6048
6049 if (ua_sess->deleted) {
6050 pthread_mutex_unlock(&ua_sess->lock);
6051 continue;
6052 }
6053
6054 /* Lookup channel in the ust app session */
6055 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
6056 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
6057 if (ua_chan_node == NULL) {
6058 goto next_app;
6059 }
6060 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
6061 node);
6062 ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
6063 if (ret < 0) {
6064 goto next_app;
6065 }
6066 next_app:
6067 pthread_mutex_unlock(&ua_sess->lock);
6068 }
6069
6070 rcu_read_unlock();
6071 return ret;
6072}
6073
6074/*
6075 * Receive registration and populate the given msg structure.
6076 *
6077 * On success return 0 else a negative value returned by the ustctl call.
6078 */
6079int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
6080{
6081 int ret;
6082 uint32_t pid, ppid, uid, gid;
6083
6084 assert(msg);
6085
6086 ret = lttng_ust_ctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
6087 &pid, &ppid, &uid, &gid,
6088 &msg->bits_per_long,
6089 &msg->uint8_t_alignment,
6090 &msg->uint16_t_alignment,
6091 &msg->uint32_t_alignment,
6092 &msg->uint64_t_alignment,
6093 &msg->long_alignment,
6094 &msg->byte_order,
6095 msg->name);
6096 if (ret < 0) {
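 /* ret is negative here: either -errno or a -LTTNG_UST_ERR_* code. */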
6097 switch (-ret) {
6098 case EPIPE:
6099 case ECONNRESET:
6100 case LTTNG_UST_ERR_EXITING:
6101 DBG3("UST app recv reg message failed. Application died");
6102 break;
6103 case LTTNG_UST_ERR_UNSUP_MAJOR:
6104 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
6105 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
6106 LTTNG_UST_ABI_MINOR_VERSION);
6107 break;
6108 default:
6109 ERR("UST app recv reg message failed with ret %d", ret);
6110 break;
6111 }
6112 goto error;
6113 }
6114 msg->pid = (pid_t) pid;
6115 msg->ppid = (pid_t) ppid;
6116 msg->uid = (uid_t) uid;
6117 msg->gid = (gid_t) gid;
6118
6119error:
6120 return ret;
6121}
6122
6123/*
6124 * Return a ust app session object using the application object and the
6125 * session object descriptor as a key. If not found, NULL is returned.
6126 * An RCU read side lock MUST be acquired when calling this function.
6127 */
6128static struct ust_app_session *find_session_by_objd(struct ust_app *app,
6129 int objd)
6130{
6131 struct lttng_ht_node_ulong *node;
6132 struct lttng_ht_iter iter;
6133 struct ust_app_session *ua_sess = NULL;
6134
6135 assert(app);
6136
6137 lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
6138 node = lttng_ht_iter_get_node_ulong(&iter);
6139 if (node == NULL) {
6140 DBG2("UST app session find by objd %d not found", objd);
6141 goto error;
6142 }
6143
6144 ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
6145
6146error:
6147 return ua_sess;
6148}
6149
6150/*
6151 * Return a ust app channel object using the application object and the channel
6152 * object descriptor as a key. If not found, NULL is returned. An RCU read side
6153 * lock MUST be acquired before calling this function.
6154 */
6155static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
6156 int objd)
6157{
6158 struct lttng_ht_node_ulong *node;
6159 struct lttng_ht_iter iter;
6160 struct ust_app_channel *ua_chan = NULL;
6161
6162 assert(app);
6163
6164 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
6165 node = lttng_ht_iter_get_node_ulong(&iter);
6166 if (node == NULL) {
6167 DBG2("UST app channel find by objd %d not found", objd);
6168 goto error;
6169 }
6170
6171 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
6172
6173error:
6174 return ua_chan;
6175}
6176
6177/*
6178 * Reply to a register channel notification from an application on the notify
6179 * socket. The channel metadata is also created.
6180 *
6181 * The session UST registry lock is acquired in this function.
6182 *
6183 * On success 0 is returned else a negative value.
6184 */
6185static int reply_ust_register_channel(int sock, int cobjd,
6186 size_t nr_fields, struct lttng_ust_ctl_field *fields)
6187{
6188 int ret, ret_code = 0;
6189 uint32_t chan_id;
6190 uint64_t chan_reg_key;
6191 enum lttng_ust_ctl_channel_header type;
6192 struct ust_app *app;
6193 struct ust_app_channel *ua_chan;
6194 struct ust_app_session *ua_sess;
6195 struct ust_registry_session *registry;
6196 struct ust_registry_channel *ust_reg_chan;
6197
6198 rcu_read_lock();
6199
6200 /* Lookup application. If not found, there is a code flow error. */
6201 app = find_app_by_notify_sock(sock);
6202 if (!app) {
6203 DBG("Application socket %d is being torn down. Abort event notify",
6204 sock);
6205 ret = 0;
6206 goto error_rcu_unlock;
6207 }
6208
6209 /* Lookup channel by UST object descriptor. */
6210 ua_chan = find_channel_by_objd(app, cobjd);
6211 if (!ua_chan) {
6212 DBG("Application channel is being torn down. Abort event notify");
6213 ret = 0;
6214 goto error_rcu_unlock;
6215 }
6216
6217 assert(ua_chan->session);
6218 ua_sess = ua_chan->session;
6219
6220 /* Get right session registry depending on the session buffer type. */
6221 registry = get_session_registry(ua_sess);
6222 if (!registry) {
6223 DBG("Application session is being torn down. Abort event notify");
6224 ret = 0;
6225 goto error_rcu_unlock;
6226 }
6227
6228 /* Depending on the buffer type, a different channel key is used. */
6229 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
6230 chan_reg_key = ua_chan->tracing_channel_id;
6231 } else {
6232 chan_reg_key = ua_chan->key;
6233 }
6234
6235 pthread_mutex_lock(&registry->lock);
6236
6237 ust_reg_chan = ust_registry_channel_find(registry, chan_reg_key);
6238 assert(ust_reg_chan);
6239
6240 if (!ust_reg_chan->register_done) {
6241 /*
6242 * TODO: eventually use the registry event count for
6243 * this channel to better guess header type for per-pid
6244 * buffers.
6245 */
6246 type = LTTNG_UST_CTL_CHANNEL_HEADER_LARGE;
6247 ust_reg_chan->nr_ctx_fields = nr_fields;
6248 ust_reg_chan->ctx_fields = fields;
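 /* The registry now owns the fields; clear the local pointer so the free() at the end of this function does not double-free them. */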
6249 fields = NULL;
6250 ust_reg_chan->header_type = type;
6251 } else {
6252 /* Get current already assigned values. */
6253 type = ust_reg_chan->header_type;
6254 }
6255 /* Channel id is set during the object creation. */
6256 chan_id = ust_reg_chan->chan_id;
6257
6258 /* Append to metadata */
6259 if (!ust_reg_chan->metadata_dumped) {
6260 ret_code = ust_metadata_channel_statedump(registry, ust_reg_chan);
6261 if (ret_code) {
6262 ERR("Error appending channel metadata (errno = %d)", ret_code);
6263 goto reply;
6264 }
6265 }
6266
6267reply:
6268 DBG3("UST app replying to register channel key %" PRIu64
6269 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
6270 ret_code);
6271
6272 ret = lttng_ust_ctl_reply_register_channel(sock, chan_id, type, ret_code);
6273 if (ret < 0) {
6274 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6275 ERR("UST app reply channel failed with ret %d", ret);
6276 } else {
6277 DBG3("UST app reply channel failed. Application died");
6278 }
6279 goto error;
6280 }
6281
6282 /* This channel registry registration is completed. */
6283 ust_reg_chan->register_done = 1;
6284
6285error:
6286 pthread_mutex_unlock(&registry->lock);
6287error_rcu_unlock:
6288 rcu_read_unlock();
6289 free(fields);
6290 return ret;
6291}
6292
6293/*
6294 * Add event to the UST channel registry. When the event is added to the
6295 * registry, the metadata is also created. Once done, this replies to the
6296 * application with the appropriate error code.
6297 *
6298 * The session UST registry lock is acquired in the function.
6299 *
6300 * On success 0 is returned else a negative value.
6301 */
6302static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
6303 char *sig, size_t nr_fields, struct lttng_ust_ctl_field *fields,
6304 int loglevel_value, char *model_emf_uri)
6305{
6306 int ret, ret_code;
6307 uint32_t event_id = 0;
6308 uint64_t chan_reg_key;
6309 struct ust_app *app;
6310 struct ust_app_channel *ua_chan;
6311 struct ust_app_session *ua_sess;
6312 struct ust_registry_session *registry;
6313
6314 rcu_read_lock();
6315
6316 /* Lookup application. If not found, there is a code flow error. */
6317 app = find_app_by_notify_sock(sock);
6318 if (!app) {
6319 DBG("Application socket %d is being torn down. Abort event notify",
6320 sock);
6321 ret = 0;
6322 goto error_rcu_unlock;
6323 }
6324
6325 /* Lookup channel by UST object descriptor. */
6326 ua_chan = find_channel_by_objd(app, cobjd);
6327 if (!ua_chan) {
6328 DBG("Application channel is being torn down. Abort event notify");
6329 ret = 0;
6330 goto error_rcu_unlock;
6331 }
6332
6333 assert(ua_chan->session);
6334 ua_sess = ua_chan->session;
6335
6336 registry = get_session_registry(ua_sess);
6337 if (!registry) {
6338 DBG("Application session is being torn down. Abort event notify");
6339 ret = 0;
6340 goto error_rcu_unlock;
6341 }
6342
6343 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
6344 chan_reg_key = ua_chan->tracing_channel_id;
6345 } else {
6346 chan_reg_key = ua_chan->key;
6347 }
6348
6349 pthread_mutex_lock(&registry->lock);
6350
6351 /*
6352 * From this point on, this call acquires ownership of the sig, fields
6353 * and model_emf_uri, meaning any needed frees are done inside it. These
6354 * three variables MUST NOT be read/written after this.
6355 */
6356 ret_code = ust_registry_create_event(registry, chan_reg_key,
6357 sobjd, cobjd, name, sig, nr_fields, fields,
6358 loglevel_value, model_emf_uri, ua_sess->buffer_type,
6359 &event_id, app);
6360 sig = NULL;
6361 fields = NULL;
6362 model_emf_uri = NULL;
6363
6364 /*
6365 * The return value is returned to ustctl so that, in case of an error,
6366 * the application can be notified. It is important not to return a
6367 * negative error here or else the application will get closed.
6368 */
6369 ret = lttng_ust_ctl_reply_register_event(sock, event_id, ret_code);
6370 if (ret < 0) {
6371 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6372 ERR("UST app reply event failed with ret %d", ret);
6373 } else {
6374 DBG3("UST app reply event failed. Application died");
6375 }
6376 /*
6377 * No need to wipe the created event since the application socket
6378 * will get closed on error, cleaning up everything by itself.
6379 */
6380 goto error;
6381 }
6382
6383 DBG3("UST registry event %s with id %" PRId32 " added successfully",
6384 name, event_id);
6385
6386error:
6387 pthread_mutex_unlock(&registry->lock);
6388error_rcu_unlock:
6389 rcu_read_unlock();
6390 free(sig);
6391 free(fields);
6392 free(model_emf_uri);
6393 return ret;
6394}
6395
6396/*
6397 * Add enum to the UST session registry. Once done, this replies to the
6398 * application with the appropriate error code.
6399 *
6400 * The session UST registry lock is acquired within this function.
6401 *
6402 * On success 0 is returned else a negative value.
6403 */
6404static int add_enum_ust_registry(int sock, int sobjd, char *name,
6405 struct lttng_ust_ctl_enum_entry *entries, size_t nr_entries)
6406{
6407 int ret = 0, ret_code;
6408 struct ust_app *app;
6409 struct ust_app_session *ua_sess;
6410 struct ust_registry_session *registry;
6411 uint64_t enum_id = -1ULL;
6412
6413 rcu_read_lock();
6414
6415 /* Lookup application. If not found, there is a code flow error. */
6416 app = find_app_by_notify_sock(sock);
6417 if (!app) {
6418 /* Not an error; the application is simply being torn down. */
6419 DBG("Application socket %d is being torn down. Aborting enum registration",
6420 sock);
6421 free(entries);
6422 goto error_rcu_unlock;
6423 }
6424
6425 /* Lookup session by UST object descriptor. */
6426 ua_sess = find_session_by_objd(app, sobjd);
6427 if (!ua_sess) {
6428 /* Not an error; the application session is simply being torn down. */
6429 DBG("Application session is being torn down (session not found). Aborting enum registration.");
6430 free(entries);
6431 goto error_rcu_unlock;
6432 }
6433
6434 registry = get_session_registry(ua_sess);
6435 if (!registry) {
6436 DBG("Application session is being torn down (registry not found). Aborting enum registration.");
6437 free(entries);
6438 goto error_rcu_unlock;
6439 }
6440
6441 pthread_mutex_lock(&registry->lock);
6442
6443 /*
6444 * From this point on, the callee acquires the ownership of
6445 * entries. The variable entries MUST NOT be read/written after
6446 * this call.
6447 */
6448 ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
6449 entries, nr_entries, &enum_id);
6450 entries = NULL;
6451
6452 /*
6453 * The return value is returned to ustctl so that, in case of an error,
6454 * the application can be notified. It is important not to return a
6455 * negative error here or else the application will get closed.
6456 */
6457 ret = lttng_ust_ctl_reply_register_enum(sock, enum_id, ret_code);
6458 if (ret < 0) {
6459 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6460 ERR("UST app reply enum failed with ret %d", ret);
6461 } else {
6462 DBG3("UST app reply enum failed. Application died");
6463 }
6464 /*
6465 * No need to wipe the created enum since the application socket
6466 * will get closed on error, cleaning up everything by itself.
6467 */
6468 goto error;
6469 }
6470
6471 DBG3("UST registry enum %s added successfully or already found", name);
6472
6473error:
6474 pthread_mutex_unlock(&registry->lock);
6475error_rcu_unlock:
6476 rcu_read_unlock();
6477 return ret;
6478}
6479
6480/*
6481 * Handle application notification through the given notify socket.
6482 *
6483 * Return 0 on success or else a negative value.
6484 */
6485int ust_app_recv_notify(int sock)
6486{
6487 int ret;
6488 enum lttng_ust_ctl_notify_cmd cmd;
6489
6490 DBG3("UST app receiving notify from sock %d", sock);
6491
6492 ret = lttng_ust_ctl_recv_notify(sock, &cmd);
6493 if (ret < 0) {
6494 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6495 ERR("UST app recv notify failed with ret %d", ret);
6496 } else {
6497 DBG3("UST app recv notify failed. Application died");
6498 }
6499 goto error;
6500 }
6501
6502 switch (cmd) {
6503 case LTTNG_UST_CTL_NOTIFY_CMD_EVENT:
6504 {
6505 int sobjd, cobjd, loglevel_value;
6506 char name[LTTNG_UST_ABI_SYM_NAME_LEN], *sig, *model_emf_uri;
6507 size_t nr_fields;
6508 struct lttng_ust_ctl_field *fields;
6509
6510 DBG2("UST app ustctl register event received");
6511
6512 ret = lttng_ust_ctl_recv_register_event(sock, &sobjd, &cobjd, name,
6513 &loglevel_value, &sig, &nr_fields, &fields,
6514 &model_emf_uri);
6515 if (ret < 0) {
6516 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6517 ERR("UST app recv event failed with ret %d", ret);
6518 } else {
6519 DBG3("UST app recv event failed. Application died");
6520 }
6521 goto error;
6522 }
6523
6524 /*
6525 * Add event to the UST registry coming from the notify socket. This
6526 * call will free, if needed, the sig, fields and model_emf_uri. This
6527 * code path loses ownership of these variables and transfers it to
6528 * that function.
6529 */
6530 ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
6531 fields, loglevel_value, model_emf_uri);
6532 if (ret < 0) {
6533 goto error;
6534 }
6535
6536 break;
6537 }
6538 case LTTNG_UST_CTL_NOTIFY_CMD_CHANNEL:
6539 {
6540 int sobjd, cobjd;
6541 size_t nr_fields;
6542 struct lttng_ust_ctl_field *fields;
6543
6544 DBG2("UST app ustctl register channel received");
6545
6546 ret = lttng_ust_ctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
6547 &fields);
6548 if (ret < 0) {
6549 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6550 ERR("UST app recv channel failed with ret %d", ret);
6551 } else {
6552 DBG3("UST app recv channel failed. Application died");
6553 }
6554 goto error;
6555 }
6556
6557 /*
6558 * Ownership of the fields is transferred to this function call,
6559 * meaning that they will be freed if needed. After this, it is
6560 * invalid to access or clean up the fields.
6561 */
6562 ret = reply_ust_register_channel(sock, cobjd, nr_fields,
6563 fields);
6564 if (ret < 0) {
6565 goto error;
6566 }
6567
6568 break;
6569 }
6570 case LTTNG_UST_CTL_NOTIFY_CMD_ENUM:
6571 {
6572 int sobjd;
6573 char name[LTTNG_UST_ABI_SYM_NAME_LEN];
6574 size_t nr_entries;
6575 struct lttng_ust_ctl_enum_entry *entries;
6576
6577 DBG2("UST app ustctl register enum received");
6578
6579 ret = lttng_ust_ctl_recv_register_enum(sock, &sobjd, name,
6580 &entries, &nr_entries);
6581 if (ret < 0) {
6582 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
6583 ERR("UST app recv enum failed with ret %d", ret);
6584 } else {
6585 DBG3("UST app recv enum failed. Application died");
6586 }
6587 goto error;
6588 }
6589
6590 /* Callee assumes ownership of entries */
6591 ret = add_enum_ust_registry(sock, sobjd, name,
6592 entries, nr_entries);
6593 if (ret < 0) {
6594 goto error;
6595 }
6596
6597 break;
6598 }
6599 default:
6600 /* Should NEVER happen. */
6601 assert(0);
6602 }
6603
6604error:
6605 return ret;
6606}
6607
6608/*
6609 * Once the notify socket hangs up, this is called. First, it tries to find the
6610 * corresponding application. On failure, the call_rcu to close the socket is
6611 * executed. If an application is found, it tries to delete it from the notify
6612 * socket hash table. Whatever the result, it proceeds to the call_rcu.
6613 *
6614 * Note that an object needs to be allocated here; on ENOMEM failure,
6615 * the call RCU is not done but the rest of the cleanup is.
6616 */
6617void ust_app_notify_sock_unregister(int sock)
6618{
6619 int err_enomem = 0;
6620 struct lttng_ht_iter iter;
6621 struct ust_app *app;
6622 struct ust_app_notify_sock_obj *obj;
6623
6624 assert(sock >= 0);
6625
6626 rcu_read_lock();
6627
6628 obj = zmalloc(sizeof(*obj));
6629 if (!obj) {
6630 /*
6631 * An ENOMEM is kind of uncool. If this strikes we continue the
6632 * procedure but the call_rcu will not be called. In this case, we
6633 * accept the fd leak rather than possibly creating an unsynchronized
6634 * state between threads.
6635 *
6636 * TODO: The notify object should be created once the notify socket is
6637 * registered and stored independently from the ust app object. The
6638 * tricky part is to synchronize the teardown of the application and
6639 * this notify object. Let's keep that in mind so we can avoid this
6640 * kind of shenanigans with ENOMEM in the teardown path.
6641 */
6642 err_enomem = 1;
6643 } else {
6644 obj->fd = sock;
6645 }
6646
6647 DBG("UST app notify socket unregister %d", sock);
6648
6649 /*
6650 * Lookup application by notify socket. If this fails, this means that the
6651 * hash table delete has already been done by the application
6652 * unregistration process so we can safely close the notify socket in a
6653 * call RCU.
6654 */
6655 app = find_app_by_notify_sock(sock);
6656 if (!app) {
6657 goto close_socket;
6658 }
6659
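 /* Point the iterator at the app's notify socket node so it can be deleted below. */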
6660 iter.iter.node = &app->notify_sock_n.node;
6661
6662 /*
6663 * Whether we fail or succeed here, in both cases we have to close
6664 * the socket after a grace period, hence the call RCU below. If
6665 * the deletion is successful, the application is no longer visible
6666 * to other threads; if it fails, it means it was already deleted
6667 * from the hash table, so either way we just have to close the
6668 * socket.
6669 */
6670 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
6671
6672close_socket:
6673 rcu_read_unlock();
6674
6675 /*
6676 * Close the socket after a grace period to avoid it being reused
6677 * before the application object is freed, creating a potential race
6678 * between threads trying to add a unique entry to the global hash table.
6679 */
6680 if (!err_enomem) {
6681 call_rcu(&obj->head, close_notify_sock_rcu);
6682 }
6683}
6684
6685/*
6686 * Destroy a ust app data structure and free its memory.
6687 */
6688void ust_app_destroy(struct ust_app *app)
6689{
6690 if (!app) {
6691 return;
6692 }
6693
6694 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
6695}
6696
6697/*
6698 * Take a snapshot for a given UST session. The snapshot is sent to the given
6699 * output.
6700 *
6701 * Returns LTTNG_OK on success or a LTTNG_ERR error code.
6702 */
6703enum lttng_error_code ust_app_snapshot_record(
6704 const struct ltt_ust_session *usess,
6705 const struct consumer_output *output, int wait,
6706 uint64_t nb_packets_per_stream)
6707{
6708 int ret = 0;
6709 enum lttng_error_code status = LTTNG_OK;
6710 struct lttng_ht_iter iter;
6711 struct ust_app *app;
6712 char *trace_path = NULL;
6713
6714 assert(usess);
6715 assert(output);
6716
6717 rcu_read_lock();
6718
6719 switch (usess->buffer_type) {
6720 case LTTNG_BUFFER_PER_UID:
6721 {
6722 struct buffer_reg_uid *reg;
6723
6724 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6725 struct buffer_reg_channel *buf_reg_chan;
6726 struct consumer_socket *socket;
6727 char pathname[PATH_MAX];
6728 size_t consumer_path_offset = 0;
6729
6730 if (!reg->registry->reg.ust->metadata_key) {
6731 /* Skip since no metadata is present */
6732 continue;
6733 }
6734
6735 /* Get consumer socket to use to push the metadata. */
6736 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
6737 usess->consumer);
6738 if (!socket) {
6739 status = LTTNG_ERR_INVALID;
6740 goto error;
6741 }
6742
6743 memset(pathname, 0, sizeof(pathname));
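 /*
  * Note: the domain subdirectory ("ust/") is appended by
  * setup_channel_trace_path(); it must not be repeated here,
  * otherwise snapshots would land under a duplicated
  * "ust/ust" path.
  */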
6744 ret = snprintf(pathname, sizeof(pathname),
6745 DEFAULT_UST_TRACE_UID_PATH,
6746 reg->uid, reg->bits_per_long);
6747 if (ret < 0) {
6748 PERROR("snprintf snapshot path");
6749 status = LTTNG_ERR_INVALID;
6750 goto error;
6751 }
6752 /* Free path allocated on previous iteration. */
6753 free(trace_path);
6754 trace_path = setup_channel_trace_path(usess->consumer, pathname,
6755 &consumer_path_offset);
6756 if (!trace_path) {
6757 status = LTTNG_ERR_INVALID;
6758 goto error;
6759 }
6760 /* Snapshot every data channel of this buffer registry. */
6761 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
6762 buf_reg_chan, node.node) {
6763 status = consumer_snapshot_channel(socket,
6764 buf_reg_chan->consumer_key,
6765 output, 0, usess->uid,
6766 usess->gid, &trace_path[consumer_path_offset], wait,
6767 nb_packets_per_stream);
6768 if (status != LTTNG_OK) {
6769 goto error;
6770 }
6771 }
6772 status = consumer_snapshot_channel(socket,
6773 reg->registry->reg.ust->metadata_key, output, 1,
6774 usess->uid, usess->gid, &trace_path[consumer_path_offset],
6775 wait, 0);
6776 if (status != LTTNG_OK) {
6777 goto error;
6778 }
6779 }
6780 break;
6781 }
6782 case LTTNG_BUFFER_PER_PID:
6783 {
6784 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6785 struct consumer_socket *socket;
6786 struct lttng_ht_iter chan_iter;
6787 struct ust_app_channel *ua_chan;
6788 struct ust_app_session *ua_sess;
6789 struct ust_registry_session *registry;
6790 char pathname[PATH_MAX];
6791 size_t consumer_path_offset = 0;
6792
6793 ua_sess = lookup_session_by_app(usess, app);
6794 if (!ua_sess) {
6795 /* Session not associated with this app. */
6796 continue;
6797 }
6798
6799 /* Get the right consumer socket for the application. */
6800 socket = consumer_find_socket_by_bitness(app->bits_per_long,
6801 output);
6802 if (!socket) {
6803 status = LTTNG_ERR_INVALID;
6804 goto error;
6805 }
6806
6807 /* Use the session path as-is; the domain directory is added by setup_channel_trace_path(). */
6808 memset(pathname, 0, sizeof(pathname));
6809 ret = snprintf(pathname, sizeof(pathname), "%s",
6810 ua_sess->path);
6811 if (ret < 0) {
6812 status = LTTNG_ERR_INVALID;
6813 PERROR("snprintf snapshot path");
6814 goto error;
6815 }
6816 /* Free path allocated on previous iteration. */
6817 free(trace_path);
6818 trace_path = setup_channel_trace_path(usess->consumer, pathname,
6819 &consumer_path_offset);
6820 if (!trace_path) {
6821 status = LTTNG_ERR_INVALID;
6822 goto error;
6823 }
6824 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
6825 ua_chan, node.node) {
6826 status = consumer_snapshot_channel(socket,
6827 ua_chan->key, output, 0,
6828 lttng_credentials_get_uid(&ua_sess->effective_credentials),
6829 lttng_credentials_get_gid(&ua_sess->effective_credentials),
6830 &trace_path[consumer_path_offset], wait,
6831 nb_packets_per_stream);
6832 switch (status) {
6833 case LTTNG_OK:
6834 break;
6835 case LTTNG_ERR_CHAN_NOT_FOUND:
6836 continue;
6837 default:
6838 goto error;
6839 }
6840 }
6841
6842 registry = get_session_registry(ua_sess);
6843 if (!registry) {
6844 DBG("Application session is being torn down. Skip application.");
6845 continue;
6846 }
6847 status = consumer_snapshot_channel(socket,
6848 registry->metadata_key, output, 1,
6849 lttng_credentials_get_uid(&ua_sess->effective_credentials),
6850 lttng_credentials_get_gid(&ua_sess->effective_credentials),
6851 &trace_path[consumer_path_offset], wait, 0);
6852 switch (status) {
6853 case LTTNG_OK:
6854 break;
6855 case LTTNG_ERR_CHAN_NOT_FOUND:
6856 continue;
6857 default:
6858 goto error;
6859 }
6860 }
6861 break;
6862 }
6863 default:
6864 assert(0);
6865 break;
6866 }
6867
6868error:
6869 free(trace_path);
6870 rcu_read_unlock();
6871 return status;
6872}
6873
6874/*
6875 * Return the size taken by one more packet per stream.
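 *
 * For each channel that can still produce more packets
 * (cur_nr_packets < num_subbuf), one extra packet per stream costs
 * subbuf_size * stream_count bytes; for example, 256 kB sub-buffers
 * and 8 streams add 2 MB per additional packet.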
6876 */
6877uint64_t ust_app_get_size_one_more_packet_per_stream(
6878 const struct ltt_ust_session *usess, uint64_t cur_nr_packets)
6879{
6880 uint64_t tot_size = 0;
6881 struct ust_app *app;
6882 struct lttng_ht_iter iter;
6883
6884 assert(usess);
6885
6886 switch (usess->buffer_type) {
6887 case LTTNG_BUFFER_PER_UID:
6888 {
6889 struct buffer_reg_uid *reg;
6890
6891 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6892 struct buffer_reg_channel *buf_reg_chan;
6893
6894 rcu_read_lock();
6895 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
6896 buf_reg_chan, node.node) {
6897 if (cur_nr_packets >= buf_reg_chan->num_subbuf) {
6898 /*
6899 * Don't take the channel into account if we
6900 * already grabbed all its packets.
6901 */
6902 continue;
6903 }
6904 tot_size += buf_reg_chan->subbuf_size * buf_reg_chan->stream_count;
6905 }
6906 rcu_read_unlock();
6907 }
6908 break;
6909 }
6910 case LTTNG_BUFFER_PER_PID:
6911 {
6912 rcu_read_lock();
6913 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6914 struct ust_app_channel *ua_chan;
6915 struct ust_app_session *ua_sess;
6916 struct lttng_ht_iter chan_iter;
6917
6918 ua_sess = lookup_session_by_app(usess, app);
6919 if (!ua_sess) {
6920 /* Session not associated with this app. */
6921 continue;
6922 }
6923
6924 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
6925 ua_chan, node.node) {
6926 if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
6927 /*
6928 * Don't take the channel into account if we
6929 * already grabbed all its packets.
6930 */
6931 continue;
6932 }
6933 tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
6934 }
6935 }
6936 rcu_read_unlock();
6937 break;
6938 }
6939 default:
6940 assert(0);
6941 break;
6942 }
6943
6944 return tot_size;
6945}
6946
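/*
 * Fetch the runtime statistics of a per-UID channel: lost packets when
 * the channel is in overwrite mode, discarded events otherwise.
 */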
6947int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
6948 struct cds_list_head *buffer_reg_uid_list,
6949 struct consumer_output *consumer, uint64_t uchan_id,
6950 int overwrite, uint64_t *discarded, uint64_t *lost)
6951{
6952 int ret;
6953 uint64_t consumer_chan_key;
6954
6955 *discarded = 0;
6956 *lost = 0;
6957
6958 ret = buffer_reg_uid_consumer_channel_key(
6959 buffer_reg_uid_list, uchan_id, &consumer_chan_key);
6960 if (ret < 0) {
6961 /* Not found */
6962 ret = 0;
6963 goto end;
6964 }
6965
6966 if (overwrite) {
6967 ret = consumer_get_lost_packets(ust_session_id,
6968 consumer_chan_key, consumer, lost);
6969 } else {
6970 ret = consumer_get_discarded_events(ust_session_id,
6971 consumer_chan_key, consumer, discarded);
6972 }
6973
6974end:
6975 return ret;
6976}
6977
6978int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
6979 struct ltt_ust_channel *uchan,
6980 struct consumer_output *consumer, int overwrite,
6981 uint64_t *discarded, uint64_t *lost)
6982{
6983 int ret = 0;
6984 struct lttng_ht_iter iter;
6985 struct lttng_ht_node_str *ua_chan_node;
6986 struct ust_app *app;
6987 struct ust_app_session *ua_sess;
6988 struct ust_app_channel *ua_chan;
6989
6990 *discarded = 0;
6991 *lost = 0;
6992
6993 rcu_read_lock();
6994 /*
6995 * Iterate over every registered application. Sum counters for
6996 * all applications containing the requested session and channel.
6997 */
6998 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6999 struct lttng_ht_iter uiter;
7000
7001 ua_sess = lookup_session_by_app(usess, app);
7002 if (ua_sess == NULL) {
7003 continue;
7004 }
7005
7006 /* Get channel */
7007 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
7008 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
7009 /* If the session is found for the app, the channel must be there */
7010 assert(ua_chan_node);
7011
7012 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
7013
7014 if (overwrite) {
7015 uint64_t _lost;
7016
7017 ret = consumer_get_lost_packets(usess->id, ua_chan->key,
7018 consumer, &_lost);
7019 if (ret < 0) {
7020 break;
7021 }
7022 (*lost) += _lost;
7023 } else {
7024 uint64_t _discarded;
7025
7026 ret = consumer_get_discarded_events(usess->id,
7027 ua_chan->key, consumer, &_discarded);
7028 if (ret < 0) {
7029 break;
7030 }
7031 (*discarded) += _discarded;
7032 }
7033 }
7034
7035 rcu_read_unlock();
7036 return ret;
7037}
7038
7039static
7040int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
7041 struct ust_app *app)
7042{
7043 int ret = 0;
7044 struct ust_app_session *ua_sess;
7045
7046 DBG("Regenerating the metadata for ust app pid %d", app->pid);
7047
7048 rcu_read_lock();
7049
7050 ua_sess = lookup_session_by_app(usess, app);
7051 if (ua_sess == NULL) {
7052 /* The session is in the teardown process. Ignore and continue. */
7053 goto end;
7054 }
7055
7056 pthread_mutex_lock(&ua_sess->lock);
7057
7058 if (ua_sess->deleted) {
7059 goto end_unlock;
7060 }
7061
7062 pthread_mutex_lock(&app->sock_lock);
7063 ret = lttng_ust_ctl_regenerate_statedump(app->sock, ua_sess->handle);
7064 pthread_mutex_unlock(&app->sock_lock);
7065
7066end_unlock:
7067 pthread_mutex_unlock(&ua_sess->lock);
7068
7069end:
7070 rcu_read_unlock();
7071 health_code_update();
7072 return ret;
7073}
7074
7075/*
7076 * Regenerate the statedump for each app in the session.
7077 */
7078int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
7079{
7080 int ret = 0;
7081 struct lttng_ht_iter iter;
7082 struct ust_app *app;
7083
7084 DBG("Regenerating the metadata for all UST apps");
7085
7086 rcu_read_lock();
7087
7088 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7089 if (!app->compatible) {
7090 continue;
7091 }
7092
7093 ret = ust_app_regenerate_statedump(usess, app);
7094 if (ret < 0) {
7095 /* Continue to the next app even on error */
7096 continue;
7097 }
7098 }
7099
7100 rcu_read_unlock();
7101
7102 return 0;
7103}
7104
7105/*
7106 * Rotate all the channels of a session.
7107 *
7108 * Return LTTNG_OK on success or else an LTTng error code.
7109 */
7110enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
7111{
7112 int ret;
7113 enum lttng_error_code cmd_ret = LTTNG_OK;
7114 struct lttng_ht_iter iter;
7115 struct ust_app *app;
7116 struct ltt_ust_session *usess = session->ust_session;
7117
7118 assert(usess);
7119
7120 rcu_read_lock();
7121
7122 switch (usess->buffer_type) {
7123 case LTTNG_BUFFER_PER_UID:
7124 {
7125 struct buffer_reg_uid *reg;
7126
7127 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
7128 struct buffer_reg_channel *buf_reg_chan;
7129 struct consumer_socket *socket;
7130
7131 /* Get consumer socket to use to push the metadata. */
7132 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
7133 usess->consumer);
7134 if (!socket) {
7135 cmd_ret = LTTNG_ERR_INVALID;
7136 goto error;
7137 }
7138
7139 /* Rotate the data channels. */
7140 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
7141 buf_reg_chan, node.node) {
7142 ret = consumer_rotate_channel(socket,
7143 buf_reg_chan->consumer_key,
7144 usess->uid, usess->gid,
7145 usess->consumer,
7146 /* is_metadata_channel */ false);
7147 if (ret < 0) {
7148 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
7149 goto error;
7150 }
7151 }
7152
7153 /*
7154 * The metadata channel might not be present.
7155 *
7156 * Consumer stream allocation can be done
7157 * asynchronously and can fail on intermediary
7158 * operations (e.g. adding a context) and lead to data
7159 * channels created with no metadata channel.
7160 */
7161 if (!reg->registry->reg.ust->metadata_key) {
7162 /* Skip since no metadata is present. */
7163 continue;
7164 }
7165
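 /* Push any metadata still cached in the registry to the consumer before rotating the metadata channel. */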
7166 (void) push_metadata(reg->registry->reg.ust, usess->consumer);
7167
7168 ret = consumer_rotate_channel(socket,
7169 reg->registry->reg.ust->metadata_key,
7170 usess->uid, usess->gid,
7171 usess->consumer,
7172 /* is_metadata_channel */ true);
7173 if (ret < 0) {
7174 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
7175 goto error;
7176 }
7177 }
7178 break;
7179 }
7180 case LTTNG_BUFFER_PER_PID:
7181 {
7182 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7183 struct consumer_socket *socket;
7184 struct lttng_ht_iter chan_iter;
7185 struct ust_app_channel *ua_chan;
7186 struct ust_app_session *ua_sess;
7187 struct ust_registry_session *registry;
7188
7189 ua_sess = lookup_session_by_app(usess, app);
7190 if (!ua_sess) {
7191 /* Session not associated with this app. */
7192 continue;
7193 }
7194
7195 /* Get the right consumer socket for the application. */
7196 socket = consumer_find_socket_by_bitness(app->bits_per_long,
7197 usess->consumer);
7198 if (!socket) {
7199 cmd_ret = LTTNG_ERR_INVALID;
7200 goto error;
7201 }
7202
7203 registry = get_session_registry(ua_sess);
7204 if (!registry) {
7205 DBG("Application session is being torn down. Skip application.");
7206 continue;
7207 }
7208
7209 /* Rotate the data channels. */
7210 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
7211 ua_chan, node.node) {
7212 ret = consumer_rotate_channel(socket,
7213 ua_chan->key,
7214 lttng_credentials_get_uid(&ua_sess->effective_credentials),
7215 lttng_credentials_get_gid(&ua_sess->effective_credentials),
7216 ua_sess->consumer,
7217 /* is_metadata_channel */ false);
7218 if (ret < 0) {
7219 /* Per-PID buffer and application going away. */
7220 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
7221 continue;
7222 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
7223 goto error;
7224 }
7225 }
7226
7227 /* Rotate the metadata channel. */
7228 (void) push_metadata(registry, usess->consumer);
7229 ret = consumer_rotate_channel(socket,
7230 registry->metadata_key,
7231 lttng_credentials_get_uid(&ua_sess->effective_credentials),
7232 lttng_credentials_get_gid(&ua_sess->effective_credentials),
7233 ua_sess->consumer,
7234 /* is_metadata_channel */ true);
7235 if (ret < 0) {
7236 /* Per-PID buffer and application going away. */
7237 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
7238 continue;
7239 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
7240 goto error;
7241 }
7242 }
7243 break;
7244 }
7245 default:
7246 assert(0);
7247 break;
7248 }
7249
7250 cmd_ret = LTTNG_OK;
7251
7252error:
7253 rcu_read_unlock();
7254 return cmd_ret;
7255}
7256
7257enum lttng_error_code ust_app_create_channel_subdirectories(
7258 const struct ltt_ust_session *usess)
7259{
7260 enum lttng_error_code ret = LTTNG_OK;
7261 struct lttng_ht_iter iter;
7262 enum lttng_trace_chunk_status chunk_status;
7263 char *pathname_index;
7264 int fmt_ret;
7265
7266 assert(usess->current_trace_chunk);
7267 rcu_read_lock();
7268
7269 switch (usess->buffer_type) {
7270 case LTTNG_BUFFER_PER_UID:
7271 {
7272 struct buffer_reg_uid *reg;
7273
7274 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
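 /* Per-UID index path, e.g. "ust/uid/<uid>/<bits>-bit/index" (illustrative; the exact layout comes from the DEFAULT_* defines). */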
7275 fmt_ret = asprintf(&pathname_index,
7276 DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH "/" DEFAULT_INDEX_DIR,
7277 reg->uid, reg->bits_per_long);
7278 if (fmt_ret < 0) {
7279 ERR("Failed to format channel index directory");
7280 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7281 goto error;
7282 }
7283
7284 /*
7285 * Create the index subdirectory which will take care
7286 * of implicitly creating the channel's path.
7287 */
7288 chunk_status = lttng_trace_chunk_create_subdirectory(
7289 usess->current_trace_chunk,
7290 pathname_index);
7291 free(pathname_index);
7292 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
7293 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7294 goto error;
7295 }
7296 }
7297 break;
7298 }
7299 case LTTNG_BUFFER_PER_PID:
7300 {
7301 struct ust_app *app;
7302
7303 /*
7304 * Create the top-level ust/ directory in case no apps are running.
7305 */
7306 chunk_status = lttng_trace_chunk_create_subdirectory(
7307 usess->current_trace_chunk,
7308 DEFAULT_UST_TRACE_DIR);
7309 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
7310 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7311 goto error;
7312 }
7313
7314 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
7315 pid_n.node) {
7316 struct ust_app_session *ua_sess;
7317 struct ust_registry_session *registry;
7318
7319 ua_sess = lookup_session_by_app(usess, app);
7320 if (!ua_sess) {
7321 /* Session not associated with this app. */
7322 continue;
7323 }
7324
7325 registry = get_session_registry(ua_sess);
7326 if (!registry) {
7327 DBG("Application session is being torn down. Skip application.");
7328 continue;
7329 }
7330
7331 fmt_ret = asprintf(&pathname_index,
7332 DEFAULT_UST_TRACE_DIR "/%s/" DEFAULT_INDEX_DIR,
7333 ua_sess->path);
7334 if (fmt_ret < 0) {
7335 ERR("Failed to format channel index directory");
7336 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7337 goto error;
7338 }
7339 /*
7340 * Create the index subdirectory which will take care
7341 * of implicitly creating the channel's path.
7342 */
7343 chunk_status = lttng_trace_chunk_create_subdirectory(
7344 usess->current_trace_chunk,
7345 pathname_index);
7346 free(pathname_index);
7347 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
7348 ret = LTTNG_ERR_CREATE_DIR_FAIL;
7349 goto error;
7350 }
7351 }
7352 break;
7353 }
7354 default:
7355 abort();
7356 }
7357
7358 ret = LTTNG_OK;
7359error:
7360 rcu_read_unlock();
7361 return ret;
7362}
7363
7364/*
7365 * Clear all the channels of a session.
7366 *
7367 * Return LTTNG_OK on success or else an LTTng error code.
7368 */
7369enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
7370{
7371 int ret;
7372 enum lttng_error_code cmd_ret = LTTNG_OK;
7373 struct lttng_ht_iter iter;
7374 struct ust_app *app;
7375 struct ltt_ust_session *usess = session->ust_session;
7376
7377 assert(usess);
7378
7379 rcu_read_lock();
7380
7381 if (usess->active) {
7382 ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
7383 cmd_ret = LTTNG_ERR_FATAL;
7384 goto end;
7385 }
7386
7387 switch (usess->buffer_type) {
7388 case LTTNG_BUFFER_PER_UID:
7389 {
7390 struct buffer_reg_uid *reg;
7391
7392 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
7393 struct buffer_reg_channel *buf_reg_chan;
7394 struct consumer_socket *socket;
7395
7396 /* Get consumer socket to use to push the metadata. */
7397 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
7398 usess->consumer);
7399 if (!socket) {
7400 cmd_ret = LTTNG_ERR_INVALID;
7401 goto error_socket;
7402 }
7403
7404 /* Clear the data channels. */
7405 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
7406 buf_reg_chan, node.node) {
7407 ret = consumer_clear_channel(socket,
7408 buf_reg_chan->consumer_key);
7409 if (ret < 0) {
7410 goto error;
7411 }
7412 }
7413
7414 (void) push_metadata(reg->registry->reg.ust, usess->consumer);
7415
7416 /*
7417 * Clear the metadata channel.
7418 * Metadata channel is not cleared per se but we still need to
7419 * perform a rotation operation on it behind the scenes.
7420 */
7421 ret = consumer_clear_channel(socket,
7422 reg->registry->reg.ust->metadata_key);
7423 if (ret < 0) {
7424 goto error;
7425 }
7426 }
7427 break;
7428 }
7429 case LTTNG_BUFFER_PER_PID:
7430 {
7431 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7432 struct consumer_socket *socket;
7433 struct lttng_ht_iter chan_iter;
7434 struct ust_app_channel *ua_chan;
7435 struct ust_app_session *ua_sess;
7436 struct ust_registry_session *registry;
7437
7438 ua_sess = lookup_session_by_app(usess, app);
7439 if (!ua_sess) {
7440 /* Session not associated with this app. */
7441 continue;
7442 }
7443
7444 /* Get the right consumer socket for the application. */
7445 socket = consumer_find_socket_by_bitness(app->bits_per_long,
7446 usess->consumer);
7447 if (!socket) {
7448 cmd_ret = LTTNG_ERR_INVALID;
7449 goto error_socket;
7450 }
7451
7452 registry = get_session_registry(ua_sess);
7453 if (!registry) {
7454 DBG("Application session is being torn down. Skip application.");
7455 continue;
7456 }
7457
7458 /* Clear the data channels. */
7459 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
7460 ua_chan, node.node) {
7461 ret = consumer_clear_channel(socket, ua_chan->key);
7462 if (ret < 0) {
7463 /* Per-PID buffer and application going away. */
7464 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
7465 continue;
7466 }
7467 goto error;
7468 }
7469 }
7470
7471 (void) push_metadata(registry, usess->consumer);
7472
7473 /*
7474 * Clear the metadata channel.
7475 * Metadata channel is not cleared per se but we still need to
7476 * perform a rotation operation on it behind the scenes.
7477 */
7478 ret = consumer_clear_channel(socket, registry->metadata_key);
7479 if (ret < 0) {
7480 /* Per-PID buffer and application going away. */
7481 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
7482 continue;
7483 }
7484 goto error;
7485 }
7486 }
7487 break;
7488 }
7489 default:
7490 assert(0);
7491 break;
7492 }
7493
7494 cmd_ret = LTTNG_OK;
7495 goto end;
7496
7497error:
7498 switch (-ret) {
7499 case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED:
7500 cmd_ret = LTTNG_ERR_CLEAR_RELAY_DISALLOWED;
7501 break;
7502 default:
7503 cmd_ret = LTTNG_ERR_CLEAR_FAIL_CONSUMER;
7504 }
7505
7506error_socket:
7507end:
7508 rcu_read_unlock();
7509 return cmd_ret;
7510}
7511
7512/*
7513 * This function skips the metadata channel as the begin/end timestamps of a
7514 * metadata packet are useless.
7515 *
7516 * Moreover, opening a packet after a "clear" will cause problems for live
7517 * sessions as it will introduce padding that was not part of the first trace
7518 * chunk. The relay daemon expects the content of the metadata stream of
7519 * successive metadata trace chunks to be strict supersets of one another.
7520 *
7521 * For example, flushing a packet at the beginning of the metadata stream of
7522 * a trace chunk resulting from a "clear" session command will cause the
7523 * size of the metadata stream of the new trace chunk to not match the size of
7524 * the metadata stream of the original chunk. This will confuse the relay
7525 * daemon as the same "offset" in a metadata stream will no longer point
7526 * to the same content.
7527 */
7528enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
7529{
7530 enum lttng_error_code ret = LTTNG_OK;
7531 struct lttng_ht_iter iter;
7532 struct ltt_ust_session *usess = session->ust_session;
7533
7534 assert(usess);
7535
7536 rcu_read_lock();
7537
7538 switch (usess->buffer_type) {
7539 case LTTNG_BUFFER_PER_UID:
7540 {
7541 struct buffer_reg_uid *reg;
7542
7543 cds_list_for_each_entry (
7544 reg, &usess->buffer_reg_uid_list, lnode) {
7545 struct buffer_reg_channel *buf_reg_chan;
7546 struct consumer_socket *socket;
7547
7548 socket = consumer_find_socket_by_bitness(
7549 reg->bits_per_long, usess->consumer);
7550 if (!socket) {
7551 ret = LTTNG_ERR_FATAL;
7552 goto error;
7553 }
7554
7555 cds_lfht_for_each_entry(reg->registry->channels->ht,
7556 &iter.iter, buf_reg_chan, node.node) {
7557 const int open_ret =
7558 consumer_open_channel_packets(
7559 socket,
7560 buf_reg_chan->consumer_key);
7561
7562 if (open_ret < 0) {
7563 ret = LTTNG_ERR_UNK;
7564 goto error;
7565 }
7566 }
7567 }
7568 break;
7569 }
7570 case LTTNG_BUFFER_PER_PID:
7571 {
7572 struct ust_app *app;
7573
7574 cds_lfht_for_each_entry (
7575 ust_app_ht->ht, &iter.iter, app, pid_n.node) {
7576 struct consumer_socket *socket;
7577 struct lttng_ht_iter chan_iter;
7578 struct ust_app_channel *ua_chan;
7579 struct ust_app_session *ua_sess;
7580 struct ust_registry_session *registry;
7581
7582 ua_sess = lookup_session_by_app(usess, app);
7583 if (!ua_sess) {
7584 /* Session not associated with this app. */
7585 continue;
7586 }
7587
7588 /* Get the right consumer socket for the application. */
7589 socket = consumer_find_socket_by_bitness(
7590 app->bits_per_long, usess->consumer);
7591 if (!socket) {
7592 ret = LTTNG_ERR_FATAL;
7593 goto error;
7594 }
7595
7596 registry = get_session_registry(ua_sess);
7597 if (!registry) {
7598 DBG("Application session is being torn down. Skip application.");
7599 continue;
7600 }
7601
7602 cds_lfht_for_each_entry(ua_sess->channels->ht,
7603 &chan_iter.iter, ua_chan, node.node) {
7604 const int open_ret =
7605 consumer_open_channel_packets(
7606 socket,
7607 ua_chan->key);
7608
7609 if (open_ret < 0) {
7610 /*
7611 * Per-PID buffer and application going
7612 * away.
7613 */
7614 if (open_ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
7615 continue;
7616 }
7617
7618 ret = LTTNG_ERR_UNK;
7619 goto error;
7620 }
7621 }
7622 }
7623 break;
7624 }
7625 default:
7626 abort();
7627 break;
7628 }
7629
7630error:
7631 rcu_read_unlock();
7632 return ret;
7633}