Tests: kernel wildcards
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2 only,
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18 #define _GNU_SOURCE
19 #define _LGPL_SOURCE
20 #include <errno.h>
21 #include <inttypes.h>
22 #include <pthread.h>
23 #include <stdio.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <sys/stat.h>
27 #include <sys/types.h>
28 #include <unistd.h>
29 #include <urcu/compiler.h>
30 #include <lttng/ust-error.h>
31 #include <signal.h>
32
33 #include <common/common.h>
34 #include <common/sessiond-comm/sessiond-comm.h>
35
36 #include "buffer-registry.h"
37 #include "fd-limit.h"
38 #include "health-sessiond.h"
39 #include "ust-app.h"
40 #include "ust-consumer.h"
41 #include "ust-ctl.h"
42 #include "utils.h"
43
44 static
45 int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
46
47 /* Next available channel key. Access under next_channel_key_lock. */
48 static uint64_t _next_channel_key;
49 static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
50
51 /* Next available session ID. Access under next_session_id_lock. */
52 static uint64_t _next_session_id;
53 static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
54
55 /*
56 * Return the incremented value of next_channel_key.
57 */
58 static uint64_t get_next_channel_key(void)
59 {
60 uint64_t ret;
61
62 pthread_mutex_lock(&next_channel_key_lock);
63 ret = ++_next_channel_key;
64 pthread_mutex_unlock(&next_channel_key_lock);
65 return ret;
66 }
67
68 /*
69 * Return the atomically incremented value of next_session_id.
70 */
71 static uint64_t get_next_session_id(void)
72 {
73 uint64_t ret;
74
75 pthread_mutex_lock(&next_session_id_lock);
76 ret = ++_next_session_id;
77 pthread_mutex_unlock(&next_session_id_lock);
78 return ret;
79 }
80
81 static void copy_channel_attr_to_ustctl(
82 struct ustctl_consumer_channel_attr *attr,
83 struct lttng_ust_channel_attr *uattr)
84 {
85 /* Copy event attributes since the layout is different. */
86 attr->subbuf_size = uattr->subbuf_size;
87 attr->num_subbuf = uattr->num_subbuf;
88 attr->overwrite = uattr->overwrite;
89 attr->switch_timer_interval = uattr->switch_timer_interval;
90 attr->read_timer_interval = uattr->read_timer_interval;
91 attr->output = uattr->output;
92 }
93
94 /*
95 * Match function for the hash table lookup.
96 *
97 * It matches an ust app event based on three attributes which are the event
98 * name, the filter bytecode and the loglevel.
99 */
100 static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
101 {
102 struct ust_app_event *event;
103 const struct ust_app_ht_key *key;
104
105 assert(node);
106 assert(_key);
107
108 event = caa_container_of(node, struct ust_app_event, node.node);
109 key = _key;
110
111 /* Match the 4 elements of the key: name, filter, loglevel, exclusions */
112
113 /* Event name */
114 if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
115 goto no_match;
116 }
117
118 /* Event loglevel. */
119 if (event->attr.loglevel != key->loglevel) {
120 if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
121 && key->loglevel == 0 && event->attr.loglevel == -1) {
122 /*
123 * Match is accepted. This is because on event creation, the
124 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
125 * -1 are accepted for this loglevel type since 0 is the one set by
126 * the API when receiving an enable event.
127 */
128 } else {
129 goto no_match;
130 }
131 }
132
133 /* One of the filters is NULL, fail. */
134 if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
135 goto no_match;
136 }
137
138 if (key->filter && event->filter) {
139 /* Both filters exists, check length followed by the bytecode. */
140 if (event->filter->len != key->filter->len ||
141 memcmp(event->filter->data, key->filter->data,
142 event->filter->len) != 0) {
143 goto no_match;
144 }
145 }
146
147 /* One of the exclusions is NULL, fail. */
148 if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
149 goto no_match;
150 }
151
152 if (key->exclusion && event->exclusion) {
153 /* Both exclusions exists, check count followed by the names. */
154 if (event->exclusion->count != key->exclusion->count ||
155 memcmp(event->exclusion->names, key->exclusion->names,
156 event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
157 goto no_match;
158 }
159 }
160
161
162 /* Match. */
163 return 1;
164
165 no_match:
166 return 0;
167 }
168
169 /*
170 * Unique add of an ust app event in the given ht. This uses the custom
171 * ht_match_ust_app_event match function and the event name as hash.
172 */
173 static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
174 struct ust_app_event *event)
175 {
176 struct cds_lfht_node *node_ptr;
177 struct ust_app_ht_key key;
178 struct lttng_ht *ht;
179
180 assert(ua_chan);
181 assert(ua_chan->events);
182 assert(event);
183
184 ht = ua_chan->events;
185 key.name = event->attr.name;
186 key.filter = event->filter;
187 key.loglevel = event->attr.loglevel;
188 key.exclusion = event->exclusion;
189
190 node_ptr = cds_lfht_add_unique(ht->ht,
191 ht->hash_fct(event->node.key, lttng_ht_seed),
192 ht_match_ust_app_event, &key, &event->node.node);
193 assert(node_ptr == &event->node.node);
194 }
195
196 /*
197 * Close the notify socket from the given RCU head object. This MUST be called
198 * through a call_rcu().
199 */
200 static void close_notify_sock_rcu(struct rcu_head *head)
201 {
202 int ret;
203 struct ust_app_notify_sock_obj *obj =
204 caa_container_of(head, struct ust_app_notify_sock_obj, head);
205
206 /* Must have a valid fd here. */
207 assert(obj->fd >= 0);
208
209 ret = close(obj->fd);
210 if (ret) {
211 ERR("close notify sock %d RCU", obj->fd);
212 }
213 lttng_fd_put(LTTNG_FD_APPS, 1);
214
215 free(obj);
216 }
217
218 /*
219 * Return the session registry according to the buffer type of the given
220 * session.
221 *
222 * A registry per UID object MUST exists before calling this function or else
223 * it assert() if not found. RCU read side lock must be acquired.
224 */
225 static struct ust_registry_session *get_session_registry(
226 struct ust_app_session *ua_sess)
227 {
228 struct ust_registry_session *registry = NULL;
229
230 assert(ua_sess);
231
232 switch (ua_sess->buffer_type) {
233 case LTTNG_BUFFER_PER_PID:
234 {
235 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
236 if (!reg_pid) {
237 goto error;
238 }
239 registry = reg_pid->registry->reg.ust;
240 break;
241 }
242 case LTTNG_BUFFER_PER_UID:
243 {
244 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
245 ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
246 if (!reg_uid) {
247 goto error;
248 }
249 registry = reg_uid->registry->reg.ust;
250 break;
251 }
252 default:
253 assert(0);
254 };
255
256 error:
257 return registry;
258 }
259
260 /*
261 * Delete ust context safely. RCU read lock must be held before calling
262 * this function.
263 */
264 static
265 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
266 struct ust_app *app)
267 {
268 int ret;
269
270 assert(ua_ctx);
271
272 if (ua_ctx->obj) {
273 pthread_mutex_lock(&app->sock_lock);
274 ret = ustctl_release_object(sock, ua_ctx->obj);
275 pthread_mutex_unlock(&app->sock_lock);
276 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
277 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
278 sock, ua_ctx->obj->handle, ret);
279 }
280 free(ua_ctx->obj);
281 }
282 free(ua_ctx);
283 }
284
285 /*
286 * Delete ust app event safely. RCU read lock must be held before calling
287 * this function.
288 */
289 static
290 void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
291 struct ust_app *app)
292 {
293 int ret;
294
295 assert(ua_event);
296
297 free(ua_event->filter);
298 if (ua_event->exclusion != NULL)
299 free(ua_event->exclusion);
300 if (ua_event->obj != NULL) {
301 pthread_mutex_lock(&app->sock_lock);
302 ret = ustctl_release_object(sock, ua_event->obj);
303 pthread_mutex_unlock(&app->sock_lock);
304 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
305 ERR("UST app sock %d release event obj failed with ret %d",
306 sock, ret);
307 }
308 free(ua_event->obj);
309 }
310 free(ua_event);
311 }
312
313 /*
314 * Release ust data object of the given stream.
315 *
316 * Return 0 on success or else a negative value.
317 */
318 static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
319 struct ust_app *app)
320 {
321 int ret = 0;
322
323 assert(stream);
324
325 if (stream->obj) {
326 pthread_mutex_lock(&app->sock_lock);
327 ret = ustctl_release_object(sock, stream->obj);
328 pthread_mutex_unlock(&app->sock_lock);
329 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
330 ERR("UST app sock %d release stream obj failed with ret %d",
331 sock, ret);
332 }
333 lttng_fd_put(LTTNG_FD_APPS, 2);
334 free(stream->obj);
335 }
336
337 return ret;
338 }
339
340 /*
341 * Delete ust app stream safely. RCU read lock must be held before calling
342 * this function.
343 */
344 static
345 void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
346 struct ust_app *app)
347 {
348 assert(stream);
349
350 (void) release_ust_app_stream(sock, stream, app);
351 free(stream);
352 }
353
354 /*
355 * We need to execute ht_destroy outside of RCU read-side critical
356 * section and outside of call_rcu thread, so we postpone its execution
357 * using ht_cleanup_push. It is simpler than to change the semantic of
358 * the many callers of delete_ust_app_session().
359 */
360 static
361 void delete_ust_app_channel_rcu(struct rcu_head *head)
362 {
363 struct ust_app_channel *ua_chan =
364 caa_container_of(head, struct ust_app_channel, rcu_head);
365
366 ht_cleanup_push(ua_chan->ctx);
367 ht_cleanup_push(ua_chan->events);
368 free(ua_chan);
369 }
370
371 /*
372 * Delete ust app channel safely. RCU read lock must be held before calling
373 * this function.
374 */
375 static
376 void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
377 struct ust_app *app)
378 {
379 int ret;
380 struct lttng_ht_iter iter;
381 struct ust_app_event *ua_event;
382 struct ust_app_ctx *ua_ctx;
383 struct ust_app_stream *stream, *stmp;
384 struct ust_registry_session *registry;
385
386 assert(ua_chan);
387
388 DBG3("UST app deleting channel %s", ua_chan->name);
389
390 /* Wipe stream */
391 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
392 cds_list_del(&stream->list);
393 delete_ust_app_stream(sock, stream, app);
394 }
395
396 /* Wipe context */
397 cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
398 cds_list_del(&ua_ctx->list);
399 ret = lttng_ht_del(ua_chan->ctx, &iter);
400 assert(!ret);
401 delete_ust_app_ctx(sock, ua_ctx, app);
402 }
403
404 /* Wipe events */
405 cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
406 node.node) {
407 ret = lttng_ht_del(ua_chan->events, &iter);
408 assert(!ret);
409 delete_ust_app_event(sock, ua_event, app);
410 }
411
412 if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
413 /* Wipe and free registry from session registry. */
414 registry = get_session_registry(ua_chan->session);
415 if (registry) {
416 ust_registry_channel_del_free(registry, ua_chan->key);
417 }
418 }
419
420 if (ua_chan->obj != NULL) {
421 /* Remove channel from application UST object descriptor. */
422 iter.iter.node = &ua_chan->ust_objd_node.node;
423 ret = lttng_ht_del(app->ust_objd, &iter);
424 assert(!ret);
425 pthread_mutex_lock(&app->sock_lock);
426 ret = ustctl_release_object(sock, ua_chan->obj);
427 pthread_mutex_unlock(&app->sock_lock);
428 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
429 ERR("UST app sock %d release channel obj failed with ret %d",
430 sock, ret);
431 }
432 lttng_fd_put(LTTNG_FD_APPS, 1);
433 free(ua_chan->obj);
434 }
435 call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
436 }
437
438 int ust_app_register_done(struct ust_app *app)
439 {
440 int ret;
441
442 pthread_mutex_lock(&app->sock_lock);
443 ret = ustctl_register_done(app->sock);
444 pthread_mutex_unlock(&app->sock_lock);
445 return ret;
446 }
447
448 int ust_app_release_object(struct ust_app *app, struct lttng_ust_object_data *data)
449 {
450 int ret, sock;
451
452 if (app) {
453 pthread_mutex_lock(&app->sock_lock);
454 sock = app->sock;
455 } else {
456 sock = -1;
457 }
458 ret = ustctl_release_object(sock, data);
459 if (app) {
460 pthread_mutex_unlock(&app->sock_lock);
461 }
462 return ret;
463 }
464
465 /*
466 * Push metadata to consumer socket.
467 *
468 * RCU read-side lock must be held to guarantee existance of socket.
469 * Must be called with the ust app session lock held.
470 * Must be called with the registry lock held.
471 *
472 * On success, return the len of metadata pushed or else a negative value.
473 * Returning a -EPIPE return value means we could not send the metadata,
474 * but it can be caused by recoverable errors (e.g. the application has
475 * terminated concurrently).
476 */
477 ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
478 struct consumer_socket *socket, int send_zero_data)
479 {
480 int ret;
481 char *metadata_str = NULL;
482 size_t len, offset, new_metadata_len_sent;
483 ssize_t ret_val;
484 uint64_t metadata_key;
485
486 assert(registry);
487 assert(socket);
488
489 metadata_key = registry->metadata_key;
490
491 /*
492 * Means that no metadata was assigned to the session. This can
493 * happens if no start has been done previously.
494 */
495 if (!metadata_key) {
496 return 0;
497 }
498
499 /*
500 * On a push metadata error either the consumer is dead or the
501 * metadata channel has been destroyed because its endpoint
502 * might have died (e.g: relayd), or because the application has
503 * exited. If so, the metadata closed flag is set to 1 so we
504 * deny pushing metadata again which is not valid anymore on the
505 * consumer side.
506 */
507 if (registry->metadata_closed) {
508 return -EPIPE;
509 }
510
511 offset = registry->metadata_len_sent;
512 len = registry->metadata_len - registry->metadata_len_sent;
513 new_metadata_len_sent = registry->metadata_len;
514 if (len == 0) {
515 DBG3("No metadata to push for metadata key %" PRIu64,
516 registry->metadata_key);
517 ret_val = len;
518 if (send_zero_data) {
519 DBG("No metadata to push");
520 goto push_data;
521 }
522 goto end;
523 }
524
525 /* Allocate only what we have to send. */
526 metadata_str = zmalloc(len);
527 if (!metadata_str) {
528 PERROR("zmalloc ust app metadata string");
529 ret_val = -ENOMEM;
530 goto error;
531 }
532 /* Copy what we haven't sent out. */
533 memcpy(metadata_str, registry->metadata + offset, len);
534
535 push_data:
536 pthread_mutex_unlock(&registry->lock);
537 /*
538 * We need to unlock the registry while we push metadata to
539 * break a circular dependency between the consumerd metadata
540 * lock and the sessiond registry lock. Indeed, pushing metadata
541 * to the consumerd awaits that it gets pushed all the way to
542 * relayd, but doing so requires grabbing the metadata lock. If
543 * a concurrent metadata request is being performed by
544 * consumerd, this can try to grab the registry lock on the
545 * sessiond while holding the metadata lock on the consumer
546 * daemon. Those push and pull schemes are performed on two
547 * different bidirectionnal communication sockets.
548 */
549 ret = consumer_push_metadata(socket, metadata_key,
550 metadata_str, len, offset);
551 pthread_mutex_lock(&registry->lock);
552 if (ret < 0) {
553 /*
554 * There is an acceptable race here between the registry
555 * metadata key assignment and the creation on the
556 * consumer. The session daemon can concurrently push
557 * metadata for this registry while being created on the
558 * consumer since the metadata key of the registry is
559 * assigned *before* it is setup to avoid the consumer
560 * to ask for metadata that could possibly be not found
561 * in the session daemon.
562 *
563 * The metadata will get pushed either by the session
564 * being stopped or the consumer requesting metadata if
565 * that race is triggered.
566 */
567 if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
568 ret = 0;
569 } else {
570 ERR("Error pushing metadata to consumer");
571 }
572 ret_val = ret;
573 goto error_push;
574 } else {
575 /*
576 * Metadata may have been concurrently pushed, since
577 * we're not holding the registry lock while pushing to
578 * consumer. This is handled by the fact that we send
579 * the metadata content, size, and the offset at which
580 * that metadata belongs. This may arrive out of order
581 * on the consumer side, and the consumer is able to
582 * deal with overlapping fragments. The consumer
583 * supports overlapping fragments, which must be
584 * contiguous starting from offset 0. We keep the
585 * largest metadata_len_sent value of the concurrent
586 * send.
587 */
588 registry->metadata_len_sent =
589 max_t(size_t, registry->metadata_len_sent,
590 new_metadata_len_sent);
591 }
592 free(metadata_str);
593 return len;
594
595 end:
596 error:
597 if (ret_val) {
598 /*
599 * On error, flag the registry that the metadata is
600 * closed. We were unable to push anything and this
601 * means that either the consumer is not responding or
602 * the metadata cache has been destroyed on the
603 * consumer.
604 */
605 registry->metadata_closed = 1;
606 }
607 error_push:
608 free(metadata_str);
609 return ret_val;
610 }
611
612 /*
613 * For a given application and session, push metadata to consumer.
614 * Either sock or consumer is required : if sock is NULL, the default
615 * socket to send the metadata is retrieved from consumer, if sock
616 * is not NULL we use it to send the metadata.
617 * RCU read-side lock must be held while calling this function,
618 * therefore ensuring existance of registry. It also ensures existance
619 * of socket throughout this function.
620 *
621 * Return 0 on success else a negative error.
622 * Returning a -EPIPE return value means we could not send the metadata,
623 * but it can be caused by recoverable errors (e.g. the application has
624 * terminated concurrently).
625 */
626 static int push_metadata(struct ust_registry_session *registry,
627 struct consumer_output *consumer)
628 {
629 int ret_val;
630 ssize_t ret;
631 struct consumer_socket *socket;
632
633 assert(registry);
634 assert(consumer);
635
636 pthread_mutex_lock(&registry->lock);
637 if (registry->metadata_closed) {
638 ret_val = -EPIPE;
639 goto error;
640 }
641
642 /* Get consumer socket to use to push the metadata.*/
643 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
644 consumer);
645 if (!socket) {
646 ret_val = -1;
647 goto error;
648 }
649
650 ret = ust_app_push_metadata(registry, socket, 0);
651 if (ret < 0) {
652 ret_val = ret;
653 goto error;
654 }
655 pthread_mutex_unlock(&registry->lock);
656 return 0;
657
658 error:
659 pthread_mutex_unlock(&registry->lock);
660 return ret_val;
661 }
662
663 /*
664 * Send to the consumer a close metadata command for the given session. Once
665 * done, the metadata channel is deleted and the session metadata pointer is
666 * nullified. The session lock MUST be held unless the application is
667 * in the destroy path.
668 *
669 * Return 0 on success else a negative value.
670 */
671 static int close_metadata(struct ust_registry_session *registry,
672 struct consumer_output *consumer)
673 {
674 int ret;
675 struct consumer_socket *socket;
676
677 assert(registry);
678 assert(consumer);
679
680 rcu_read_lock();
681
682 pthread_mutex_lock(&registry->lock);
683
684 if (!registry->metadata_key || registry->metadata_closed) {
685 ret = 0;
686 goto end;
687 }
688
689 /* Get consumer socket to use to push the metadata.*/
690 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
691 consumer);
692 if (!socket) {
693 ret = -1;
694 goto error;
695 }
696
697 ret = consumer_close_metadata(socket, registry->metadata_key);
698 if (ret < 0) {
699 goto error;
700 }
701
702 error:
703 /*
704 * Metadata closed. Even on error this means that the consumer is not
705 * responding or not found so either way a second close should NOT be emit
706 * for this registry.
707 */
708 registry->metadata_closed = 1;
709 end:
710 pthread_mutex_unlock(&registry->lock);
711 rcu_read_unlock();
712 return ret;
713 }
714
715 /*
716 * We need to execute ht_destroy outside of RCU read-side critical
717 * section and outside of call_rcu thread, so we postpone its execution
718 * using ht_cleanup_push. It is simpler than to change the semantic of
719 * the many callers of delete_ust_app_session().
720 */
721 static
722 void delete_ust_app_session_rcu(struct rcu_head *head)
723 {
724 struct ust_app_session *ua_sess =
725 caa_container_of(head, struct ust_app_session, rcu_head);
726
727 ht_cleanup_push(ua_sess->channels);
728 free(ua_sess);
729 }
730
731 /*
732 * Delete ust app session safely. RCU read lock must be held before calling
733 * this function.
734 */
735 static
736 void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
737 struct ust_app *app)
738 {
739 int ret;
740 struct lttng_ht_iter iter;
741 struct ust_app_channel *ua_chan;
742 struct ust_registry_session *registry;
743
744 assert(ua_sess);
745
746 pthread_mutex_lock(&ua_sess->lock);
747
748 assert(!ua_sess->deleted);
749 ua_sess->deleted = true;
750
751 registry = get_session_registry(ua_sess);
752 if (registry) {
753 /* Push metadata for application before freeing the application. */
754 (void) push_metadata(registry, ua_sess->consumer);
755
756 /*
757 * Don't ask to close metadata for global per UID buffers. Close
758 * metadata only on destroy trace session in this case. Also, the
759 * previous push metadata could have flag the metadata registry to
760 * close so don't send a close command if closed.
761 */
762 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
763 /* And ask to close it for this session registry. */
764 (void) close_metadata(registry, ua_sess->consumer);
765 }
766 }
767
768 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
769 node.node) {
770 ret = lttng_ht_del(ua_sess->channels, &iter);
771 assert(!ret);
772 delete_ust_app_channel(sock, ua_chan, app);
773 }
774
775 /* In case of per PID, the registry is kept in the session. */
776 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
777 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
778 if (reg_pid) {
779 buffer_reg_pid_remove(reg_pid);
780 buffer_reg_pid_destroy(reg_pid);
781 }
782 }
783
784 if (ua_sess->handle != -1) {
785 pthread_mutex_lock(&app->sock_lock);
786 ret = ustctl_release_handle(sock, ua_sess->handle);
787 pthread_mutex_unlock(&app->sock_lock);
788 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
789 ERR("UST app sock %d release session handle failed with ret %d",
790 sock, ret);
791 }
792 }
793 pthread_mutex_unlock(&ua_sess->lock);
794
795 consumer_output_put(ua_sess->consumer);
796
797 call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
798 }
799
800 /*
801 * Delete a traceable application structure from the global list. Never call
802 * this function outside of a call_rcu call.
803 *
804 * RCU read side lock should _NOT_ be held when calling this function.
805 */
806 static
807 void delete_ust_app(struct ust_app *app)
808 {
809 int ret, sock;
810 struct ust_app_session *ua_sess, *tmp_ua_sess;
811
812 /* Delete ust app sessions info */
813 sock = app->sock;
814 app->sock = -1;
815
816 /* Wipe sessions */
817 cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
818 teardown_node) {
819 /* Free every object in the session and the session. */
820 rcu_read_lock();
821 delete_ust_app_session(sock, ua_sess, app);
822 rcu_read_unlock();
823 }
824
825 ht_cleanup_push(app->sessions);
826 ht_cleanup_push(app->ust_objd);
827
828 /*
829 * Wait until we have deleted the application from the sock hash table
830 * before closing this socket, otherwise an application could re-use the
831 * socket ID and race with the teardown, using the same hash table entry.
832 *
833 * It's OK to leave the close in call_rcu. We want it to stay unique for
834 * all RCU readers that could run concurrently with unregister app,
835 * therefore we _need_ to only close that socket after a grace period. So
836 * it should stay in this RCU callback.
837 *
838 * This close() is a very important step of the synchronization model so
839 * every modification to this function must be carefully reviewed.
840 */
841 ret = close(sock);
842 if (ret) {
843 PERROR("close");
844 }
845 lttng_fd_put(LTTNG_FD_APPS, 1);
846
847 DBG2("UST app pid %d deleted", app->pid);
848 free(app);
849 }
850
851 /*
852 * URCU intermediate call to delete an UST app.
853 */
854 static
855 void delete_ust_app_rcu(struct rcu_head *head)
856 {
857 struct lttng_ht_node_ulong *node =
858 caa_container_of(head, struct lttng_ht_node_ulong, head);
859 struct ust_app *app =
860 caa_container_of(node, struct ust_app, pid_n);
861
862 DBG3("Call RCU deleting app PID %d", app->pid);
863 delete_ust_app(app);
864 }
865
866 /*
867 * Delete the session from the application ht and delete the data structure by
868 * freeing every object inside and releasing them.
869 */
870 static void destroy_app_session(struct ust_app *app,
871 struct ust_app_session *ua_sess)
872 {
873 int ret;
874 struct lttng_ht_iter iter;
875
876 assert(app);
877 assert(ua_sess);
878
879 iter.iter.node = &ua_sess->node.node;
880 ret = lttng_ht_del(app->sessions, &iter);
881 if (ret) {
882 /* Already scheduled for teardown. */
883 goto end;
884 }
885
886 /* Once deleted, free the data structure. */
887 delete_ust_app_session(app->sock, ua_sess, app);
888
889 end:
890 return;
891 }
892
893 /*
894 * Alloc new UST app session.
895 */
896 static
897 struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
898 {
899 struct ust_app_session *ua_sess;
900
901 /* Init most of the default value by allocating and zeroing */
902 ua_sess = zmalloc(sizeof(struct ust_app_session));
903 if (ua_sess == NULL) {
904 PERROR("malloc");
905 goto error_free;
906 }
907
908 ua_sess->handle = -1;
909 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
910 ua_sess->metadata_attr.type = LTTNG_UST_CHAN_METADATA;
911 pthread_mutex_init(&ua_sess->lock, NULL);
912
913 return ua_sess;
914
915 error_free:
916 return NULL;
917 }
918
919 /*
920 * Alloc new UST app channel.
921 */
922 static
923 struct ust_app_channel *alloc_ust_app_channel(char *name,
924 struct ust_app_session *ua_sess,
925 struct lttng_ust_channel_attr *attr)
926 {
927 struct ust_app_channel *ua_chan;
928
929 /* Init most of the default value by allocating and zeroing */
930 ua_chan = zmalloc(sizeof(struct ust_app_channel));
931 if (ua_chan == NULL) {
932 PERROR("malloc");
933 goto error;
934 }
935
936 /* Setup channel name */
937 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
938 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
939
940 ua_chan->enabled = 1;
941 ua_chan->handle = -1;
942 ua_chan->session = ua_sess;
943 ua_chan->key = get_next_channel_key();
944 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
945 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
946 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
947
948 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
949 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
950
951 /* Copy attributes */
952 if (attr) {
953 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
954 ua_chan->attr.subbuf_size = attr->subbuf_size;
955 ua_chan->attr.num_subbuf = attr->num_subbuf;
956 ua_chan->attr.overwrite = attr->overwrite;
957 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
958 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
959 ua_chan->attr.output = attr->output;
960 }
961 /* By default, the channel is a per cpu channel. */
962 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
963
964 DBG3("UST app channel %s allocated", ua_chan->name);
965
966 return ua_chan;
967
968 error:
969 return NULL;
970 }
971
972 /*
973 * Allocate and initialize a UST app stream.
974 *
975 * Return newly allocated stream pointer or NULL on error.
976 */
977 struct ust_app_stream *ust_app_alloc_stream(void)
978 {
979 struct ust_app_stream *stream = NULL;
980
981 stream = zmalloc(sizeof(*stream));
982 if (stream == NULL) {
983 PERROR("zmalloc ust app stream");
984 goto error;
985 }
986
987 /* Zero could be a valid value for a handle so flag it to -1. */
988 stream->handle = -1;
989
990 error:
991 return stream;
992 }
993
994 /*
995 * Alloc new UST app event.
996 */
997 static
998 struct ust_app_event *alloc_ust_app_event(char *name,
999 struct lttng_ust_event *attr)
1000 {
1001 struct ust_app_event *ua_event;
1002
1003 /* Init most of the default value by allocating and zeroing */
1004 ua_event = zmalloc(sizeof(struct ust_app_event));
1005 if (ua_event == NULL) {
1006 PERROR("malloc");
1007 goto error;
1008 }
1009
1010 ua_event->enabled = 1;
1011 strncpy(ua_event->name, name, sizeof(ua_event->name));
1012 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1013 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
1014
1015 /* Copy attributes */
1016 if (attr) {
1017 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
1018 }
1019
1020 DBG3("UST app event %s allocated", ua_event->name);
1021
1022 return ua_event;
1023
1024 error:
1025 return NULL;
1026 }
1027
1028 /*
1029 * Alloc new UST app context.
1030 */
1031 static
1032 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context *uctx)
1033 {
1034 struct ust_app_ctx *ua_ctx;
1035
1036 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
1037 if (ua_ctx == NULL) {
1038 goto error;
1039 }
1040
1041 CDS_INIT_LIST_HEAD(&ua_ctx->list);
1042
1043 if (uctx) {
1044 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
1045 }
1046
1047 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
1048
1049 error:
1050 return ua_ctx;
1051 }
1052
1053 /*
1054 * Allocate a filter and copy the given original filter.
1055 *
1056 * Return allocated filter or NULL on error.
1057 */
1058 static struct lttng_filter_bytecode *copy_filter_bytecode(
1059 struct lttng_filter_bytecode *orig_f)
1060 {
1061 struct lttng_filter_bytecode *filter = NULL;
1062
1063 /* Copy filter bytecode */
1064 filter = zmalloc(sizeof(*filter) + orig_f->len);
1065 if (!filter) {
1066 PERROR("zmalloc alloc filter bytecode");
1067 goto error;
1068 }
1069
1070 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
1071
1072 error:
1073 return filter;
1074 }
1075
1076 /*
1077 * Create a liblttng-ust filter bytecode from given bytecode.
1078 *
1079 * Return allocated filter or NULL on error.
1080 */
1081 static struct lttng_ust_filter_bytecode *create_ust_bytecode_from_bytecode(
1082 struct lttng_filter_bytecode *orig_f)
1083 {
1084 struct lttng_ust_filter_bytecode *filter = NULL;
1085
1086 /* Copy filter bytecode */
1087 filter = zmalloc(sizeof(*filter) + orig_f->len);
1088 if (!filter) {
1089 PERROR("zmalloc alloc ust filter bytecode");
1090 goto error;
1091 }
1092
1093 assert(sizeof(struct lttng_filter_bytecode) ==
1094 sizeof(struct lttng_ust_filter_bytecode));
1095 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
1096 error:
1097 return filter;
1098 }
1099
1100 /*
1101 * Find an ust_app using the sock and return it. RCU read side lock must be
1102 * held before calling this helper function.
1103 */
1104 struct ust_app *ust_app_find_by_sock(int sock)
1105 {
1106 struct lttng_ht_node_ulong *node;
1107 struct lttng_ht_iter iter;
1108
1109 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
1110 node = lttng_ht_iter_get_node_ulong(&iter);
1111 if (node == NULL) {
1112 DBG2("UST app find by sock %d not found", sock);
1113 goto error;
1114 }
1115
1116 return caa_container_of(node, struct ust_app, sock_n);
1117
1118 error:
1119 return NULL;
1120 }
1121
1122 /*
1123 * Find an ust_app using the notify sock and return it. RCU read side lock must
1124 * be held before calling this helper function.
1125 */
1126 static struct ust_app *find_app_by_notify_sock(int sock)
1127 {
1128 struct lttng_ht_node_ulong *node;
1129 struct lttng_ht_iter iter;
1130
1131 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1132 &iter);
1133 node = lttng_ht_iter_get_node_ulong(&iter);
1134 if (node == NULL) {
1135 DBG2("UST app find by notify sock %d not found", sock);
1136 goto error;
1137 }
1138
1139 return caa_container_of(node, struct ust_app, notify_sock_n);
1140
1141 error:
1142 return NULL;
1143 }
1144
1145 /*
1146 * Lookup for an ust app event based on event name, filter bytecode and the
1147 * event loglevel.
1148 *
1149 * Return an ust_app_event object or NULL on error.
1150 */
1151 static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
1152 char *name, struct lttng_filter_bytecode *filter, int loglevel,
1153 const struct lttng_event_exclusion *exclusion)
1154 {
1155 struct lttng_ht_iter iter;
1156 struct lttng_ht_node_str *node;
1157 struct ust_app_event *event = NULL;
1158 struct ust_app_ht_key key;
1159
1160 assert(name);
1161 assert(ht);
1162
1163 /* Setup key for event lookup. */
1164 key.name = name;
1165 key.filter = filter;
1166 key.loglevel = loglevel;
1167 /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
1168 key.exclusion = exclusion;
1169
1170 /* Lookup using the event name as hash and a custom match fct. */
1171 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1172 ht_match_ust_app_event, &key, &iter.iter);
1173 node = lttng_ht_iter_get_node_str(&iter);
1174 if (node == NULL) {
1175 goto end;
1176 }
1177
1178 event = caa_container_of(node, struct ust_app_event, node);
1179
1180 end:
1181 return event;
1182 }
1183
1184 /*
1185 * Create the channel context on the tracer.
1186 *
1187 * Called with UST app session lock held.
1188 */
1189 static
1190 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1191 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1192 {
1193 int ret;
1194
1195 health_code_update();
1196
1197 pthread_mutex_lock(&app->sock_lock);
1198 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1199 ua_chan->obj, &ua_ctx->obj);
1200 pthread_mutex_unlock(&app->sock_lock);
1201 if (ret < 0) {
1202 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1203 ERR("UST app create channel context failed for app (pid: %d) "
1204 "with ret %d", app->pid, ret);
1205 } else {
1206 /*
1207 * This is normal behavior, an application can die during the
1208 * creation process. Don't report an error so the execution can
1209 * continue normally.
1210 */
1211 ret = 0;
1212 DBG3("UST app disable event failed. Application is dead.");
1213 }
1214 goto error;
1215 }
1216
1217 ua_ctx->handle = ua_ctx->obj->handle;
1218
1219 DBG2("UST app context handle %d created successfully for channel %s",
1220 ua_ctx->handle, ua_chan->name);
1221
1222 error:
1223 health_code_update();
1224 return ret;
1225 }
1226
1227 /*
1228 * Set the filter on the tracer.
1229 */
1230 static
1231 int set_ust_event_filter(struct ust_app_event *ua_event,
1232 struct ust_app *app)
1233 {
1234 int ret;
1235 struct lttng_ust_filter_bytecode *ust_bytecode = NULL;
1236
1237 health_code_update();
1238
1239 if (!ua_event->filter) {
1240 ret = 0;
1241 goto error;
1242 }
1243
1244 ust_bytecode = create_ust_bytecode_from_bytecode(ua_event->filter);
1245 if (!ust_bytecode) {
1246 ret = -LTTNG_ERR_NOMEM;
1247 goto error;
1248 }
1249 pthread_mutex_lock(&app->sock_lock);
1250 ret = ustctl_set_filter(app->sock, ust_bytecode,
1251 ua_event->obj);
1252 pthread_mutex_unlock(&app->sock_lock);
1253 if (ret < 0) {
1254 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1255 ERR("UST app event %s filter failed for app (pid: %d) "
1256 "with ret %d", ua_event->attr.name, app->pid, ret);
1257 } else {
1258 /*
1259 * This is normal behavior, an application can die during the
1260 * creation process. Don't report an error so the execution can
1261 * continue normally.
1262 */
1263 ret = 0;
1264 DBG3("UST app filter event failed. Application is dead.");
1265 }
1266 goto error;
1267 }
1268
1269 DBG2("UST filter set successfully for event %s", ua_event->name);
1270
1271 error:
1272 health_code_update();
1273 free(ust_bytecode);
1274 return ret;
1275 }
1276
1277 static
1278 struct lttng_ust_event_exclusion *create_ust_exclusion_from_exclusion(
1279 struct lttng_event_exclusion *exclusion)
1280 {
1281 struct lttng_ust_event_exclusion *ust_exclusion = NULL;
1282 size_t exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
1283 LTTNG_UST_SYM_NAME_LEN * exclusion->count;
1284
1285 ust_exclusion = zmalloc(exclusion_alloc_size);
1286 if (!ust_exclusion) {
1287 PERROR("malloc");
1288 goto end;
1289 }
1290
1291 assert(sizeof(struct lttng_event_exclusion) ==
1292 sizeof(struct lttng_ust_event_exclusion));
1293 memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
1294 end:
1295 return ust_exclusion;
1296 }
1297
1298 /*
1299 * Set event exclusions on the tracer.
1300 */
1301 static
1302 int set_ust_event_exclusion(struct ust_app_event *ua_event,
1303 struct ust_app *app)
1304 {
1305 int ret;
1306 struct lttng_ust_event_exclusion *ust_exclusion = NULL;
1307
1308 health_code_update();
1309
1310 if (!ua_event->exclusion || !ua_event->exclusion->count) {
1311 ret = 0;
1312 goto error;
1313 }
1314
1315 ust_exclusion = create_ust_exclusion_from_exclusion(
1316 ua_event->exclusion);
1317 if (!ust_exclusion) {
1318 ret = -LTTNG_ERR_NOMEM;
1319 goto error;
1320 }
1321 pthread_mutex_lock(&app->sock_lock);
1322 ret = ustctl_set_exclusion(app->sock, ust_exclusion, ua_event->obj);
1323 pthread_mutex_unlock(&app->sock_lock);
1324 if (ret < 0) {
1325 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1326 ERR("UST app event %s exclusions failed for app (pid: %d) "
1327 "with ret %d", ua_event->attr.name, app->pid, ret);
1328 } else {
1329 /*
1330 * This is normal behavior, an application can die during the
1331 * creation process. Don't report an error so the execution can
1332 * continue normally.
1333 */
1334 ret = 0;
1335 DBG3("UST app event exclusion failed. Application is dead.");
1336 }
1337 goto error;
1338 }
1339
1340 DBG2("UST exclusion set successfully for event %s", ua_event->name);
1341
1342 error:
1343 health_code_update();
1344 free(ust_exclusion);
1345 return ret;
1346 }
1347
1348 /*
1349 * Disable the specified event on to UST tracer for the UST session.
1350 */
1351 static int disable_ust_event(struct ust_app *app,
1352 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1353 {
1354 int ret;
1355
1356 health_code_update();
1357
1358 pthread_mutex_lock(&app->sock_lock);
1359 ret = ustctl_disable(app->sock, ua_event->obj);
1360 pthread_mutex_unlock(&app->sock_lock);
1361 if (ret < 0) {
1362 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1363 ERR("UST app event %s disable failed for app (pid: %d) "
1364 "and session handle %d with ret %d",
1365 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1366 } else {
1367 /*
1368 * This is normal behavior, an application can die during the
1369 * creation process. Don't report an error so the execution can
1370 * continue normally.
1371 */
1372 ret = 0;
1373 DBG3("UST app disable event failed. Application is dead.");
1374 }
1375 goto error;
1376 }
1377
1378 DBG2("UST app event %s disabled successfully for app (pid: %d)",
1379 ua_event->attr.name, app->pid);
1380
1381 error:
1382 health_code_update();
1383 return ret;
1384 }
1385
1386 /*
1387 * Disable the specified channel on to UST tracer for the UST session.
1388 */
1389 static int disable_ust_channel(struct ust_app *app,
1390 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1391 {
1392 int ret;
1393
1394 health_code_update();
1395
1396 pthread_mutex_lock(&app->sock_lock);
1397 ret = ustctl_disable(app->sock, ua_chan->obj);
1398 pthread_mutex_unlock(&app->sock_lock);
1399 if (ret < 0) {
1400 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1401 ERR("UST app channel %s disable failed for app (pid: %d) "
1402 "and session handle %d with ret %d",
1403 ua_chan->name, app->pid, ua_sess->handle, ret);
1404 } else {
1405 /*
1406 * This is normal behavior, an application can die during the
1407 * creation process. Don't report an error so the execution can
1408 * continue normally.
1409 */
1410 ret = 0;
1411 DBG3("UST app disable channel failed. Application is dead.");
1412 }
1413 goto error;
1414 }
1415
1416 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1417 ua_chan->name, app->pid);
1418
1419 error:
1420 health_code_update();
1421 return ret;
1422 }
1423
1424 /*
1425 * Enable the specified channel on to UST tracer for the UST session.
1426 */
1427 static int enable_ust_channel(struct ust_app *app,
1428 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1429 {
1430 int ret;
1431
1432 health_code_update();
1433
1434 pthread_mutex_lock(&app->sock_lock);
1435 ret = ustctl_enable(app->sock, ua_chan->obj);
1436 pthread_mutex_unlock(&app->sock_lock);
1437 if (ret < 0) {
1438 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1439 ERR("UST app channel %s enable failed for app (pid: %d) "
1440 "and session handle %d with ret %d",
1441 ua_chan->name, app->pid, ua_sess->handle, ret);
1442 } else {
1443 /*
1444 * This is normal behavior, an application can die during the
1445 * creation process. Don't report an error so the execution can
1446 * continue normally.
1447 */
1448 ret = 0;
1449 DBG3("UST app enable channel failed. Application is dead.");
1450 }
1451 goto error;
1452 }
1453
1454 ua_chan->enabled = 1;
1455
1456 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1457 ua_chan->name, app->pid);
1458
1459 error:
1460 health_code_update();
1461 return ret;
1462 }
1463
1464 /*
1465 * Enable the specified event on to UST tracer for the UST session.
1466 */
1467 static int enable_ust_event(struct ust_app *app,
1468 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1469 {
1470 int ret;
1471
1472 health_code_update();
1473
1474 pthread_mutex_lock(&app->sock_lock);
1475 ret = ustctl_enable(app->sock, ua_event->obj);
1476 pthread_mutex_unlock(&app->sock_lock);
1477 if (ret < 0) {
1478 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1479 ERR("UST app event %s enable failed for app (pid: %d) "
1480 "and session handle %d with ret %d",
1481 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1482 } else {
1483 /*
1484 * This is normal behavior, an application can die during the
1485 * creation process. Don't report an error so the execution can
1486 * continue normally.
1487 */
1488 ret = 0;
1489 DBG3("UST app enable event failed. Application is dead.");
1490 }
1491 goto error;
1492 }
1493
1494 DBG2("UST app event %s enabled successfully for app (pid: %d)",
1495 ua_event->attr.name, app->pid);
1496
1497 error:
1498 health_code_update();
1499 return ret;
1500 }
1501
/*
 * Send channel and stream buffer to application.
 *
 * The channel object is sent first, then every stream attached to it.
 * Each stream is removed from the channel's stream list and destroyed
 * once the tracer has received it; on error, any streams not yet sent
 * remain on the list.
 *
 * Return 0 on success. On error, a negative value is returned; an app
 * that exited mid-transfer is reported as -ENOTCONN.
 */
static int send_channel_pid_to_ust(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_app_stream *stream, *stmp;

	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	health_code_update();

	DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
			app->sock);

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		/* Translate "app died" codes into a single caller-visible code. */
		ret = -ENOTCONN; /* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = -ENOTCONN; /* Caused by app exiting. */
			goto error;
		} else if (ret < 0) {
			goto error;
		}
		/* We don't need the stream anymore once sent to the tracer. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream, app);
	}
	/* Flag the channel that it is sent to the application. */
	ua_chan->is_sent = 1;

error:
	health_code_update();
	return ret;
}
1553
/*
 * Create the specified event onto the UST tracer for a UST session.
 *
 * After creation, the event's filter and exclusions (if any) are applied,
 * and the event is explicitly enabled when flagged as enabled, since the
 * tracer creates events in the disabled state.
 *
 * Should be called with session mutex held.
 *
 * Return 0 on success (including when the application died during the
 * operation) or else a negative error code.
 */
static
int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
{
	int ret = 0;

	health_code_update();

	/* Create UST event on tracer */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
			&ua_event->obj);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error ustctl create event %s for app pid: %d with ret %d",
					ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app create event failed. Application is dead.");
		}
		goto error;
	}

	/* Keep the tracer-assigned handle of the new event object. */
	ua_event->handle = ua_event->obj->handle;

	DBG2("UST app event %s created successfully for pid:%d",
			ua_event->attr.name, app->pid);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event->filter) {
		ret = set_ust_event_filter(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set exclusions for the event */
	if (ua_event->exclusion) {
		ret = set_ust_event_exclusion(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/* Events are created disabled on the tracer; enable it if requested. */
	if (ua_event->enabled) {
		/*
		 * We now need to explicitly enable the event, since it
		 * is now disabled at creation.
		 */
		ret = enable_ust_event(app, ua_sess, ua_event);
		if (ret < 0) {
			/*
			 * If we hit an EPERM, something is wrong with our enable call. If
			 * we get an EEXIST, there is a problem on the tracer side since we
			 * just created it.
			 */
			switch (ret) {
			case -LTTNG_UST_ERR_PERM:
				/* Code flow problem */
				assert(0);
				/* Falls through when assertions are compiled out. */
			case -LTTNG_UST_ERR_EXIST:
				/* It's OK for our use case. */
				ret = 0;
				break;
			default:
				break;
			}
			goto error;
		}
	}

error:
	health_code_update();
	return ret;
}
1643
/*
 * Copy data between an UST app event and a LTT event.
 *
 * Copies the name, enabled state, attributes, filter bytecode and
 * exclusion data from uevent into ua_event. Allocation failures while
 * copying the filter or exclusion are not reported to the caller: the
 * corresponding ua_event field is simply left NULL.
 */
static void shadow_copy_event(struct ust_app_event *ua_event,
		struct ltt_ust_event *uevent)
{
	size_t exclusion_alloc_size;

	/* Copy the name and force NUL termination (strncpy does not). */
	strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';

	ua_event->enabled = uevent->enabled;

	/* Copy event attributes */
	memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));

	/* Copy filter bytecode */
	if (uevent->filter) {
		ua_event->filter = copy_filter_bytecode(uevent->filter);
		/* Filter might be NULL here in case of ENOMEM. */
	}

	/* Copy exclusion data */
	if (uevent->exclusion) {
		/* Header plus one symbol name slot per excluded name. */
		exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
				LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
		ua_event->exclusion = zmalloc(exclusion_alloc_size);
		if (ua_event->exclusion == NULL) {
			PERROR("malloc");
		} else {
			memcpy(ua_event->exclusion, uevent->exclusion,
					exclusion_alloc_size);
		}
	}
}
1679
/*
 * Copy data between an UST app channel and a LTT channel.
 *
 * Copies the name, tracefile settings, buffer attributes, contexts and
 * events from uchan into ua_chan. Context or event allocation failures
 * are skipped silently (the loop continues with the next entry).
 */
static void shadow_copy_channel(struct ust_app_channel *ua_chan,
		struct ltt_ust_channel *uchan)
{
	struct lttng_ht_iter iter;
	struct ltt_ust_event *uevent;
	struct ltt_ust_context *uctx;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;

	DBG2("UST app shadow copy of channel %s started", ua_chan->name);

	/* Copy the name and force NUL termination (strncpy does not). */
	strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->tracefile_size = uchan->tracefile_size;
	ua_chan->tracefile_count = uchan->tracefile_count;

	/* Copy event attributes since the layout is different. */
	ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
	ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
	ua_chan->attr.overwrite = uchan->attr.overwrite;
	ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
	ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
	ua_chan->attr.output = uchan->attr.output;
	/*
	 * Note that the attribute channel type is not set since the channel on the
	 * tracing registry side does not have this information.
	 */

	ua_chan->enabled = uchan->enabled;
	ua_chan->tracing_channel_id = uchan->id;

	/* Copy every context attached to the LTT channel. */
	cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
		ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
		if (ua_ctx == NULL) {
			/* Allocation failed; skip this context. */
			continue;
		}
		lttng_ht_node_init_ulong(&ua_ctx->node,
				(unsigned long) ua_ctx->ctx.ctx);
		lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
		cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
	}

	/* Copy all events from ltt ust channel to ust app channel */
	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG2("UST event %s not found on shadow copy channel",
					uevent->attr.name);
			ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
			if (ua_event == NULL) {
				/* Allocation failed; skip this event. */
				continue;
			}
			shadow_copy_event(ua_event, uevent);
			add_unique_ust_app_event(ua_chan, ua_event);
		}
	}

	DBG3("UST app shadow copy of channel %s done", ua_chan->name);
}
1744
/*
 * Copy data between a UST app session and a regular LTT session.
 *
 * Initializes the app session's identifiers, credentials, consumer
 * reference, trace output path and shm path, then shadow-copies every
 * channel of the global UST domain that does not already exist in the app
 * session. On path-construction failure, the consumer reference taken
 * here is released before returning.
 */
static void shadow_copy_session(struct ust_app_session *ua_sess,
		struct ltt_ust_session *usess, struct ust_app *app)
{
	struct lttng_ht_node_str *ua_chan_node;
	struct lttng_ht_iter iter;
	struct ltt_ust_channel *uchan;
	struct ust_app_channel *ua_chan;
	time_t rawtime;
	struct tm *timeinfo;
	char datetime[16];
	int ret;
	char tmp_shm_path[PATH_MAX];

	/* Get date and time for unique app path */
	time(&rawtime);
	timeinfo = localtime(&rawtime);
	strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);

	DBG2("Shadow copy of session handle %d", ua_sess->handle);

	ua_sess->tracing_id = usess->id;
	ua_sess->id = get_next_session_id();
	/* Real credentials come from the app, effective ones from the session. */
	ua_sess->uid = app->uid;
	ua_sess->gid = app->gid;
	ua_sess->euid = usess->uid;
	ua_sess->egid = usess->gid;
	ua_sess->buffer_type = usess->buffer_type;
	ua_sess->bits_per_long = app->bits_per_long;

	/* There is only one consumer object per session possible. */
	consumer_output_get(usess->consumer);
	ua_sess->consumer = usess->consumer;

	ua_sess->output_traces = usess->output_traces;
	ua_sess->live_timer_interval = usess->live_timer_interval;
	copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
			&usess->metadata_attr);

	/* Build the trace output path according to the buffering scheme. */
	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
				datetime);
		break;
	case LTTNG_BUFFER_PER_UID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
		break;
	default:
		assert(0);
		goto error;
	}
	if (ret < 0) {
		/* NOTE(review): message says asprintf but snprintf is used above. */
		PERROR("asprintf UST shadow copy session");
		assert(0);
		goto error;
	}

	/* Copy shm paths, forcing NUL termination (strncpy does not). */
	strncpy(ua_sess->root_shm_path, usess->root_shm_path,
			sizeof(ua_sess->root_shm_path));
	ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
	strncpy(ua_sess->shm_path, usess->shm_path,
			sizeof(ua_sess->shm_path));
	ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	if (ua_sess->shm_path[0]) {
		/* Append a per-PID or per-UID subdirectory to the shm path. */
		switch (ua_sess->buffer_type) {
		case LTTNG_BUFFER_PER_PID:
			ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
					DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
					app->name, app->pid, datetime);
			break;
		case LTTNG_BUFFER_PER_UID:
			ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
					DEFAULT_UST_TRACE_UID_PATH,
					app->uid, app->bits_per_long);
			break;
		default:
			assert(0);
			goto error;
		}
		if (ret < 0) {
			PERROR("sprintf UST shadow copy session");
			assert(0);
			goto error;
		}
		strncat(ua_sess->shm_path, tmp_shm_path,
				sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
		ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
	}

	/* Iterate over all channels in global domain. */
	cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
			uchan, node.node) {
		struct lttng_ht_iter uiter;

		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node != NULL) {
			/* Channel already exists in the app session. Continue. */
			continue;
		}

		DBG2("Channel %s not found on shadow session copy, creating it",
				uchan->name);
		ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
		if (ua_chan == NULL) {
			/* Allocation failed; skip. FIXME: might want to handle ENOMEM. */
			continue;
		}
		shadow_copy_channel(ua_chan, uchan);
		/*
		 * The concept of metadata channel does not exist on the tracing
		 * registry side of the session daemon so this can only be a per CPU
		 * channel and not metadata.
		 */
		ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;

		lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
	}
	return;

error:
	/* Release the consumer reference taken above. */
	consumer_output_put(ua_sess->consumer);
}
1872
1873 /*
1874 * Lookup sesison wrapper.
1875 */
1876 static
1877 void __lookup_session_by_app(struct ltt_ust_session *usess,
1878 struct ust_app *app, struct lttng_ht_iter *iter)
1879 {
1880 /* Get right UST app session from app */
1881 lttng_ht_lookup(app->sessions, &usess->id, iter);
1882 }
1883
1884 /*
1885 * Return ust app session from the app session hashtable using the UST session
1886 * id.
1887 */
1888 static struct ust_app_session *lookup_session_by_app(
1889 struct ltt_ust_session *usess, struct ust_app *app)
1890 {
1891 struct lttng_ht_iter iter;
1892 struct lttng_ht_node_u64 *node;
1893
1894 __lookup_session_by_app(usess, app, &iter);
1895 node = lttng_ht_iter_get_node_u64(&iter);
1896 if (node == NULL) {
1897 goto error;
1898 }
1899
1900 return caa_container_of(node, struct ust_app_session, node);
1901
1902 error:
1903 return NULL;
1904 }
1905
/*
 * Setup buffer registry per PID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value. On error, *regp is left
 * untouched.
 */
static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_pid **regp)
{
	int ret = 0;
	struct buffer_reg_pid *reg_pid;

	assert(ua_sess);
	assert(app);

	rcu_read_lock();

	reg_pid = buffer_reg_pid_find(ua_sess->id);
	if (!reg_pid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
				ua_sess->root_shm_path, ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		/* Registry already exists; skip initialization. */
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor, reg_pid->root_shm_path,
			reg_pid->shm_path,
			ua_sess->euid, ua_sess->egid);
	if (ret < 0) {
		/*
		 * reg_pid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * non-NULL.
		 */
		buffer_reg_pid_destroy(reg_pid);
		goto error;
	}

	buffer_reg_pid_add(reg_pid);

	DBG3("UST app buffer registry per PID created successfully");

end:
	/* Report the (found or newly created) registry to the caller. */
	if (regp) {
		*regp = reg_pid;
	}
error:
	rcu_read_unlock();
	return ret;
}
1971
/*
 * Setup buffer registry per UID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value. On error, *regp is left
 * untouched.
 */
static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_uid **regp)
{
	int ret = 0;
	struct buffer_reg_uid *reg_uid;

	assert(usess);
	assert(app);

	rcu_read_lock();

	/* Per-UID registries are keyed by session, ABI (bitness) and uid. */
	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	if (!reg_uid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
				LTTNG_DOMAIN_UST, &reg_uid,
				ua_sess->root_shm_path, ua_sess->shm_path);
		if (ret < 0) {
			goto error;
		}
	} else {
		/* Registry already exists; skip initialization. */
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor, reg_uid->root_shm_path,
			reg_uid->shm_path, usess->uid, usess->gid);
	if (ret < 0) {
		/*
		 * reg_uid->registry->reg.ust is NULL upon error, so we need to
		 * destroy the buffer registry, because it is always expected
		 * that if the buffer registry can be found, its ust registry is
		 * non-NULL.
		 */
		buffer_reg_uid_destroy(reg_uid, NULL);
		goto error;
	}
	/* Add node to teardown list of the session. */
	cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);

	buffer_reg_uid_add(reg_uid);

	DBG3("UST app buffer registry per UID created successfully");
end:
	/* Report the (found or newly created) registry to the caller. */
	if (regp) {
		*regp = reg_uid;
	}
error:
	rcu_read_unlock();
	return ret;
}
2039
/*
 * Create a session on the tracer side for the given app.
 *
 * On success, ua_sess_ptr is populated with the session pointer or else left
 * untouched. If the session was created, is_created is set to 1. On error,
 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
 * be NULL.
 *
 * Returns 0 on success or else a negative code which is either -ENOMEM or
 * -ENOTCONN which is the default code if the ustctl_create_session fails.
 */
static int create_ust_app_session(struct ltt_ust_session *usess,
		struct ust_app *app, struct ust_app_session **ua_sess_ptr,
		int *is_created)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess;

	assert(usess);
	assert(app);
	assert(ua_sess_ptr);

	health_code_update();

	/* Reuse the app's existing session for this tracing session, if any. */
	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
				app->pid, usess->id);
		ua_sess = alloc_ust_app_session(app);
		if (ua_sess == NULL) {
			/* Only malloc can failed so something is really wrong */
			ret = -ENOMEM;
			goto error;
		}
		shadow_copy_session(ua_sess, usess, app);
		created = 1;
	}

	/* Make sure the buffer registry matching the buffering scheme exists. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		/* Init local registry. */
		ret = setup_buffer_reg_pid(ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_UID:
		/* Look for a global registry. If none exists, create one. */
		ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
		if (ret < 0) {
			delete_ust_app_session(-1, ua_sess, app);
			goto error;
		}
		break;
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* A handle of -1 means the session was never created on the tracer. */
	if (ua_sess->handle == -1) {
		pthread_mutex_lock(&app->sock_lock);
		ret = ustctl_create_session(app->sock);
		pthread_mutex_unlock(&app->sock_lock);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("Creating session for app pid %d with ret %d",
						app->pid, ret);
			} else {
				DBG("UST app creating session failed. Application is dead");
				/*
				 * This is normal behavior, an application can die during the
				 * creation process. Don't report an error so the execution can
				 * continue normally. This will get flagged ENOTCONN and the
				 * caller will handle it.
				 */
				ret = 0;
			}
			delete_ust_app_session(-1, ua_sess, app);
			if (ret != -ENOMEM) {
				/*
				 * Tracer is probably gone or got an internal error so let's
				 * behave like it will soon unregister or not usable.
				 */
				ret = -ENOTCONN;
			}
			goto error;
		}

		/* A non-negative return value is the session handle. */
		ua_sess->handle = ret;

		/* Add ust app session to app's HT */
		lttng_ht_node_init_u64(&ua_sess->node,
				ua_sess->tracing_id);
		lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);

		DBG2("UST app session created successfully with handle %d", ret);
	}

	*ua_sess_ptr = ua_sess;
	if (is_created) {
		*is_created = created;
	}

	/* Everything went well. */
	ret = 0;

error:
	health_code_update();
	return ret;
}
2154
2155 /*
2156 * Match function for a hash table lookup of ust_app_ctx.
2157 *
2158 * It matches an ust app context based on the context type and, in the case
2159 * of perf counters, their name.
2160 */
2161 static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2162 {
2163 struct ust_app_ctx *ctx;
2164 const struct lttng_ust_context *key;
2165
2166 assert(node);
2167 assert(_key);
2168
2169 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2170 key = _key;
2171
2172 /* Context type */
2173 if (ctx->ctx.ctx != key->ctx) {
2174 goto no_match;
2175 }
2176
2177 /* Check the name in the case of perf thread counters. */
2178 if (key->ctx == LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER) {
2179 if (strncmp(key->u.perf_counter.name,
2180 ctx->ctx.u.perf_counter.name,
2181 sizeof(key->u.perf_counter.name))) {
2182 goto no_match;
2183 }
2184 }
2185
2186 /* Match. */
2187 return 1;
2188
2189 no_match:
2190 return 0;
2191 }
2192
2193 /*
2194 * Lookup for an ust app context from an lttng_ust_context.
2195 *
2196 * Must be called while holding RCU read side lock.
2197 * Return an ust_app_ctx object or NULL on error.
2198 */
2199 static
2200 struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
2201 struct lttng_ust_context *uctx)
2202 {
2203 struct lttng_ht_iter iter;
2204 struct lttng_ht_node_ulong *node;
2205 struct ust_app_ctx *app_ctx = NULL;
2206
2207 assert(uctx);
2208 assert(ht);
2209
2210 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2211 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2212 ht_match_ust_app_ctx, uctx, &iter.iter);
2213 node = lttng_ht_iter_get_node_ulong(&iter);
2214 if (!node) {
2215 goto end;
2216 }
2217
2218 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2219
2220 end:
2221 return app_ctx;
2222 }
2223
2224 /*
2225 * Create a context for the channel on the tracer.
2226 *
2227 * Called with UST app session lock held and a RCU read side lock.
2228 */
2229 static
2230 int create_ust_app_channel_context(struct ust_app_session *ua_sess,
2231 struct ust_app_channel *ua_chan, struct lttng_ust_context *uctx,
2232 struct ust_app *app)
2233 {
2234 int ret = 0;
2235 struct ust_app_ctx *ua_ctx;
2236
2237 DBG2("UST app adding context to channel %s", ua_chan->name);
2238
2239 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2240 if (ua_ctx) {
2241 ret = -EEXIST;
2242 goto error;
2243 }
2244
2245 ua_ctx = alloc_ust_app_ctx(uctx);
2246 if (ua_ctx == NULL) {
2247 /* malloc failed */
2248 ret = -1;
2249 goto error;
2250 }
2251
2252 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
2253 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
2254 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
2255
2256 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2257 if (ret < 0) {
2258 goto error;
2259 }
2260
2261 error:
2262 return ret;
2263 }
2264
2265 /*
2266 * Enable on the tracer side a ust app event for the session and channel.
2267 *
2268 * Called with UST app session lock held.
2269 */
2270 static
2271 int enable_ust_app_event(struct ust_app_session *ua_sess,
2272 struct ust_app_event *ua_event, struct ust_app *app)
2273 {
2274 int ret;
2275
2276 ret = enable_ust_event(app, ua_sess, ua_event);
2277 if (ret < 0) {
2278 goto error;
2279 }
2280
2281 ua_event->enabled = 1;
2282
2283 error:
2284 return ret;
2285 }
2286
2287 /*
2288 * Disable on the tracer side a ust app event for the session and channel.
2289 */
2290 static int disable_ust_app_event(struct ust_app_session *ua_sess,
2291 struct ust_app_event *ua_event, struct ust_app *app)
2292 {
2293 int ret;
2294
2295 ret = disable_ust_event(app, ua_sess, ua_event);
2296 if (ret < 0) {
2297 goto error;
2298 }
2299
2300 ua_event->enabled = 0;
2301
2302 error:
2303 return ret;
2304 }
2305
2306 /*
2307 * Lookup ust app channel for session and disable it on the tracer side.
2308 */
2309 static
2310 int disable_ust_app_channel(struct ust_app_session *ua_sess,
2311 struct ust_app_channel *ua_chan, struct ust_app *app)
2312 {
2313 int ret;
2314
2315 ret = disable_ust_channel(app, ua_sess, ua_chan);
2316 if (ret < 0) {
2317 goto error;
2318 }
2319
2320 ua_chan->enabled = 0;
2321
2322 error:
2323 return ret;
2324 }
2325
2326 /*
2327 * Lookup ust app channel for session and enable it on the tracer side. This
2328 * MUST be called with a RCU read side lock acquired.
2329 */
2330 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2331 struct ltt_ust_channel *uchan, struct ust_app *app)
2332 {
2333 int ret = 0;
2334 struct lttng_ht_iter iter;
2335 struct lttng_ht_node_str *ua_chan_node;
2336 struct ust_app_channel *ua_chan;
2337
2338 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2339 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2340 if (ua_chan_node == NULL) {
2341 DBG2("Unable to find channel %s in ust session id %" PRIu64,
2342 uchan->name, ua_sess->tracing_id);
2343 goto error;
2344 }
2345
2346 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2347
2348 ret = enable_ust_channel(app, ua_sess, ua_chan);
2349 if (ret < 0) {
2350 goto error;
2351 }
2352
2353 error:
2354 return ret;
2355 }
2356
2357 /*
2358 * Ask the consumer to create a channel and get it if successful.
2359 *
2360 * Return 0 on success or else a negative value.
2361 */
2362 static int do_consumer_create_channel(struct ltt_ust_session *usess,
2363 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
2364 int bitness, struct ust_registry_session *registry)
2365 {
2366 int ret;
2367 unsigned int nb_fd = 0;
2368 struct consumer_socket *socket;
2369
2370 assert(usess);
2371 assert(ua_sess);
2372 assert(ua_chan);
2373 assert(registry);
2374
2375 rcu_read_lock();
2376 health_code_update();
2377
2378 /* Get the right consumer socket for the application. */
2379 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
2380 if (!socket) {
2381 ret = -EINVAL;
2382 goto error;
2383 }
2384
2385 health_code_update();
2386
2387 /* Need one fd for the channel. */
2388 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2389 if (ret < 0) {
2390 ERR("Exhausted number of available FD upon create channel");
2391 goto error;
2392 }
2393
2394 /*
2395 * Ask consumer to create channel. The consumer will return the number of
2396 * stream we have to expect.
2397 */
2398 ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
2399 registry);
2400 if (ret < 0) {
2401 goto error_ask;
2402 }
2403
2404 /*
2405 * Compute the number of fd needed before receiving them. It must be 2 per
2406 * stream (2 being the default value here).
2407 */
2408 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
2409
2410 /* Reserve the amount of file descriptor we need. */
2411 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
2412 if (ret < 0) {
2413 ERR("Exhausted number of available FD upon create channel");
2414 goto error_fd_get_stream;
2415 }
2416
2417 health_code_update();
2418
2419 /*
2420 * Now get the channel from the consumer. This call wil populate the stream
2421 * list of that channel and set the ust objects.
2422 */
2423 if (usess->consumer->enabled) {
2424 ret = ust_consumer_get_channel(socket, ua_chan);
2425 if (ret < 0) {
2426 goto error_destroy;
2427 }
2428 }
2429
2430 rcu_read_unlock();
2431 return 0;
2432
2433 error_destroy:
2434 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
2435 error_fd_get_stream:
2436 /*
2437 * Initiate a destroy channel on the consumer since we had an error
2438 * handling it on our side. The return value is of no importance since we
2439 * already have a ret value set by the previous error that we need to
2440 * return.
2441 */
2442 (void) ust_consumer_destroy_channel(socket, ua_chan);
2443 error_ask:
2444 lttng_fd_put(LTTNG_FD_APPS, 1);
2445 error:
2446 health_code_update();
2447 rcu_read_unlock();
2448 return ret;
2449 }
2450
2451 /*
2452 * Duplicate the ust data object of the ust app stream and save it in the
2453 * buffer registry stream.
2454 *
2455 * Return 0 on success or else a negative value.
2456 */
2457 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2458 struct ust_app_stream *stream)
2459 {
2460 int ret;
2461
2462 assert(reg_stream);
2463 assert(stream);
2464
2465 /* Reserve the amount of file descriptor we need. */
2466 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2467 if (ret < 0) {
2468 ERR("Exhausted number of available FD upon duplicate stream");
2469 goto error;
2470 }
2471
2472 /* Duplicate object for stream once the original is in the registry. */
2473 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2474 reg_stream->obj.ust);
2475 if (ret < 0) {
2476 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2477 reg_stream->obj.ust, stream->obj, ret);
2478 lttng_fd_put(LTTNG_FD_APPS, 2);
2479 goto error;
2480 }
2481 stream->handle = stream->obj->handle;
2482
2483 error:
2484 return ret;
2485 }
2486
2487 /*
2488 * Duplicate the ust data object of the ust app. channel and save it in the
2489 * buffer registry channel.
2490 *
2491 * Return 0 on success or else a negative value.
2492 */
2493 static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
2494 struct ust_app_channel *ua_chan)
2495 {
2496 int ret;
2497
2498 assert(reg_chan);
2499 assert(ua_chan);
2500
2501 /* Need two fds for the channel. */
2502 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2503 if (ret < 0) {
2504 ERR("Exhausted number of available FD upon duplicate channel");
2505 goto error_fd_get;
2506 }
2507
2508 /* Duplicate object for stream once the original is in the registry. */
2509 ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
2510 if (ret < 0) {
2511 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
2512 reg_chan->obj.ust, ua_chan->obj, ret);
2513 goto error;
2514 }
2515 ua_chan->handle = ua_chan->obj->handle;
2516
2517 return 0;
2518
2519 error:
2520 lttng_fd_put(LTTNG_FD_APPS, 1);
2521 error_fd_get:
2522 return ret;
2523 }
2524
2525 /*
2526 * For a given channel buffer registry, setup all streams of the given ust
2527 * application channel.
2528 *
2529 * Return 0 on success or else a negative value.
2530 */
2531 static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
2532 struct ust_app_channel *ua_chan,
2533 struct ust_app *app)
2534 {
2535 int ret = 0;
2536 struct ust_app_stream *stream, *stmp;
2537
2538 assert(reg_chan);
2539 assert(ua_chan);
2540
2541 DBG2("UST app setup buffer registry stream");
2542
2543 /* Send all streams to application. */
2544 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2545 struct buffer_reg_stream *reg_stream;
2546
2547 ret = buffer_reg_stream_create(&reg_stream);
2548 if (ret < 0) {
2549 goto error;
2550 }
2551
2552 /*
2553 * Keep original pointer and nullify it in the stream so the delete
2554 * stream call does not release the object.
2555 */
2556 reg_stream->obj.ust = stream->obj;
2557 stream->obj = NULL;
2558 buffer_reg_stream_add(reg_stream, reg_chan);
2559
2560 /* We don't need the streams anymore. */
2561 cds_list_del(&stream->list);
2562 delete_ust_app_stream(-1, stream, app);
2563 }
2564
2565 error:
2566 return ret;
2567 }
2568
2569 /*
2570 * Create a buffer registry channel for the given session registry and
2571 * application channel object. If regp pointer is valid, it's set with the
2572 * created object. Important, the created object is NOT added to the session
2573 * registry hash table.
2574 *
2575 * Return 0 on success else a negative value.
2576 */
2577 static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2578 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
2579 {
2580 int ret;
2581 struct buffer_reg_channel *reg_chan = NULL;
2582
2583 assert(reg_sess);
2584 assert(ua_chan);
2585
2586 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
2587
2588 /* Create buffer registry channel. */
2589 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
2590 if (ret < 0) {
2591 goto error_create;
2592 }
2593 assert(reg_chan);
2594 reg_chan->consumer_key = ua_chan->key;
2595 reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
2596 reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
2597
2598 /* Create and add a channel registry to session. */
2599 ret = ust_registry_channel_add(reg_sess->reg.ust,
2600 ua_chan->tracing_channel_id);
2601 if (ret < 0) {
2602 goto error;
2603 }
2604 buffer_reg_channel_add(reg_sess, reg_chan);
2605
2606 if (regp) {
2607 *regp = reg_chan;
2608 }
2609
2610 return 0;
2611
2612 error:
2613 /* Safe because the registry channel object was not added to any HT. */
2614 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2615 error_create:
2616 return ret;
2617 }
2618
2619 /*
2620 * Setup buffer registry channel for the given session registry and application
2621 * channel object. If regp pointer is valid, it's set with the created object.
2622 *
2623 * Return 0 on success else a negative value.
2624 */
2625 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2626 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan,
2627 struct ust_app *app)
2628 {
2629 int ret;
2630
2631 assert(reg_sess);
2632 assert(reg_chan);
2633 assert(ua_chan);
2634 assert(ua_chan->obj);
2635
2636 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
2637
2638 /* Setup all streams for the registry. */
2639 ret = setup_buffer_reg_streams(reg_chan, ua_chan, app);
2640 if (ret < 0) {
2641 goto error;
2642 }
2643
2644 reg_chan->obj.ust = ua_chan->obj;
2645 ua_chan->obj = NULL;
2646
2647 return 0;
2648
2649 error:
2650 buffer_reg_channel_remove(reg_sess, reg_chan);
2651 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2652 return ret;
2653 }
2654
2655 /*
2656 * Send buffer registry channel to the application.
2657 *
2658 * Return 0 on success else a negative value.
2659 */
2660 static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
2661 struct ust_app *app, struct ust_app_session *ua_sess,
2662 struct ust_app_channel *ua_chan)
2663 {
2664 int ret;
2665 struct buffer_reg_stream *reg_stream;
2666
2667 assert(reg_chan);
2668 assert(app);
2669 assert(ua_sess);
2670 assert(ua_chan);
2671
2672 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
2673
2674 ret = duplicate_channel_object(reg_chan, ua_chan);
2675 if (ret < 0) {
2676 goto error;
2677 }
2678
2679 /* Send channel to the application. */
2680 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
2681 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2682 ret = -ENOTCONN; /* Caused by app exiting. */
2683 goto error;
2684 } else if (ret < 0) {
2685 goto error;
2686 }
2687
2688 health_code_update();
2689
2690 /* Send all streams to application. */
2691 pthread_mutex_lock(&reg_chan->stream_list_lock);
2692 cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
2693 struct ust_app_stream stream;
2694
2695 ret = duplicate_stream_object(reg_stream, &stream);
2696 if (ret < 0) {
2697 goto error_stream_unlock;
2698 }
2699
2700 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
2701 if (ret < 0) {
2702 (void) release_ust_app_stream(-1, &stream, app);
2703 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2704 ret = -ENOTCONN; /* Caused by app exiting. */
2705 goto error_stream_unlock;
2706 } else if (ret < 0) {
2707 goto error_stream_unlock;
2708 }
2709 goto error_stream_unlock;
2710 }
2711
2712 /*
2713 * The return value is not important here. This function will output an
2714 * error if needed.
2715 */
2716 (void) release_ust_app_stream(-1, &stream, app);
2717 }
2718 ua_chan->is_sent = 1;
2719
2720 error_stream_unlock:
2721 pthread_mutex_unlock(&reg_chan->stream_list_lock);
2722 error:
2723 return ret;
2724 }
2725
2726 /*
2727 * Create and send to the application the created buffers with per UID buffers.
2728 *
2729 * Return 0 on success else a negative value.
2730 */
2731 static int create_channel_per_uid(struct ust_app *app,
2732 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2733 struct ust_app_channel *ua_chan)
2734 {
2735 int ret;
2736 struct buffer_reg_uid *reg_uid;
2737 struct buffer_reg_channel *reg_chan;
2738
2739 assert(app);
2740 assert(usess);
2741 assert(ua_sess);
2742 assert(ua_chan);
2743
2744 DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
2745
2746 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2747 /*
2748 * The session creation handles the creation of this global registry
2749 * object. If none can be find, there is a code flow problem or a
2750 * teardown race.
2751 */
2752 assert(reg_uid);
2753
2754 reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
2755 reg_uid);
2756 if (!reg_chan) {
2757 /* Create the buffer registry channel object. */
2758 ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
2759 if (ret < 0) {
2760 ERR("Error creating the UST channel \"%s\" registry instance",
2761 ua_chan->name);
2762 goto error;
2763 }
2764 assert(reg_chan);
2765
2766 /*
2767 * Create the buffers on the consumer side. This call populates the
2768 * ust app channel object with all streams and data object.
2769 */
2770 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2771 app->bits_per_long, reg_uid->registry->reg.ust);
2772 if (ret < 0) {
2773 ERR("Error creating UST channel \"%s\" on the consumer daemon",
2774 ua_chan->name);
2775
2776 /*
2777 * Let's remove the previously created buffer registry channel so
2778 * it's not visible anymore in the session registry.
2779 */
2780 ust_registry_channel_del_free(reg_uid->registry->reg.ust,
2781 ua_chan->tracing_channel_id);
2782 buffer_reg_channel_remove(reg_uid->registry, reg_chan);
2783 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2784 goto error;
2785 }
2786
2787 /*
2788 * Setup the streams and add it to the session registry.
2789 */
2790 ret = setup_buffer_reg_channel(reg_uid->registry,
2791 ua_chan, reg_chan, app);
2792 if (ret < 0) {
2793 ERR("Error setting up UST channel \"%s\"",
2794 ua_chan->name);
2795 goto error;
2796 }
2797
2798 }
2799
2800 /* Send buffers to the application. */
2801 ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
2802 if (ret < 0) {
2803 if (ret != -ENOTCONN) {
2804 ERR("Error sending channel to application");
2805 }
2806 goto error;
2807 }
2808
2809 error:
2810 return ret;
2811 }
2812
2813 /*
2814 * Create and send to the application the created buffers with per PID buffers.
2815 *
2816 * Return 0 on success else a negative value.
2817 */
2818 static int create_channel_per_pid(struct ust_app *app,
2819 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2820 struct ust_app_channel *ua_chan)
2821 {
2822 int ret;
2823 struct ust_registry_session *registry;
2824
2825 assert(app);
2826 assert(usess);
2827 assert(ua_sess);
2828 assert(ua_chan);
2829
2830 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
2831
2832 rcu_read_lock();
2833
2834 registry = get_session_registry(ua_sess);
2835 assert(registry);
2836
2837 /* Create and add a new channel registry to session. */
2838 ret = ust_registry_channel_add(registry, ua_chan->key);
2839 if (ret < 0) {
2840 ERR("Error creating the UST channel \"%s\" registry instance",
2841 ua_chan->name);
2842 goto error;
2843 }
2844
2845 /* Create and get channel on the consumer side. */
2846 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2847 app->bits_per_long, registry);
2848 if (ret < 0) {
2849 ERR("Error creating UST channel \"%s\" on the consumer daemon",
2850 ua_chan->name);
2851 goto error;
2852 }
2853
2854 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
2855 if (ret < 0) {
2856 if (ret != -ENOTCONN) {
2857 ERR("Error sending channel to application");
2858 }
2859 goto error;
2860 }
2861
2862 error:
2863 rcu_read_unlock();
2864 return ret;
2865 }
2866
2867 /*
2868 * From an already allocated ust app channel, create the channel buffers if
2869 * need and send it to the application. This MUST be called with a RCU read
2870 * side lock acquired.
2871 *
2872 * Return 0 on success or else a negative value. Returns -ENOTCONN if
2873 * the application exited concurrently.
2874 */
2875 static int do_create_channel(struct ust_app *app,
2876 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2877 struct ust_app_channel *ua_chan)
2878 {
2879 int ret;
2880
2881 assert(app);
2882 assert(usess);
2883 assert(ua_sess);
2884 assert(ua_chan);
2885
2886 /* Handle buffer type before sending the channel to the application. */
2887 switch (usess->buffer_type) {
2888 case LTTNG_BUFFER_PER_UID:
2889 {
2890 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
2891 if (ret < 0) {
2892 goto error;
2893 }
2894 break;
2895 }
2896 case LTTNG_BUFFER_PER_PID:
2897 {
2898 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
2899 if (ret < 0) {
2900 goto error;
2901 }
2902 break;
2903 }
2904 default:
2905 assert(0);
2906 ret = -EINVAL;
2907 goto error;
2908 }
2909
2910 /* Initialize ust objd object using the received handle and add it. */
2911 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
2912 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
2913
2914 /* If channel is not enabled, disable it on the tracer */
2915 if (!ua_chan->enabled) {
2916 ret = disable_ust_channel(app, ua_sess, ua_chan);
2917 if (ret < 0) {
2918 goto error;
2919 }
2920 }
2921
2922 error:
2923 return ret;
2924 }
2925
2926 /*
2927 * Create UST app channel and create it on the tracer. Set ua_chanp of the
2928 * newly created channel if not NULL.
2929 *
2930 * Called with UST app session lock and RCU read-side lock held.
2931 *
2932 * Return 0 on success or else a negative value. Returns -ENOTCONN if
2933 * the application exited concurrently.
2934 */
2935 static int create_ust_app_channel(struct ust_app_session *ua_sess,
2936 struct ltt_ust_channel *uchan, struct ust_app *app,
2937 enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
2938 struct ust_app_channel **ua_chanp)
2939 {
2940 int ret = 0;
2941 struct lttng_ht_iter iter;
2942 struct lttng_ht_node_str *ua_chan_node;
2943 struct ust_app_channel *ua_chan;
2944
2945 /* Lookup channel in the ust app session */
2946 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2947 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2948 if (ua_chan_node != NULL) {
2949 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2950 goto end;
2951 }
2952
2953 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
2954 if (ua_chan == NULL) {
2955 /* Only malloc can fail here */
2956 ret = -ENOMEM;
2957 goto error_alloc;
2958 }
2959 shadow_copy_channel(ua_chan, uchan);
2960
2961 /* Set channel type. */
2962 ua_chan->attr.type = type;
2963
2964 ret = do_create_channel(app, usess, ua_sess, ua_chan);
2965 if (ret < 0) {
2966 goto error;
2967 }
2968
2969 DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
2970 app->pid);
2971
2972 /* Only add the channel if successful on the tracer side. */
2973 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
2974
2975 end:
2976 if (ua_chanp) {
2977 *ua_chanp = ua_chan;
2978 }
2979
2980 /* Everything went well. */
2981 return 0;
2982
2983 error:
2984 delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
2985 error_alloc:
2986 return ret;
2987 }
2988
2989 /*
2990 * Create UST app event and create it on the tracer side.
2991 *
2992 * Called with ust app session mutex held.
2993 */
2994 static
2995 int create_ust_app_event(struct ust_app_session *ua_sess,
2996 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
2997 struct ust_app *app)
2998 {
2999 int ret = 0;
3000 struct ust_app_event *ua_event;
3001
3002 /* Get event node */
3003 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
3004 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
3005 if (ua_event != NULL) {
3006 ret = -EEXIST;
3007 goto end;
3008 }
3009
3010 /* Does not exist so create one */
3011 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
3012 if (ua_event == NULL) {
3013 /* Only malloc can failed so something is really wrong */
3014 ret = -ENOMEM;
3015 goto end;
3016 }
3017 shadow_copy_event(ua_event, uevent);
3018
3019 /* Create it on the tracer side */
3020 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
3021 if (ret < 0) {
3022 /* Not found previously means that it does not exist on the tracer */
3023 assert(ret != -LTTNG_UST_ERR_EXIST);
3024 goto error;
3025 }
3026
3027 add_unique_ust_app_event(ua_chan, ua_event);
3028
3029 DBG2("UST app create event %s for PID %d completed", ua_event->name,
3030 app->pid);
3031
3032 end:
3033 return ret;
3034
3035 error:
3036 /* Valid. Calling here is already in a read side lock */
3037 delete_ust_app_event(-1, ua_event, app);
3038 return ret;
3039 }
3040
3041 /*
3042 * Create UST metadata and open it on the tracer side.
3043 *
3044 * Called with UST app session lock held and RCU read side lock.
3045 */
3046 static int create_ust_app_metadata(struct ust_app_session *ua_sess,
3047 struct ust_app *app, struct consumer_output *consumer)
3048 {
3049 int ret = 0;
3050 struct ust_app_channel *metadata;
3051 struct consumer_socket *socket;
3052 struct ust_registry_session *registry;
3053
3054 assert(ua_sess);
3055 assert(app);
3056 assert(consumer);
3057
3058 registry = get_session_registry(ua_sess);
3059 assert(registry);
3060
3061 pthread_mutex_lock(&registry->lock);
3062
3063 /* Metadata already exists for this registry or it was closed previously */
3064 if (registry->metadata_key || registry->metadata_closed) {
3065 ret = 0;
3066 goto error;
3067 }
3068
3069 /* Allocate UST metadata */
3070 metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
3071 if (!metadata) {
3072 /* malloc() failed */
3073 ret = -ENOMEM;
3074 goto error;
3075 }
3076
3077 memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));
3078
3079 /* Need one fd for the channel. */
3080 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3081 if (ret < 0) {
3082 ERR("Exhausted number of available FD upon create metadata");
3083 goto error;
3084 }
3085
3086 /* Get the right consumer socket for the application. */
3087 socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
3088 if (!socket) {
3089 ret = -EINVAL;
3090 goto error_consumer;
3091 }
3092
3093 /*
3094 * Keep metadata key so we can identify it on the consumer side. Assign it
3095 * to the registry *before* we ask the consumer so we avoid the race of the
3096 * consumer requesting the metadata and the ask_channel call on our side
3097 * did not returned yet.
3098 */
3099 registry->metadata_key = metadata->key;
3100
3101 /*
3102 * Ask the metadata channel creation to the consumer. The metadata object
3103 * will be created by the consumer and kept their. However, the stream is
3104 * never added or monitored until we do a first push metadata to the
3105 * consumer.
3106 */
3107 ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
3108 registry);
3109 if (ret < 0) {
3110 /* Nullify the metadata key so we don't try to close it later on. */
3111 registry->metadata_key = 0;
3112 goto error_consumer;
3113 }
3114
3115 /*
3116 * The setup command will make the metadata stream be sent to the relayd,
3117 * if applicable, and the thread managing the metadatas. This is important
3118 * because after this point, if an error occurs, the only way the stream
3119 * can be deleted is to be monitored in the consumer.
3120 */
3121 ret = consumer_setup_metadata(socket, metadata->key);
3122 if (ret < 0) {
3123 /* Nullify the metadata key so we don't try to close it later on. */
3124 registry->metadata_key = 0;
3125 goto error_consumer;
3126 }
3127
3128 DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
3129 metadata->key, app->pid);
3130
3131 error_consumer:
3132 lttng_fd_put(LTTNG_FD_APPS, 1);
3133 delete_ust_app_channel(-1, metadata, app);
3134 error:
3135 pthread_mutex_unlock(&registry->lock);
3136 return ret;
3137 }
3138
3139 /*
3140 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
3141 * acquired before calling this function.
3142 */
3143 struct ust_app *ust_app_find_by_pid(pid_t pid)
3144 {
3145 struct ust_app *app = NULL;
3146 struct lttng_ht_node_ulong *node;
3147 struct lttng_ht_iter iter;
3148
3149 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
3150 node = lttng_ht_iter_get_node_ulong(&iter);
3151 if (node == NULL) {
3152 DBG2("UST app no found with pid %d", pid);
3153 goto error;
3154 }
3155
3156 DBG2("Found UST app by pid %d", pid);
3157
3158 app = caa_container_of(node, struct ust_app, pid_n);
3159
3160 error:
3161 return app;
3162 }
3163
3164 /*
3165 * Allocate and init an UST app object using the registration information and
3166 * the command socket. This is called when the command socket connects to the
3167 * session daemon.
3168 *
3169 * The object is returned on success or else NULL.
3170 */
3171 struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
3172 {
3173 struct ust_app *lta = NULL;
3174
3175 assert(msg);
3176 assert(sock >= 0);
3177
3178 DBG3("UST app creating application for socket %d", sock);
3179
3180 if ((msg->bits_per_long == 64 &&
3181 (uatomic_read(&ust_consumerd64_fd) == -EINVAL))
3182 || (msg->bits_per_long == 32 &&
3183 (uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
3184 ERR("Registration failed: application \"%s\" (pid: %d) has "
3185 "%d-bit long, but no consumerd for this size is available.\n",
3186 msg->name, msg->pid, msg->bits_per_long);
3187 goto error;
3188 }
3189
3190 lta = zmalloc(sizeof(struct ust_app));
3191 if (lta == NULL) {
3192 PERROR("malloc");
3193 goto error;
3194 }
3195
3196 lta->ppid = msg->ppid;
3197 lta->uid = msg->uid;
3198 lta->gid = msg->gid;
3199
3200 lta->bits_per_long = msg->bits_per_long;
3201 lta->uint8_t_alignment = msg->uint8_t_alignment;
3202 lta->uint16_t_alignment = msg->uint16_t_alignment;
3203 lta->uint32_t_alignment = msg->uint32_t_alignment;
3204 lta->uint64_t_alignment = msg->uint64_t_alignment;
3205 lta->long_alignment = msg->long_alignment;
3206 lta->byte_order = msg->byte_order;
3207
3208 lta->v_major = msg->major;
3209 lta->v_minor = msg->minor;
3210 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3211 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3212 lta->notify_sock = -1;
3213
3214 /* Copy name and make sure it's NULL terminated. */
3215 strncpy(lta->name, msg->name, sizeof(lta->name));
3216 lta->name[UST_APP_PROCNAME_LEN] = '\0';
3217
3218 /*
3219 * Before this can be called, when receiving the registration information,
3220 * the application compatibility is checked. So, at this point, the
3221 * application can work with this session daemon.
3222 */
3223 lta->compatible = 1;
3224
3225 lta->pid = msg->pid;
3226 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
3227 lta->sock = sock;
3228 pthread_mutex_init(&lta->sock_lock, NULL);
3229 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
3230
3231 CDS_INIT_LIST_HEAD(&lta->teardown_head);
3232 error:
3233 return lta;
3234 }
3235
3236 /*
3237 * For a given application object, add it to every hash table.
3238 */
3239 void ust_app_add(struct ust_app *app)
3240 {
3241 assert(app);
3242 assert(app->notify_sock >= 0);
3243
3244 rcu_read_lock();
3245
3246 /*
3247 * On a re-registration, we want to kick out the previous registration of
3248 * that pid
3249 */
3250 lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
3251
3252 /*
3253 * The socket _should_ be unique until _we_ call close. So, a add_unique
3254 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
3255 * already in the table.
3256 */
3257 lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
3258
3259 /* Add application to the notify socket hash table. */
3260 lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
3261 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
3262
3263 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
3264 "notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
3265 app->gid, app->sock, app->name, app->notify_sock, app->v_major,
3266 app->v_minor);
3267
3268 rcu_read_unlock();
3269 }
3270
3271 /*
3272 * Set the application version into the object.
3273 *
3274 * Return 0 on success else a negative value either an errno code or a
3275 * LTTng-UST error code.
3276 */
3277 int ust_app_version(struct ust_app *app)
3278 {
3279 int ret;
3280
3281 assert(app);
3282
3283 pthread_mutex_lock(&app->sock_lock);
3284 ret = ustctl_tracer_version(app->sock, &app->version);
3285 pthread_mutex_unlock(&app->sock_lock);
3286 if (ret < 0) {
3287 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3288 ERR("UST app %d version failed with ret %d", app->sock, ret);
3289 } else {
3290 DBG3("UST app %d version failed. Application is dead", app->sock);
3291 }
3292 }
3293
3294 return ret;
3295 }
3296
/*
 * Unregister app by removing it from the global traceable app list and freeing
 * the data struct.
 *
 * The socket is already closed at this point so no close to sock.
 */
void ust_app_unregister(int sock)
{
	struct ust_app *lta;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter ust_app_sock_iter;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess;
	int ret;

	rcu_read_lock();

	/* Get the node reference for a call_rcu */
	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
	node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
	/* The caller guarantees a registered app exists for this socket. */
	assert(node);

	lta = caa_container_of(node, struct ust_app, sock_n);
	DBG("PID %d unregistering with sock %d", lta->pid, sock);

	/*
	 * For per-PID buffers, perform "push metadata" and flush all
	 * application streams before removing app from hash tables,
	 * ensuring proper behavior of data_pending check.
	 * Remove sessions so they are not visible during deletion.
	 */
	cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
			node.node) {
		struct ust_registry_session *registry;

		ret = lttng_ht_del(lta->sessions, &iter);
		if (ret) {
			/* The session was already removed so scheduled for teardown. */
			continue;
		}

		if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
			/* Best-effort flush; failure is deliberately ignored here. */
			(void) ust_app_flush_app_session(lta, ua_sess);
		}

		/*
		 * Add session to list for teardown. This is safe since at this point we
		 * are the only one using this list.
		 */
		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			/* Session already being torn down; skip it. */
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/*
		 * Normally, this is done in the delete session process which is
		 * executed in the call rcu below. However, upon registration we can't
		 * afford to wait for the grace period before pushing data or else the
		 * data pending feature can race between the unregistration and stop
		 * command where the data pending command is sent *before* the grace
		 * period ended.
		 *
		 * The close metadata below nullifies the metadata pointer in the
		 * session so the delete session will NOT push/close a second time.
		 */
		registry = get_session_registry(ua_sess);
		if (registry) {
			/* Push metadata for application before freeing the application. */
			(void) push_metadata(registry, ua_sess->consumer);

			/*
			 * Don't ask to close metadata for global per UID buffers. Close
			 * metadata only on destroy trace session in this case. Also, the
			 * previous push metadata could have flag the metadata registry to
			 * close so don't send a close command if closed.
			 */
			if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
				/* And ask to close it for this session registry. */
				(void) close_metadata(registry, ua_sess->consumer);
			}
		}
		cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);

		pthread_mutex_unlock(&ua_sess->lock);
	}

	/* Remove application from PID hash table */
	ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
	assert(!ret);

	/*
	 * Remove application from notify hash table. The thread handling the
	 * notify socket could have deleted the node so ignore on error because
	 * either way it's valid. The close of that socket is handled by the other
	 * thread.
	 */
	iter.iter.node = &lta->notify_sock_n.node;
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

	/*
	 * Ignore return value since the node might have been removed before by an
	 * add replace during app registration because the PID can be reassigned by
	 * the OS.
	 */
	iter.iter.node = &lta->pid_n.node;
	ret = lttng_ht_del(ust_app_ht, &iter);
	if (ret) {
		DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
				lta->pid);
	}

	/* Free memory after the RCU grace period so readers are safe. */
	call_rcu(&lta->pid_n.head, delete_ust_app_rcu);

	rcu_read_unlock();
	return;
}
3416
3417 /*
3418 * Fill events array with all events name of all registered apps.
3419 */
3420 int ust_app_list_events(struct lttng_event **events)
3421 {
3422 int ret, handle;
3423 size_t nbmem, count = 0;
3424 struct lttng_ht_iter iter;
3425 struct ust_app *app;
3426 struct lttng_event *tmp_event;
3427
3428 nbmem = UST_APP_EVENT_LIST_SIZE;
3429 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
3430 if (tmp_event == NULL) {
3431 PERROR("zmalloc ust app events");
3432 ret = -ENOMEM;
3433 goto error;
3434 }
3435
3436 rcu_read_lock();
3437
3438 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3439 struct lttng_ust_tracepoint_iter uiter;
3440
3441 health_code_update();
3442
3443 if (!app->compatible) {
3444 /*
3445 * TODO: In time, we should notice the caller of this error by
3446 * telling him that this is a version error.
3447 */
3448 continue;
3449 }
3450 pthread_mutex_lock(&app->sock_lock);
3451 handle = ustctl_tracepoint_list(app->sock);
3452 if (handle < 0) {
3453 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3454 ERR("UST app list events getting handle failed for app pid %d",
3455 app->pid);
3456 }
3457 pthread_mutex_unlock(&app->sock_lock);
3458 continue;
3459 }
3460
3461 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
3462 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3463 /* Handle ustctl error. */
3464 if (ret < 0) {
3465 int release_ret;
3466
3467 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3468 ERR("UST app tp list get failed for app %d with ret %d",
3469 app->sock, ret);
3470 } else {
3471 DBG3("UST app tp list get failed. Application is dead");
3472 /*
3473 * This is normal behavior, an application can die during the
3474 * creation process. Don't report an error so the execution can
3475 * continue normally. Continue normal execution.
3476 */
3477 break;
3478 }
3479 free(tmp_event);
3480 release_ret = ustctl_release_handle(app->sock, handle);
3481 if (release_ret != -LTTNG_UST_ERR_EXITING && release_ret != -EPIPE) {
3482 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3483 }
3484 pthread_mutex_unlock(&app->sock_lock);
3485 goto rcu_error;
3486 }
3487
3488 health_code_update();
3489 if (count >= nbmem) {
3490 /* In case the realloc fails, we free the memory */
3491 struct lttng_event *new_tmp_event;
3492 size_t new_nbmem;
3493
3494 new_nbmem = nbmem << 1;
3495 DBG2("Reallocating event list from %zu to %zu entries",
3496 nbmem, new_nbmem);
3497 new_tmp_event = realloc(tmp_event,
3498 new_nbmem * sizeof(struct lttng_event));
3499 if (new_tmp_event == NULL) {
3500 int release_ret;
3501
3502 PERROR("realloc ust app events");
3503 free(tmp_event);
3504 ret = -ENOMEM;
3505 release_ret = ustctl_release_handle(app->sock, handle);
3506 if (release_ret != -LTTNG_UST_ERR_EXITING && release_ret != -EPIPE) {
3507 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3508 }
3509 pthread_mutex_unlock(&app->sock_lock);
3510 goto rcu_error;
3511 }
3512 /* Zero the new memory */
3513 memset(new_tmp_event + nbmem, 0,
3514 (new_nbmem - nbmem) * sizeof(struct lttng_event));
3515 nbmem = new_nbmem;
3516 tmp_event = new_tmp_event;
3517 }
3518 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
3519 tmp_event[count].loglevel = uiter.loglevel;
3520 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
3521 tmp_event[count].pid = app->pid;
3522 tmp_event[count].enabled = -1;
3523 count++;
3524 }
3525 ret = ustctl_release_handle(app->sock, handle);
3526 pthread_mutex_unlock(&app->sock_lock);
3527 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3528 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
3529 }
3530 }
3531
3532 ret = count;
3533 *events = tmp_event;
3534
3535 DBG2("UST app list events done (%zu events)", count);
3536
3537 rcu_error:
3538 rcu_read_unlock();
3539 error:
3540 health_code_update();
3541 return ret;
3542 }
3543
3544 /*
3545 * Fill events array with all events name of all registered apps.
3546 */
3547 int ust_app_list_event_fields(struct lttng_event_field **fields)
3548 {
3549 int ret, handle;
3550 size_t nbmem, count = 0;
3551 struct lttng_ht_iter iter;
3552 struct ust_app *app;
3553 struct lttng_event_field *tmp_event;
3554
3555 nbmem = UST_APP_EVENT_LIST_SIZE;
3556 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3557 if (tmp_event == NULL) {
3558 PERROR("zmalloc ust app event fields");
3559 ret = -ENOMEM;
3560 goto error;
3561 }
3562
3563 rcu_read_lock();
3564
3565 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3566 struct lttng_ust_field_iter uiter;
3567
3568 health_code_update();
3569
3570 if (!app->compatible) {
3571 /*
3572 * TODO: In time, we should notice the caller of this error by
3573 * telling him that this is a version error.
3574 */
3575 continue;
3576 }
3577 pthread_mutex_lock(&app->sock_lock);
3578 handle = ustctl_tracepoint_field_list(app->sock);
3579 if (handle < 0) {
3580 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3581 ERR("UST app list field getting handle failed for app pid %d",
3582 app->pid);
3583 }
3584 pthread_mutex_unlock(&app->sock_lock);
3585 continue;
3586 }
3587
3588 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
3589 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3590 /* Handle ustctl error. */
3591 if (ret < 0) {
3592 int release_ret;
3593
3594 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3595 ERR("UST app tp list field failed for app %d with ret %d",
3596 app->sock, ret);
3597 } else {
3598 DBG3("UST app tp list field failed. Application is dead");
3599 /*
3600 * This is normal behavior, an application can die during the
3601 * creation process. Don't report an error so the execution can
3602 * continue normally. Reset list and count for next app.
3603 */
3604 break;
3605 }
3606 free(tmp_event);
3607 release_ret = ustctl_release_handle(app->sock, handle);
3608 pthread_mutex_unlock(&app->sock_lock);
3609 if (release_ret != -LTTNG_UST_ERR_EXITING && release_ret != -EPIPE) {
3610 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3611 }
3612 goto rcu_error;
3613 }
3614
3615 health_code_update();
3616 if (count >= nbmem) {
3617 /* In case the realloc fails, we free the memory */
3618 struct lttng_event_field *new_tmp_event;
3619 size_t new_nbmem;
3620
3621 new_nbmem = nbmem << 1;
3622 DBG2("Reallocating event field list from %zu to %zu entries",
3623 nbmem, new_nbmem);
3624 new_tmp_event = realloc(tmp_event,
3625 new_nbmem * sizeof(struct lttng_event_field));
3626 if (new_tmp_event == NULL) {
3627 int release_ret;
3628
3629 PERROR("realloc ust app event fields");
3630 free(tmp_event);
3631 ret = -ENOMEM;
3632 release_ret = ustctl_release_handle(app->sock, handle);
3633 pthread_mutex_unlock(&app->sock_lock);
3634 if (release_ret != -LTTNG_UST_ERR_EXITING && release_ret != -EPIPE) {
3635 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3636 }
3637 goto rcu_error;
3638 }
3639 /* Zero the new memory */
3640 memset(new_tmp_event + nbmem, 0,
3641 (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
3642 nbmem = new_nbmem;
3643 tmp_event = new_tmp_event;
3644 }
3645
3646 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
3647 /* Mapping between these enums matches 1 to 1. */
3648 tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
3649 tmp_event[count].nowrite = uiter.nowrite;
3650
3651 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
3652 tmp_event[count].event.loglevel = uiter.loglevel;
3653 tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
3654 tmp_event[count].event.pid = app->pid;
3655 tmp_event[count].event.enabled = -1;
3656 count++;
3657 }
3658 ret = ustctl_release_handle(app->sock, handle);
3659 pthread_mutex_unlock(&app->sock_lock);
3660 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3661 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
3662 }
3663 }
3664
3665 ret = count;
3666 *fields = tmp_event;
3667
3668 DBG2("UST app list event fields done (%zu events)", count);
3669
3670 rcu_error:
3671 rcu_read_unlock();
3672 error:
3673 health_code_update();
3674 return ret;
3675 }
3676
3677 /*
3678 * Free and clean all traceable apps of the global list.
3679 *
3680 * Should _NOT_ be called with RCU read-side lock held.
3681 */
3682 void ust_app_clean_list(void)
3683 {
3684 int ret;
3685 struct ust_app *app;
3686 struct lttng_ht_iter iter;
3687
3688 DBG2("UST app cleaning registered apps hash table");
3689
3690 rcu_read_lock();
3691
3692 if (ust_app_ht) {
3693 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3694 ret = lttng_ht_del(ust_app_ht, &iter);
3695 assert(!ret);
3696 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
3697 }
3698 }
3699
3700 /* Cleanup socket hash table */
3701 if (ust_app_ht_by_sock) {
3702 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
3703 sock_n.node) {
3704 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
3705 assert(!ret);
3706 }
3707 }
3708
3709 /* Cleanup notify socket hash table */
3710 if (ust_app_ht_by_notify_sock) {
3711 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
3712 notify_sock_n.node) {
3713 ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
3714 assert(!ret);
3715 }
3716 }
3717 rcu_read_unlock();
3718
3719 /* Destroy is done only when the ht is empty */
3720 if (ust_app_ht) {
3721 ht_cleanup_push(ust_app_ht);
3722 }
3723 if (ust_app_ht_by_sock) {
3724 ht_cleanup_push(ust_app_ht_by_sock);
3725 }
3726 if (ust_app_ht_by_notify_sock) {
3727 ht_cleanup_push(ust_app_ht_by_notify_sock);
3728 }
3729 }
3730
3731 /*
3732 * Init UST app hash table.
3733 */
3734 int ust_app_ht_alloc(void)
3735 {
3736 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3737 if (!ust_app_ht) {
3738 return -1;
3739 }
3740 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3741 if (!ust_app_ht_by_sock) {
3742 return -1;
3743 }
3744 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3745 if (!ust_app_ht_by_notify_sock) {
3746 return -1;
3747 }
3748 return 0;
3749 }
3750
3751 /*
3752 * For a specific UST session, disable the channel for all registered apps.
3753 */
3754 int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
3755 struct ltt_ust_channel *uchan)
3756 {
3757 int ret = 0;
3758 struct lttng_ht_iter iter;
3759 struct lttng_ht_node_str *ua_chan_node;
3760 struct ust_app *app;
3761 struct ust_app_session *ua_sess;
3762 struct ust_app_channel *ua_chan;
3763
3764 if (usess == NULL || uchan == NULL) {
3765 ERR("Disabling UST global channel with NULL values");
3766 ret = -1;
3767 goto error;
3768 }
3769
3770 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
3771 uchan->name, usess->id);
3772
3773 rcu_read_lock();
3774
3775 /* For every registered applications */
3776 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3777 struct lttng_ht_iter uiter;
3778 if (!app->compatible) {
3779 /*
3780 * TODO: In time, we should notice the caller of this error by
3781 * telling him that this is a version error.
3782 */
3783 continue;
3784 }
3785 ua_sess = lookup_session_by_app(usess, app);
3786 if (ua_sess == NULL) {
3787 continue;
3788 }
3789
3790 /* Get channel */
3791 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3792 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3793 /* If the session if found for the app, the channel must be there */
3794 assert(ua_chan_node);
3795
3796 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3797 /* The channel must not be already disabled */
3798 assert(ua_chan->enabled == 1);
3799
3800 /* Disable channel onto application */
3801 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
3802 if (ret < 0) {
3803 /* XXX: We might want to report this error at some point... */
3804 continue;
3805 }
3806 }
3807
3808 rcu_read_unlock();
3809
3810 error:
3811 return ret;
3812 }
3813
3814 /*
3815 * For a specific UST session, enable the channel for all registered apps.
3816 */
3817 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
3818 struct ltt_ust_channel *uchan)
3819 {
3820 int ret = 0;
3821 struct lttng_ht_iter iter;
3822 struct ust_app *app;
3823 struct ust_app_session *ua_sess;
3824
3825 if (usess == NULL || uchan == NULL) {
3826 ERR("Adding UST global channel to NULL values");
3827 ret = -1;
3828 goto error;
3829 }
3830
3831 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
3832 uchan->name, usess->id);
3833
3834 rcu_read_lock();
3835
3836 /* For every registered applications */
3837 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3838 if (!app->compatible) {
3839 /*
3840 * TODO: In time, we should notice the caller of this error by
3841 * telling him that this is a version error.
3842 */
3843 continue;
3844 }
3845 ua_sess = lookup_session_by_app(usess, app);
3846 if (ua_sess == NULL) {
3847 continue;
3848 }
3849
3850 /* Enable channel onto application */
3851 ret = enable_ust_app_channel(ua_sess, uchan, app);
3852 if (ret < 0) {
3853 /* XXX: We might want to report this error at some point... */
3854 continue;
3855 }
3856 }
3857
3858 rcu_read_unlock();
3859
3860 error:
3861 return ret;
3862 }
3863
3864 /*
3865 * Disable an event in a channel and for a specific session.
3866 */
3867 int ust_app_disable_event_glb(struct ltt_ust_session *usess,
3868 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3869 {
3870 int ret = 0;
3871 struct lttng_ht_iter iter, uiter;
3872 struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
3873 struct ust_app *app;
3874 struct ust_app_session *ua_sess;
3875 struct ust_app_channel *ua_chan;
3876 struct ust_app_event *ua_event;
3877
3878 DBG("UST app disabling event %s for all apps in channel "
3879 "%s for session id %" PRIu64,
3880 uevent->attr.name, uchan->name, usess->id);
3881
3882 rcu_read_lock();
3883
3884 /* For all registered applications */
3885 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3886 if (!app->compatible) {
3887 /*
3888 * TODO: In time, we should notice the caller of this error by
3889 * telling him that this is a version error.
3890 */
3891 continue;
3892 }
3893 ua_sess = lookup_session_by_app(usess, app);
3894 if (ua_sess == NULL) {
3895 /* Next app */
3896 continue;
3897 }
3898
3899 /* Lookup channel in the ust app session */
3900 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3901 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3902 if (ua_chan_node == NULL) {
3903 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
3904 "Skipping", uchan->name, usess->id, app->pid);
3905 continue;
3906 }
3907 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3908
3909 lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &uiter);
3910 ua_event_node = lttng_ht_iter_get_node_str(&uiter);
3911 if (ua_event_node == NULL) {
3912 DBG2("Event %s not found in channel %s for app pid %d."
3913 "Skipping", uevent->attr.name, uchan->name, app->pid);
3914 continue;
3915 }
3916 ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);
3917
3918 ret = disable_ust_app_event(ua_sess, ua_event, app);
3919 if (ret < 0) {
3920 /* XXX: Report error someday... */
3921 continue;
3922 }
3923 }
3924
3925 rcu_read_unlock();
3926
3927 return ret;
3928 }
3929
/*
 * For a specific UST session, create the channel for all registered apps.
 *
 * Returns 0 on success (per-app connection failures are not fatal), or a
 * negative value on unrecoverable error (e.g. -ENOMEM).
 */
int ust_app_create_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan)
{
	int ret = 0, created;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_session *ua_sess = NULL;

	/* Very wrong code flow */
	assert(usess);
	assert(uchan);

	DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
			uchan->name, usess->id);

	rcu_read_lock();

	/* For every registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		if (!trace_ust_pid_tracker_lookup(usess, app->pid)) {
			/* PID is not tracked for this session. Skip. */
			continue;
		}

		/*
		 * Create session on the tracer side and add it to app session HT. Note
		 * that if session exist, it will simply return a pointer to the ust
		 * app session.
		 */
		ret = create_ust_app_session(usess, app, &ua_sess, &created);
		if (ret < 0) {
			switch (ret) {
			case -ENOTCONN:
				/*
				 * The application's socket is not valid. Either a bad socket
				 * or a timeout on it. We can't inform the caller that for a
				 * specific app, the session failed so lets continue here.
				 */
				ret = 0;	/* Not an error. */
				continue;
			case -ENOMEM:
			default:
				goto error_rcu_unlock;
			}
		}
		assert(ua_sess);

		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			/* Session torn down concurrently; nothing to do for it. */
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
				sizeof(uchan->name))) {
			/*
			 * The metadata channel is not created here; only its
			 * attributes are recorded for later use.
			 */
			copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, &uchan->attr);
			ret = 0;
		} else {
			/* Create channel onto application. We don't need the chan ref. */
			ret = create_ust_app_channel(ua_sess, uchan, app,
					LTTNG_UST_CHAN_PER_CPU, usess, NULL);
		}
		pthread_mutex_unlock(&ua_sess->lock);
		if (ret < 0) {
			/* Cleanup the created session if it's the case. */
			if (created) {
				destroy_app_session(app, ua_sess);
			}
			switch (ret) {
			case -ENOTCONN:
				/*
				 * The application's socket is not valid. Either a bad socket
				 * or a timeout on it. We can't inform the caller that for a
				 * specific app, the session failed so lets continue here.
				 */
				ret = 0;	/* Not an error. */
				continue;
			case -ENOMEM:
			default:
				goto error_rcu_unlock;
			}
		}
	}

error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
4029
/*
 * Enable event for a specific session and channel on the tracer.
 *
 * Returns 0 on success (apps without the event are skipped), or a negative
 * value if enabling fails on the tracer side for an app.
 */
int ust_app_enable_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app enabling event %s for all apps for session id %" PRIu64,
			uevent->attr.name, usess->id);

	/*
	 * NOTE: At this point, this function is called only if the session and
	 * channel passed are already created for all apps. and enabled on the
	 * tracer also.
	 */

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has problem or is probably dead. */
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			/* Session torn down concurrently; skip this app. */
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/*
		 * It is possible that the channel cannot be found is
		 * the channel/event creation occurs concurrently with
		 * an application exit.
		 */
		if (!ua_chan_node) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		/* Get event node */
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG3("UST app enable event %s not found for app PID %d."
					"Skipping app", uevent->attr.name, app->pid);
			/* Not fatal: this app just does not have the event. */
			goto next_app;
		}

		ret = enable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			/* Unlock before bailing out; error is fatal for the command. */
			pthread_mutex_unlock(&ua_sess->lock);
			goto error;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

error:
	rcu_read_unlock();
	return ret;
}
4114
4115 /*
4116 * For a specific existing UST session and UST channel, creates the event for
4117 * all registered apps.
4118 */
4119 int ust_app_create_event_glb(struct ltt_ust_session *usess,
4120 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4121 {
4122 int ret = 0;
4123 struct lttng_ht_iter iter, uiter;
4124 struct lttng_ht_node_str *ua_chan_node;
4125 struct ust_app *app;
4126 struct ust_app_session *ua_sess;
4127 struct ust_app_channel *ua_chan;
4128
4129 DBG("UST app creating event %s for all apps for session id %" PRIu64,
4130 uevent->attr.name, usess->id);
4131
4132 rcu_read_lock();
4133
4134 /* For all registered applications */
4135 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4136 if (!app->compatible) {
4137 /*
4138 * TODO: In time, we should notice the caller of this error by
4139 * telling him that this is a version error.
4140 */
4141 continue;
4142 }
4143 ua_sess = lookup_session_by_app(usess, app);
4144 if (!ua_sess) {
4145 /* The application has problem or is probably dead. */
4146 continue;
4147 }
4148
4149 pthread_mutex_lock(&ua_sess->lock);
4150
4151 if (ua_sess->deleted) {
4152 pthread_mutex_unlock(&ua_sess->lock);
4153 continue;
4154 }
4155
4156 /* Lookup channel in the ust app session */
4157 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4158 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4159 /* If the channel is not found, there is a code flow error */
4160 assert(ua_chan_node);
4161
4162 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4163
4164 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
4165 pthread_mutex_unlock(&ua_sess->lock);
4166 if (ret < 0) {
4167 if (ret != -LTTNG_UST_ERR_EXIST) {
4168 /* Possible value at this point: -ENOMEM. If so, we stop! */
4169 break;
4170 }
4171 DBG2("UST app event %s already exist on app PID %d",
4172 uevent->attr.name, app->pid);
4173 continue;
4174 }
4175 }
4176
4177 rcu_read_unlock();
4178
4179 return ret;
4180 }
4181
/*
 * Start tracing for a specific UST session and app.
 *
 * Returns 0 on success or when the app is dead/incompatible (not an error),
 * -1 on unrecoverable setup failure.
 */
static
int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;

	DBG("Starting tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		/* The session is in teardown process. Ignore and continue. */
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		/* Session torn down concurrently; nothing to start. */
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	/* Upon restart, we skip the setup, already done */
	if (ua_sess->started) {
		goto skip_setup;
	}

	/* Create directories if consumer is LOCAL and has a path defined. */
	if (usess->consumer->type == CONSUMER_DST_LOCAL &&
			strlen(usess->consumer->dst.trace_path) > 0) {
		ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
				S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
		if (ret < 0) {
			/* An existing directory is fine; anything else is fatal. */
			if (errno != EEXIST) {
				ERR("Trace directory creation error");
				goto error_unlock;
			}
		}
	}

	/*
	 * Create the metadata for the application. This returns gracefully if a
	 * metadata was already set for the session.
	 */
	ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
	if (ret < 0) {
		goto error_unlock;
	}

	health_code_update();

skip_setup:
	/* This start the UST tracing */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_start_session(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error starting tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app start session failed. Application is dead.");
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			pthread_mutex_unlock(&ua_sess->lock);
			goto end;
		}
		goto error_unlock;
	}

	/* Indicate that the session has been started once */
	ua_sess->started = 1;

	pthread_mutex_unlock(&ua_sess->lock);

	health_code_update();

	/* Quiescent wait after starting trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

end:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
4290
/*
 * Stop tracing for a specific UST session and app.
 *
 * Returns 0 on success or when there is nothing to stop (no session, app
 * dead/incompatible), -1 on error (including stop of a never-started
 * session).
 */
static
int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	DBG("Stopping tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end_no_session;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		goto end_no_session;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		/* Session torn down concurrently; nothing to stop. */
		pthread_mutex_unlock(&ua_sess->lock);
		goto end_no_session;
	}

	/*
	 * If started = 0, it means that stop trace has been called for a session
	 * that was never started. It's possible since we can have a fail start
	 * from either the application manager thread or the command thread. Simply
	 * indicate that this is a stop error.
	 */
	if (!ua_sess->started) {
		goto error_rcu_unlock;
	}

	health_code_update();

	/* This inhibits UST tracing */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_stop_session(app->sock, ua_sess->handle);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error stopping tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app stop session failed. Application is dead.");
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			goto end_unlock;
		}
		goto error_rcu_unlock;
	}

	health_code_update();

	/* Quiescent wait after stopping trace */
	pthread_mutex_lock(&app->sock_lock);
	ret = ustctl_wait_quiescent(app->sock);
	pthread_mutex_unlock(&app->sock_lock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

	health_code_update();

	registry = get_session_registry(ua_sess);
	/* A session with started == 1 always has a registry. */
	assert(registry);

	/* Push metadata for application before freeing the application. */
	(void) push_metadata(registry, ua_sess->consumer);

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
end_no_session:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_rcu_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
4385
4386 static
4387 int ust_app_flush_app_session(struct ust_app *app,
4388 struct ust_app_session *ua_sess)
4389 {
4390 int ret, retval = 0;
4391 struct lttng_ht_iter iter;
4392 struct ust_app_channel *ua_chan;
4393 struct consumer_socket *socket;
4394
4395 DBG("Flushing app session buffers for ust app pid %d", app->pid);
4396
4397 rcu_read_lock();
4398
4399 if (!app->compatible) {
4400 goto end_not_compatible;
4401 }
4402
4403 pthread_mutex_lock(&ua_sess->lock);
4404
4405 if (ua_sess->deleted) {
4406 goto end_deleted;
4407 }
4408
4409 health_code_update();
4410
4411 /* Flushing buffers */
4412 socket = consumer_find_socket_by_bitness(app->bits_per_long,
4413 ua_sess->consumer);
4414
4415 /* Flush buffers and push metadata. */
4416 switch (ua_sess->buffer_type) {
4417 case LTTNG_BUFFER_PER_PID:
4418 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
4419 node.node) {
4420 health_code_update();
4421 assert(ua_chan->is_sent);
4422 ret = consumer_flush_channel(socket, ua_chan->key);
4423 if (ret) {
4424 ERR("Error flushing consumer channel");
4425 retval = -1;
4426 continue;
4427 }
4428 }
4429 break;
4430 case LTTNG_BUFFER_PER_UID:
4431 default:
4432 assert(0);
4433 break;
4434 }
4435
4436 health_code_update();
4437
4438 end_deleted:
4439 pthread_mutex_unlock(&ua_sess->lock);
4440
4441 end_not_compatible:
4442 rcu_read_unlock();
4443 health_code_update();
4444 return retval;
4445 }
4446
4447 /*
4448 * Flush buffers for all applications for a specific UST session.
4449 * Called with UST session lock held.
4450 */
4451 static
4452 int ust_app_flush_session(struct ltt_ust_session *usess)
4453
4454 {
4455 int ret = 0;
4456
4457 DBG("Flushing session buffers for all ust apps");
4458
4459 rcu_read_lock();
4460
4461 /* Flush buffers and push metadata. */
4462 switch (usess->buffer_type) {
4463 case LTTNG_BUFFER_PER_UID:
4464 {
4465 struct buffer_reg_uid *reg;
4466 struct lttng_ht_iter iter;
4467
4468 /* Flush all per UID buffers associated to that session. */
4469 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
4470 struct ust_registry_session *ust_session_reg;
4471 struct buffer_reg_channel *reg_chan;
4472 struct consumer_socket *socket;
4473
4474 /* Get consumer socket to use to push the metadata.*/
4475 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
4476 usess->consumer);
4477 if (!socket) {
4478 /* Ignore request if no consumer is found for the session. */
4479 continue;
4480 }
4481
4482 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
4483 reg_chan, node.node) {
4484 /*
4485 * The following call will print error values so the return
4486 * code is of little importance because whatever happens, we
4487 * have to try them all.
4488 */
4489 (void) consumer_flush_channel(socket, reg_chan->consumer_key);
4490 }
4491
4492 ust_session_reg = reg->registry->reg.ust;
4493 /* Push metadata. */
4494 (void) push_metadata(ust_session_reg, usess->consumer);
4495 }
4496 break;
4497 }
4498 case LTTNG_BUFFER_PER_PID:
4499 {
4500 struct ust_app_session *ua_sess;
4501 struct lttng_ht_iter iter;
4502 struct ust_app *app;
4503
4504 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4505 ua_sess = lookup_session_by_app(usess, app);
4506 if (ua_sess == NULL) {
4507 continue;
4508 }
4509 (void) ust_app_flush_app_session(app, ua_sess);
4510 }
4511 break;
4512 }
4513 default:
4514 ret = -1;
4515 assert(0);
4516 break;
4517 }
4518
4519 rcu_read_unlock();
4520 health_code_update();
4521 return ret;
4522 }
4523
4524 /*
4525 * Destroy a specific UST session in apps.
4526 */
4527 static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
4528 {
4529 int ret;
4530 struct ust_app_session *ua_sess;
4531 struct lttng_ht_iter iter;
4532 struct lttng_ht_node_u64 *node;
4533
4534 DBG("Destroy tracing for ust app pid %d", app->pid);
4535
4536 rcu_read_lock();
4537
4538 if (!app->compatible) {
4539 goto end;
4540 }
4541
4542 __lookup_session_by_app(usess, app, &iter);
4543 node = lttng_ht_iter_get_node_u64(&iter);
4544 if (node == NULL) {
4545 /* Session is being or is deleted. */
4546 goto end;
4547 }
4548 ua_sess = caa_container_of(node, struct ust_app_session, node);
4549
4550 health_code_update();
4551 destroy_app_session(app, ua_sess);
4552
4553 health_code_update();
4554
4555 /* Quiescent wait after stopping trace */
4556 pthread_mutex_lock(&app->sock_lock);
4557 ret = ustctl_wait_quiescent(app->sock);
4558 pthread_mutex_unlock(&app->sock_lock);
4559 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4560 ERR("UST app wait quiescent failed for app pid %d ret %d",
4561 app->pid, ret);
4562 }
4563 end:
4564 rcu_read_unlock();
4565 health_code_update();
4566 return 0;
4567 }
4568
4569 /*
4570 * Start tracing for the UST session.
4571 */
4572 int ust_app_start_trace_all(struct ltt_ust_session *usess)
4573 {
4574 int ret = 0;
4575 struct lttng_ht_iter iter;
4576 struct ust_app *app;
4577
4578 DBG("Starting all UST traces");
4579
4580 rcu_read_lock();
4581
4582 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4583 ret = ust_app_start_trace(usess, app);
4584 if (ret < 0) {
4585 /* Continue to next apps even on error */
4586 continue;
4587 }
4588 }
4589
4590 rcu_read_unlock();
4591
4592 return 0;
4593 }
4594
4595 /*
4596 * Start tracing for the UST session.
4597 * Called with UST session lock held.
4598 */
4599 int ust_app_stop_trace_all(struct ltt_ust_session *usess)
4600 {
4601 int ret = 0;
4602 struct lttng_ht_iter iter;
4603 struct ust_app *app;
4604
4605 DBG("Stopping all UST traces");
4606
4607 rcu_read_lock();
4608
4609 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4610 ret = ust_app_stop_trace(usess, app);
4611 if (ret < 0) {
4612 /* Continue to next apps even on error */
4613 continue;
4614 }
4615 }
4616
4617 (void) ust_app_flush_session(usess);
4618
4619 rcu_read_unlock();
4620
4621 return 0;
4622 }
4623
4624 /*
4625 * Destroy app UST session.
4626 */
4627 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
4628 {
4629 int ret = 0;
4630 struct lttng_ht_iter iter;
4631 struct ust_app *app;
4632
4633 DBG("Destroy all UST traces");
4634
4635 rcu_read_lock();
4636
4637 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4638 ret = destroy_trace(usess, app);
4639 if (ret < 0) {
4640 /* Continue to next apps even on error */
4641 continue;
4642 }
4643 }
4644
4645 rcu_read_unlock();
4646
4647 return 0;
4648 }
4649
/*
 * Create the app session and all channels/contexts/events of the UST
 * global domain for the given registered app, then start tracing if the
 * session is active.
 *
 * On failure the app session is destroyed (except for -ENOTCONN from the
 * channel creation, which means the application has exited).
 */
static
void ust_app_global_create(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct ust_app_session *ua_sess = NULL;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	int is_created = 0;

	ret = create_ust_app_session(usess, app, &ua_sess, &is_created);
	if (ret < 0) {
		/* Tracer is probably gone or ENOMEM. */
		goto error;
	}
	if (!is_created) {
		/* App session already created. */
		goto end;
	}
	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	/* Session torn down concurrently; nothing more to create. */
	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	/*
	 * We can iterate safely here over all UST app session since the create ust
	 * app session above made a shadow copy of the UST global domain from the
	 * ltt ust session.
	 */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = do_create_channel(app, usess, ua_sess, ua_chan);
		if (ret < 0 && ret != -ENOTCONN) {
			/*
			 * Stop everything. On error, the application
			 * failed, no more file descriptor are available
			 * or ENOMEM so stopping here is the only thing
			 * we can do for now. The only exception is
			 * -ENOTCONN, which indicates that the application
			 * has exit.
			 */
			goto error_unlock;
		}

		/*
		 * Add context using the list so they are enabled in the same order the
		 * user added them.
		 */
		cds_list_for_each_entry(ua_ctx, &ua_chan->ctx_list, list) {
			ret = create_ust_channel_context(ua_chan, ua_ctx, app);
			if (ret < 0) {
				goto error_unlock;
			}
		}


		/* For each events */
		cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
				node.node) {
			ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
			if (ret < 0) {
				goto error_unlock;
			}
		}
	}

	pthread_mutex_unlock(&ua_sess->lock);

	if (usess->active) {
		ret = ust_app_start_trace(usess, app);
		if (ret < 0) {
			goto error;
		}

		DBG2("UST trace started for app pid %d", app->pid);
	}
end:
	/* Everything went well at this point. */
	return;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
error:
	if (ua_sess) {
		destroy_app_session(app, ua_sess);
	}
	return;
}
4743
4744 static
4745 void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
4746 {
4747 struct ust_app_session *ua_sess;
4748
4749 ua_sess = lookup_session_by_app(usess, app);
4750 if (ua_sess == NULL) {
4751 return;
4752 }
4753 destroy_app_session(app, ua_sess);
4754 }
4755
4756 /*
4757 * Add channels/events from UST global domain to registered apps at sock.
4758 *
4759 * Called with session lock held.
4760 * Called with RCU read-side lock held.
4761 */
4762 void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
4763 {
4764 assert(usess);
4765
4766 DBG2("UST app global update for app sock %d for session id %" PRIu64,
4767 app->sock, usess->id);
4768
4769 if (!app->compatible) {
4770 return;
4771 }
4772
4773 if (trace_ust_pid_tracker_lookup(usess, app->pid)) {
4774 ust_app_global_create(usess, app);
4775 } else {
4776 ust_app_global_destroy(usess, app);
4777 }
4778 }
4779
4780 /*
4781 * Called with session lock held.
4782 */
4783 void ust_app_global_update_all(struct ltt_ust_session *usess)
4784 {
4785 struct lttng_ht_iter iter;
4786 struct ust_app *app;
4787
4788 rcu_read_lock();
4789 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4790 ust_app_global_update(usess, app);
4791 }
4792 rcu_read_unlock();
4793 }
4794
/*
 * Add context to a specific channel for global UST domain.
 *
 * Apps without a session or without the channel are silently skipped.
 * Return 0 on success or the error of the last failed context creation.
 */
int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
{
	int ret = 0;
	struct lttng_ht_node_str *ua_chan_node;
	struct lttng_ht_iter iter, uiter;
	struct ust_app_channel *ua_chan = NULL;
	struct ust_app_session *ua_sess;
	struct ust_app *app;

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);

		/* Session torn down concurrently; skip this app. */
		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node == NULL) {
			/* Channel was not created for this app; nothing to do. */
			goto next_app;
		}
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
				node);
		ret = create_ust_app_channel_context(ua_sess, ua_chan, &uctx->ctx, app);
		if (ret < 0) {
			goto next_app;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

	rcu_read_unlock();
	return ret;
}
4849
/*
 * Enable event for a channel from a UST session for a specific PID.
 *
 * Return 0 on success (also when the app is incompatible or has no
 * session), -1 if no app with that PID is registered, or a negative value
 * on event creation/enabling error.
 */
int ust_app_enable_event_pid(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app enabling event %s for PID %d", uevent->attr.name, pid);

	rcu_read_lock();

	app = ust_app_find_by_pid(pid);
	if (app == NULL) {
		ERR("UST app enable event per PID %d not found", pid);
		ret = -1;
		goto end;
	}

	/* Incompatible tracer ABI; not an error for the caller. */
	if (!app->compatible) {
		ret = 0;
		goto end;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (!ua_sess) {
		/* The application has problem or is probably dead. */
		ret = 0;
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	/* Session torn down concurrently; nothing to enable. */
	if (ua_sess->deleted) {
		ret = 0;
		goto end_unlock;
	}

	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	/* If the channel is not found, there is a code flow error */
	assert(ua_chan_node);

	ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

	/* Create the event if it does not exist, otherwise simply enable it. */
	ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
			uevent->filter, uevent->attr.loglevel, uevent->exclusion);
	if (ua_event == NULL) {
		ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
		if (ret < 0) {
			goto end_unlock;
		}
	} else {
		ret = enable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			goto end_unlock;
		}
	}

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
end:
	rcu_read_unlock();
	return ret;
}
4922
4923 /*
4924 * Calibrate registered applications.
4925 */
4926 int ust_app_calibrate_glb(struct lttng_ust_calibrate *calibrate)
4927 {
4928 int ret = 0;
4929 struct lttng_ht_iter iter;
4930 struct ust_app *app;
4931
4932 rcu_read_lock();
4933
4934 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4935 if (!app->compatible) {
4936 /*
4937 * TODO: In time, we should notice the caller of this error by
4938 * telling him that this is a version error.
4939 */
4940 continue;
4941 }
4942
4943 health_code_update();
4944
4945 pthread_mutex_lock(&app->sock_lock);
4946 ret = ustctl_calibrate(app->sock, calibrate);
4947 pthread_mutex_unlock(&app->sock_lock);
4948 if (ret < 0) {
4949 switch (ret) {
4950 case -ENOSYS:
4951 /* Means that it's not implemented on the tracer side. */
4952 ret = 0;
4953 break;
4954 default:
4955 DBG2("Calibrate app PID %d returned with error %d",
4956 app->pid, ret);
4957 break;
4958 }
4959 }
4960 }
4961
4962 DBG("UST app global domain calibration finished");
4963
4964 rcu_read_unlock();
4965
4966 health_code_update();
4967
4968 return ret;
4969 }
4970
4971 /*
4972 * Receive registration and populate the given msg structure.
4973 *
4974 * On success return 0 else a negative value returned by the ustctl call.
4975 */
4976 int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
4977 {
4978 int ret;
4979 uint32_t pid, ppid, uid, gid;
4980
4981 assert(msg);
4982
4983 ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
4984 &pid, &ppid, &uid, &gid,
4985 &msg->bits_per_long,
4986 &msg->uint8_t_alignment,
4987 &msg->uint16_t_alignment,
4988 &msg->uint32_t_alignment,
4989 &msg->uint64_t_alignment,
4990 &msg->long_alignment,
4991 &msg->byte_order,
4992 msg->name);
4993 if (ret < 0) {
4994 switch (-ret) {
4995 case EPIPE:
4996 case ECONNRESET:
4997 case LTTNG_UST_ERR_EXITING:
4998 DBG3("UST app recv reg message failed. Application died");
4999 break;
5000 case LTTNG_UST_ERR_UNSUP_MAJOR:
5001 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
5002 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
5003 LTTNG_UST_ABI_MINOR_VERSION);
5004 break;
5005 default:
5006 ERR("UST app recv reg message failed with ret %d", ret);
5007 break;
5008 }
5009 goto error;
5010 }
5011 msg->pid = (pid_t) pid;
5012 msg->ppid = (pid_t) ppid;
5013 msg->uid = (uid_t) uid;
5014 msg->gid = (gid_t) gid;
5015
5016 error:
5017 return ret;
5018 }
5019
5020 /*
5021 * Return a ust app channel object using the application object and the channel
5022 * object descriptor has a key. If not found, NULL is returned. A RCU read side
5023 * lock MUST be acquired before calling this function.
5024 */
5025 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
5026 int objd)
5027 {
5028 struct lttng_ht_node_ulong *node;
5029 struct lttng_ht_iter iter;
5030 struct ust_app_channel *ua_chan = NULL;
5031
5032 assert(app);
5033
5034 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
5035 node = lttng_ht_iter_get_node_ulong(&iter);
5036 if (node == NULL) {
5037 DBG2("UST app channel find by objd %d not found", objd);
5038 goto error;
5039 }
5040
5041 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
5042
5043 error:
5044 return ua_chan;
5045 }
5046
5047 /*
5048 * Reply to a register channel notification from an application on the notify
5049 * socket. The channel metadata is also created.
5050 *
5051 * The session UST registry lock is acquired in this function.
5052 *
5053 * On success 0 is returned else a negative value.
5054 */
5055 static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
5056 size_t nr_fields, struct ustctl_field *fields)
5057 {
5058 int ret, ret_code = 0;
5059 uint32_t chan_id, reg_count;
5060 uint64_t chan_reg_key;
5061 enum ustctl_channel_header type;
5062 struct ust_app *app;
5063 struct ust_app_channel *ua_chan;
5064 struct ust_app_session *ua_sess;
5065 struct ust_registry_session *registry;
5066 struct ust_registry_channel *chan_reg;
5067
5068 rcu_read_lock();
5069
5070 /* Lookup application. If not found, there is a code flow error. */
5071 app = find_app_by_notify_sock(sock);
5072 if (!app) {
5073 DBG("Application socket %d is being teardown. Abort event notify",
5074 sock);
5075 ret = 0;
5076 free(fields);
5077 goto error_rcu_unlock;
5078 }
5079
5080 /* Lookup channel by UST object descriptor. */
5081 ua_chan = find_channel_by_objd(app, cobjd);
5082 if (!ua_chan) {
5083 DBG("Application channel is being teardown. Abort event notify");
5084 ret = 0;
5085 free(fields);
5086 goto error_rcu_unlock;
5087 }
5088
5089 assert(ua_chan->session);
5090 ua_sess = ua_chan->session;
5091
5092 /* Get right session registry depending on the session buffer type. */
5093 registry = get_session_registry(ua_sess);
5094 assert(registry);
5095
5096 /* Depending on the buffer type, a different channel key is used. */
5097 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
5098 chan_reg_key = ua_chan->tracing_channel_id;
5099 } else {
5100 chan_reg_key = ua_chan->key;
5101 }
5102
5103 pthread_mutex_lock(&registry->lock);
5104
5105 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
5106 assert(chan_reg);
5107
5108 if (!chan_reg->register_done) {
5109 reg_count = ust_registry_get_event_count(chan_reg);
5110 if (reg_count < 31) {
5111 type = USTCTL_CHANNEL_HEADER_COMPACT;
5112 } else {
5113 type = USTCTL_CHANNEL_HEADER_LARGE;
5114 }
5115
5116 chan_reg->nr_ctx_fields = nr_fields;
5117 chan_reg->ctx_fields = fields;
5118 chan_reg->header_type = type;
5119 } else {
5120 /* Get current already assigned values. */
5121 type = chan_reg->header_type;
5122 free(fields);
5123 /* Set to NULL so the error path does not do a double free. */
5124 fields = NULL;
5125 }
5126 /* Channel id is set during the object creation. */
5127 chan_id = chan_reg->chan_id;
5128
5129 /* Append to metadata */
5130 if (!chan_reg->metadata_dumped) {
5131 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
5132 if (ret_code) {
5133 ERR("Error appending channel metadata (errno = %d)", ret_code);
5134 goto reply;
5135 }
5136 }
5137
5138 reply:
5139 DBG3("UST app replying to register channel key %" PRIu64
5140 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
5141 ret_code);
5142
5143 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
5144 if (ret < 0) {
5145 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5146 ERR("UST app reply channel failed with ret %d", ret);
5147 } else {
5148 DBG3("UST app reply channel failed. Application died");
5149 }
5150 goto error;
5151 }
5152
5153 /* This channel registry registration is completed. */
5154 chan_reg->register_done = 1;
5155
5156 error:
5157 pthread_mutex_unlock(&registry->lock);
5158 error_rcu_unlock:
5159 rcu_read_unlock();
5160 if (ret) {
5161 free(fields);
5162 }
5163 return ret;
5164 }
5165
/*
 * Add event to the UST channel registry. When the event is added to the
 * registry, the metadata is also created. Once done, this replies to the
 * application with the appropriate error code.
 *
 * This call takes ownership of sig, fields and model_emf_uri: they are
 * either consumed by ust_registry_create_event() or freed on the early
 * teardown paths.
 *
 * The session UST registry lock is acquired in the function.
 *
 * On success 0 is returned else a negative value.
 */
static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
		char *sig, size_t nr_fields, struct ustctl_field *fields, int loglevel,
		char *model_emf_uri)
{
	int ret, ret_code;
	uint32_t event_id = 0;
	uint64_t chan_reg_key;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		DBG("Application socket %d is being teardown. Abort event notify",
				sock);
		ret = 0;
		/* App is gone; release the ownership we were given. */
		free(sig);
		free(fields);
		free(model_emf_uri);
		goto error_rcu_unlock;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		DBG("Application channel is being teardown. Abort event notify");
		ret = 0;
		/* Channel is gone; release the ownership we were given. */
		free(sig);
		free(fields);
		free(model_emf_uri);
		goto error_rcu_unlock;
	}

	assert(ua_chan->session);
	ua_sess = ua_chan->session;

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Depending on the buffer type, a different channel key is used. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, this call acquires the ownership of the sig, fields
	 * and model_emf_uri meaning any free are done inside it if needed. These
	 * three variables MUST NOT be read/write after this.
	 */
	ret_code = ust_registry_create_event(registry, chan_reg_key,
			sobjd, cobjd, name, sig, nr_fields, fields, loglevel,
			model_emf_uri, ua_sess->buffer_type, &event_id,
			app);

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_event(sock, event_id, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply event failed with ret %d", ret);
		} else {
			DBG3("UST app reply event failed. Application died");
		}
		/*
		 * No need to wipe the create event since the application socket will
		 * get close on error hence cleaning up everything by itself.
		 */
		goto error;
	}

	DBG3("UST registry event %s with id %" PRId32 " added successfully",
			name, event_id);

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
5264
/*
 * Handle application notification through the given notify socket.
 *
 * Dispatches on the received command: event registration is forwarded to
 * add_event_ust_registry() and channel registration to
 * reply_ust_register_channel(); both take ownership of the received
 * dynamically-allocated payloads.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_recv_notify(int sock)
{
	int ret;
	enum ustctl_notify_cmd cmd;

	DBG3("UST app receiving notify from sock %d", sock);

	ret = ustctl_recv_notify(sock, &cmd);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app recv notify failed with ret %d", ret);
		} else {
			DBG3("UST app recv notify failed. Application died");
		}
		goto error;
	}

	switch (cmd) {
	case USTCTL_NOTIFY_CMD_EVENT:
	{
		int sobjd, cobjd, loglevel;
		char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register event received");

		/* sig, fields and model_emf_uri are allocated by this call. */
		ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name, &loglevel,
				&sig, &nr_fields, &fields, &model_emf_uri);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv event failed with ret %d", ret);
			} else {
				DBG3("UST app recv event failed. Application died");
			}
			goto error;
		}

		/*
		 * Add event to the UST registry coming from the notify socket. This
		 * call will free if needed the sig, fields and model_emf_uri. This
		 * code path loses the ownsership of these variables and transfer them
		 * to the this function.
		 */
		ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
				fields, loglevel, model_emf_uri);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case USTCTL_NOTIFY_CMD_CHANNEL:
	{
		int sobjd, cobjd;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register channel received");

		/* fields is allocated by this call. */
		ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
				&fields);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv channel failed with ret %d", ret);
			} else {
				DBG3("UST app recv channel failed. Application died");
			}
			goto error;
		}

		/*
		 * The fields ownership are transfered to this function call meaning
		 * that if needed it will be freed. After this, it's invalid to access
		 * fields or clean it up.
		 */
		ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,
				fields);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	default:
		/* Should NEVER happen. */
		assert(0);
	}

error:
	return ret;
}
5362
/*
 * Once the notify socket hangs up, this is called. First, it tries to find the
 * corresponding application. On failure, the call_rcu to close the socket is
 * executed. If an application is found, it tries to delete it from the notify
 * socket hash table. Whathever the result, it proceeds to the call_rcu.
 *
 * Note that an object needs to be allocated here so on ENOMEM failure, the
 * call RCU is not done but the rest of the cleanup is.
 */
void ust_app_notify_sock_unregister(int sock)
{
	int err_enomem = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_notify_sock_obj *obj;

	assert(sock >= 0);

	rcu_read_lock();

	/* Allocation for the deferred (call_rcu) socket close. */
	obj = zmalloc(sizeof(*obj));
	if (!obj) {
		/*
		 * An ENOMEM is kind of uncool. If this strikes we continue the
		 * procedure but the call_rcu will not be called. In this case, we
		 * accept the fd leak rather than possibly creating an unsynchronized
		 * state between threads.
		 *
		 * TODO: The notify object should be created once the notify socket is
		 * registered and stored independantely from the ust app object. The
		 * tricky part is to synchronize the teardown of the application and
		 * this notify object. Let's keep that in mind so we can avoid this
		 * kind of shenanigans with ENOMEM in the teardown path.
		 */
		err_enomem = 1;
	} else {
		obj->fd = sock;
	}

	DBG("UST app notify socket unregister %d", sock);

	/*
	 * Lookup application by notify socket. If this fails, this means that the
	 * hash table delete has already been done by the application
	 * unregistration process so we can safely close the notify socket in a
	 * call RCU.
	 */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		goto close_socket;
	}

	iter.iter.node = &app->notify_sock_n.node;

	/*
	 * Whatever happens here either we fail or succeed, in both cases we have
	 * to close the socket after a grace period to continue to the call RCU
	 * here. If the deletion is successful, the application is not visible
	 * anymore by other threads and is it fails it means that it was already
	 * deleted from the hash table so either way we just have to close the
	 * socket.
	 */
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

close_socket:
	rcu_read_unlock();

	/*
	 * Close socket after a grace period to avoid for the socket to be reused
	 * before the application object is freed creating potential race between
	 * threads trying to add unique in the global hash table.
	 */
	if (!err_enomem) {
		call_rcu(&obj->head, close_notify_sock_rcu);
	}
}
5439
5440 /*
5441 * Destroy a ust app data structure and free its memory.
5442 */
5443 void ust_app_destroy(struct ust_app *app)
5444 {
5445 if (!app) {
5446 return;
5447 }
5448
5449 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
5450 }
5451
5452 /*
5453 * Take a snapshot for a given UST session. The snapshot is sent to the given
5454 * output.
5455 *
5456 * Return 0 on success or else a negative value.
5457 */
5458 int ust_app_snapshot_record(struct ltt_ust_session *usess,
5459 struct snapshot_output *output, int wait,
5460 uint64_t nb_packets_per_stream)
5461 {
5462 int ret = 0;
5463 unsigned int snapshot_done = 0;
5464 struct lttng_ht_iter iter;
5465 struct ust_app *app;
5466 char pathname[PATH_MAX];
5467
5468 assert(usess);
5469 assert(output);
5470
5471 rcu_read_lock();
5472
5473 switch (usess->buffer_type) {
5474 case LTTNG_BUFFER_PER_UID:
5475 {
5476 struct buffer_reg_uid *reg;
5477
5478 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5479 struct buffer_reg_channel *reg_chan;
5480 struct consumer_socket *socket;
5481
5482 /* Get consumer socket to use to push the metadata.*/
5483 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
5484 usess->consumer);
5485 if (!socket) {
5486 ret = -EINVAL;
5487 goto error;
5488 }
5489
5490 memset(pathname, 0, sizeof(pathname));
5491 ret = snprintf(pathname, sizeof(pathname),
5492 DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
5493 reg->uid, reg->bits_per_long);
5494 if (ret < 0) {
5495 PERROR("snprintf snapshot path");
5496 goto error;
5497 }
5498
5499 /* Add the UST default trace dir to path. */
5500 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5501 reg_chan, node.node) {
5502 ret = consumer_snapshot_channel(socket, reg_chan->consumer_key,
5503 output, 0, usess->uid, usess->gid, pathname, wait,
5504 nb_packets_per_stream);
5505 if (ret < 0) {
5506 goto error;
5507 }
5508 }
5509 ret = consumer_snapshot_channel(socket,
5510 reg->registry->reg.ust->metadata_key, output, 1,
5511 usess->uid, usess->gid, pathname, wait, 0);
5512 if (ret < 0) {
5513 goto error;
5514 }
5515 snapshot_done = 1;
5516 }
5517 break;
5518 }
5519 case LTTNG_BUFFER_PER_PID:
5520 {
5521 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5522 struct consumer_socket *socket;
5523 struct lttng_ht_iter chan_iter;
5524 struct ust_app_channel *ua_chan;
5525 struct ust_app_session *ua_sess;
5526 struct ust_registry_session *registry;
5527
5528 ua_sess = lookup_session_by_app(usess, app);
5529 if (!ua_sess) {
5530 /* Session not associated with this app. */
5531 continue;
5532 }
5533
5534 /* Get the right consumer socket for the application. */
5535 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5536 output->consumer);
5537 if (!socket) {
5538 ret = -EINVAL;
5539 goto error;
5540 }
5541
5542 /* Add the UST default trace dir to path. */
5543 memset(pathname, 0, sizeof(pathname));
5544 ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
5545 ua_sess->path);
5546 if (ret < 0) {
5547 PERROR("snprintf snapshot path");
5548 goto error;
5549 }
5550
5551 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
5552 ua_chan, node.node) {
5553 ret = consumer_snapshot_channel(socket, ua_chan->key, output,
5554 0, ua_sess->euid, ua_sess->egid, pathname, wait,
5555 nb_packets_per_stream);
5556 if (ret < 0) {
5557 goto error;
5558 }
5559 }
5560
5561 registry = get_session_registry(ua_sess);
5562 assert(registry);
5563 ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
5564 1, ua_sess->euid, ua_sess->egid, pathname, wait, 0);
5565 if (ret < 0) {
5566 goto error;
5567 }
5568 snapshot_done = 1;
5569 }
5570 break;
5571 }
5572 default:
5573 assert(0);
5574 break;
5575 }
5576
5577 if (!snapshot_done) {
5578 /*
5579 * If no snapshot was made and we are not in the error path, this means
5580 * that there are no buffers thus no (prior) application to snapshot
5581 * data from so we have simply NO data.
5582 */
5583 ret = -ENODATA;
5584 }
5585
5586 error:
5587 rcu_read_unlock();
5588 return ret;
5589 }
5590
5591 /*
5592 * Return the size taken by one more packet per stream.
5593 */
5594 uint64_t ust_app_get_size_one_more_packet_per_stream(struct ltt_ust_session *usess,
5595 uint64_t cur_nr_packets)
5596 {
5597 uint64_t tot_size = 0;
5598 struct ust_app *app;
5599 struct lttng_ht_iter iter;
5600
5601 assert(usess);
5602
5603 switch (usess->buffer_type) {
5604 case LTTNG_BUFFER_PER_UID:
5605 {
5606 struct buffer_reg_uid *reg;
5607
5608 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5609 struct buffer_reg_channel *reg_chan;
5610
5611 rcu_read_lock();
5612 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5613 reg_chan, node.node) {
5614 if (cur_nr_packets >= reg_chan->num_subbuf) {
5615 /*
5616 * Don't take channel into account if we
5617 * already grab all its packets.
5618 */
5619 continue;
5620 }
5621 tot_size += reg_chan->subbuf_size * reg_chan->stream_count;
5622 }
5623 rcu_read_unlock();
5624 }
5625 break;
5626 }
5627 case LTTNG_BUFFER_PER_PID:
5628 {
5629 rcu_read_lock();
5630 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5631 struct ust_app_channel *ua_chan;
5632 struct ust_app_session *ua_sess;
5633 struct lttng_ht_iter chan_iter;
5634
5635 ua_sess = lookup_session_by_app(usess, app);
5636 if (!ua_sess) {
5637 /* Session not associated with this app. */
5638 continue;
5639 }
5640
5641 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
5642 ua_chan, node.node) {
5643 if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
5644 /*
5645 * Don't take channel into account if we
5646 * already grab all its packets.
5647 */
5648 continue;
5649 }
5650 tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
5651 }
5652 }
5653 rcu_read_unlock();
5654 break;
5655 }
5656 default:
5657 assert(0);
5658 break;
5659 }
5660
5661 return tot_size;
5662 }
This page took 0.14237 seconds and 5 git commands to generate.