Fix: sessiond vs consumerd push/get metadata deadlock
[lttng-tools.git] / src/bin/lttng-sessiond/ust-app.c
/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _GNU_SOURCE
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <urcu/compiler.h>
#include <lttng/ust-error.h>
#include <signal.h>

#include <common/common.h>
#include <common/sessiond-comm/sessiond-comm.h>

#include "buffer-registry.h"
#include "fd-limit.h"
#include "health-sessiond.h"
#include "ust-app.h"
#include "ust-consumer.h"
#include "ust-ctl.h"
#include "utils.h"

static
int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);

/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Next available session ID. Access under next_session_id_lock. */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Return the incremented value of next_channel_key.
 */
static uint64_t get_next_channel_key(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_channel_key_lock);
	ret = ++_next_channel_key;
	pthread_mutex_unlock(&next_channel_key_lock);
	return ret;
}

/*
 * Return the atomically incremented value of next_session_id.
 */
static uint64_t get_next_session_id(void)
{
	uint64_t ret;

	pthread_mutex_lock(&next_session_id_lock);
	ret = ++_next_session_id;
	pthread_mutex_unlock(&next_session_id_lock);
	return ret;
}

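/*
 * Note: both counters are pre-incremented, so the first key handed out is 1.
 * A key value of 0 can thus serve as an "unset" marker; the !metadata_key
 * check in ust_app_push_metadata() below presumably relies on this
 * convention.
 */
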
static void copy_channel_attr_to_ustctl(
		struct ustctl_consumer_channel_attr *attr,
		struct lttng_ust_channel_attr *uattr)
{
	/* Copy channel attributes since the layout is different. */
	attr->subbuf_size = uattr->subbuf_size;
	attr->num_subbuf = uattr->num_subbuf;
	attr->overwrite = uattr->overwrite;
	attr->switch_timer_interval = uattr->switch_timer_interval;
	attr->read_timer_interval = uattr->read_timer_interval;
	attr->output = uattr->output;
}

/*
 * Match function for the hash table lookup.
 *
 * It matches an ust app event based on four attributes which are the event
 * name, the filter bytecode, the loglevel and the exclusions.
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;

	assert(node);
	assert(_key);

	event = caa_container_of(node, struct ust_app_event, node.node);
	key = _key;

	/* Match the 4 elements of the key: name, filter, loglevel, exclusions. */

	/* Event name */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (event->attr.loglevel != key->loglevel) {
		if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
				&& key->loglevel == 0 && event->attr.loglevel == -1) {
			/*
			 * Match is accepted. This is because on event creation, the
			 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
			 * -1 are accepted for this loglevel type since 0 is the one set by
			 * the API when receiving an enable event.
			 */
		} else {
			goto no_match;
		}
	}

	/* One of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exist, check length followed by the bytecode. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* One of the exclusions is NULL, fail. */
	if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
		goto no_match;
	}

	if (key->exclusion && event->exclusion) {
		/* Both exclusions exist, check count followed by the names. */
		if (event->exclusion->count != key->exclusion->count ||
				memcmp(event->exclusion->names, key->exclusion->names,
					event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
			goto no_match;
		}
	}

	/* Match. */
	return 1;

no_match:
	return 0;
}

/*
 * Unique add of an ust app event in the given ht. This uses the custom
 * ht_match_ust_app_event match function and the event name as hash.
 */
static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
		struct ust_app_event *event)
{
	struct cds_lfht_node *node_ptr;
	struct ust_app_ht_key key;
	struct lttng_ht *ht;

	assert(ua_chan);
	assert(ua_chan->events);
	assert(event);

	ht = ua_chan->events;
	key.name = event->attr.name;
	key.filter = event->filter;
	key.loglevel = event->attr.loglevel;
	key.exclusion = event->exclusion;

	node_ptr = cds_lfht_add_unique(ht->ht,
			ht->hash_fct(event->node.key, lttng_ht_seed),
			ht_match_ust_app_event, &key, &event->node.node);
	assert(node_ptr == &event->node.node);
}

/*
 * Close the notify socket from the given RCU head object. This MUST be called
 * through a call_rcu().
 */
static void close_notify_sock_rcu(struct rcu_head *head)
{
	int ret;
	struct ust_app_notify_sock_obj *obj =
		caa_container_of(head, struct ust_app_notify_sock_obj, head);

	/* Must have a valid fd here. */
	assert(obj->fd >= 0);

	ret = close(obj->fd);
	if (ret) {
		ERR("close notify sock %d RCU", obj->fd);
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	free(obj);
}

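/*
 * Typical invocation (a sketch, not taken verbatim from this file): the
 * object is handed to the RCU machinery rather than being called directly,
 * so the close only happens after a grace period, e.g.:
 *
 *	call_rcu(&obj->head, close_notify_sock_rcu);
 */
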
/*
 * Return the session registry according to the buffer type of the given
 * session.
 *
 * A registry per UID object MUST exist before calling this function or else
 * NULL is returned. RCU read side lock must be acquired.
 */
static struct ust_registry_session *get_session_registry(
		struct ust_app_session *ua_sess)
{
	struct ust_registry_session *registry = NULL;

	assert(ua_sess);

	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
	{
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (!reg_pid) {
			goto error;
		}
		registry = reg_pid->registry->reg.ust;
		break;
	}
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
				ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
		if (!reg_uid) {
			goto error;
		}
		registry = reg_uid->registry->reg.ust;
		break;
	}
	default:
		assert(0);
	}

error:
	return registry;
}

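/*
 * Usage note (a sketch, assuming the contract stated above): callers keep the
 * RCU read-side lock held around the lookup and any use of the returned
 * registry, e.g.:
 *
 *	rcu_read_lock();
 *	registry = get_session_registry(ua_sess);
 *	if (registry) {
 *		... use registry ...
 *	}
 *	rcu_read_unlock();
 */
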
/*
 * Delete ust context safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx)
{
	int ret;

	assert(ua_ctx);

	if (ua_ctx->obj) {
		ret = ustctl_release_object(sock, ua_ctx->obj);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
					sock, ua_ctx->obj->handle, ret);
		}
		free(ua_ctx->obj);
	}
	free(ua_ctx);
}

/*
 * Delete ust app event safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_event(int sock, struct ust_app_event *ua_event)
{
	int ret;

	assert(ua_event);

	free(ua_event->filter);
	if (ua_event->exclusion != NULL) {
		free(ua_event->exclusion);
	}
	if (ua_event->obj != NULL) {
		ret = ustctl_release_object(sock, ua_event->obj);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release event obj failed with ret %d",
					sock, ret);
		}
		free(ua_event->obj);
	}
	free(ua_event);
}

/*
 * Release ust data object of the given stream.
 *
 * Return 0 on success or else a negative value.
 */
static int release_ust_app_stream(int sock, struct ust_app_stream *stream)
{
	int ret = 0;

	assert(stream);

	if (stream->obj) {
		ret = ustctl_release_object(sock, stream->obj);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release stream obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 2);
		free(stream->obj);
	}

	return ret;
}

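/*
 * Note on the count of 2 in the lttng_fd_put() above: a stream object
 * accounts for two file descriptors (presumably its shared-memory fd and its
 * wakeup fd), so both slots are returned to the fd limiter at once.
 */
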
/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream)
{
	assert(stream);

	(void) release_ust_app_stream(sock, stream);
	free(stream);
}

/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_channel().
 */
static
void delete_ust_app_channel_rcu(struct rcu_head *head)
{
	struct ust_app_channel *ua_chan =
		caa_container_of(head, struct ust_app_channel, rcu_head);

	ht_cleanup_push(ua_chan->ctx);
	ht_cleanup_push(ua_chan->events);
	free(ua_chan);
}

/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_objd, &iter);
		assert(!ret);
		ret = ustctl_release_object(sock, ua_chan->obj);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}

/*
 * Push metadata to consumer socket.
 *
 * RCU read-side lock must be held to guarantee existence of socket.
 * Must be called with the ust app session lock held.
 * Must be called with the registry lock held.
 *
 * On success, return the length of metadata pushed or else a negative value.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset, new_metadata_len_sent;
	ssize_t ret_val;
	uint64_t metadata_key;

	assert(registry);
	assert(socket);

	metadata_key = registry->metadata_key;

	/*
	 * Means that no metadata was assigned to the session. This can
	 * happen if no start has been done previously.
	 */
	if (!metadata_key) {
		return 0;
	}

	/*
	 * On a push metadata error either the consumer is dead or the
	 * metadata channel has been destroyed because its endpoint
	 * might have died (e.g: relayd), or because the application has
	 * exited. If so, the metadata closed flag is set to 1 so we
	 * deny pushing metadata again which is not valid anymore on the
	 * consumer side.
	 */
	if (registry->metadata_closed) {
		return -EPIPE;
	}

	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	new_metadata_len_sent = registry->metadata_len;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't sent out. */
	memcpy(metadata_str, registry->metadata + offset, len);

push_data:
	pthread_mutex_unlock(&registry->lock);
	/*
	 * We need to unlock the registry while we push metadata to
	 * break a circular dependency between the consumerd metadata
	 * lock and the sessiond registry lock. Indeed, pushing metadata
	 * to the consumerd awaits that it gets pushed all the way to
	 * relayd, but doing so requires grabbing the metadata lock. If
	 * a concurrent metadata request is being performed by
	 * consumerd, this can try to grab the registry lock on the
	 * sessiond while holding the metadata lock on the consumer
	 * daemon. Those push and pull schemes are performed on two
	 * different bidirectional communication sockets.
	 */
	ret = consumer_push_metadata(socket, metadata_key,
			metadata_str, len, offset);
	pthread_mutex_lock(&registry->lock);
	if (ret < 0) {
		/*
		 * There is an acceptable race here between the registry
		 * metadata key assignment and the creation on the
		 * consumer. The session daemon can concurrently push
		 * metadata for this registry while it is being created on
		 * the consumer since the metadata key of the registry is
		 * assigned *before* it is set up, to avoid the consumer
		 * asking for metadata that could possibly not be found
		 * in the session daemon.
		 *
		 * The metadata will get pushed either by the session
		 * being stopped or the consumer requesting metadata if
		 * that race is triggered.
		 */
		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
			ret = 0;
		} else {
			ERR("Error pushing metadata to consumer");
		}
		ret_val = ret;
		goto error_push;
	} else {
		/*
		 * Metadata may have been concurrently pushed, since
		 * we're not holding the registry lock while pushing to
		 * consumer. This is handled by the fact that we send
		 * the metadata content, size, and the offset at which
		 * that metadata belongs. This may arrive out of order
		 * on the consumer side, and the consumer is able to
		 * deal with overlapping fragments. The consumer
		 * supports overlapping fragments, which must be
		 * contiguous starting from offset 0. We keep the
		 * largest metadata_len_sent value of the concurrent
		 * send.
		 */
		registry->metadata_len_sent =
			max_t(size_t, registry->metadata_len_sent,
					new_metadata_len_sent);
	}
	free(metadata_str);
	return len;

end:
error:
	if (ret_val) {
		/*
		 * On error, flag the registry that the metadata is
		 * closed. We were unable to push anything and this
		 * means that either the consumer is not responding or
		 * the metadata cache has been destroyed on the
		 * consumer.
		 */
		registry->metadata_closed = 1;
	}
error_push:
	free(metadata_str);
	return ret_val;
}

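/*
 * Lock-ordering summary of the deadlock avoided above (informal sketch):
 *
 *	sessiond push:	registry->lock -> [unlock] -> consumerd metadata I/O
 *	consumerd pull:	consumerd metadata lock -> sessiond registry->lock
 *
 * Holding registry->lock across the push while the consumerd holds its
 * metadata lock and requests metadata from the sessiond would create a
 * circular wait; dropping registry->lock around consumer_push_metadata()
 * breaks the cycle.
 */
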
/*
 * For a given application and session, push metadata to consumer.
 * The consumer socket to use is looked up from the given consumer output
 * using the registry's bitness.
 * RCU read-side lock must be held while calling this function,
 * therefore ensuring existence of registry. It also ensures existence
 * of socket throughout this function.
 *
 * Return 0 on success else a negative error.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
static int push_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	pthread_mutex_lock(&registry->lock);
	if (registry->metadata_closed) {
		ret_val = -EPIPE;
		goto error;
	}

	/* Get consumer socket to use to push the metadata. */
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret_val = -1;
		goto error;
	}

	ret = ust_app_push_metadata(registry, socket, 0);
	if (ret < 0) {
		ret_val = ret;
		goto error;
	}
	pthread_mutex_unlock(&registry->lock);
	return 0;

error:
	pthread_mutex_unlock(&registry->lock);
	return ret_val;
}

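/*
 * Note: ust_app_push_metadata() may itself drop and re-take registry->lock
 * around the consumer socket I/O (see the push_data path above), so any
 * registry state observed before that call cannot be assumed unchanged when
 * it returns.
 */
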
/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be held unless the application is
 * in the destroy path.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	pthread_mutex_lock(&registry->lock);

	if (!registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata. */
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto error;
	}

	ret = consumer_close_metadata(socket, registry->metadata_key);
	if (ret < 0) {
		goto error;
	}

error:
	/*
	 * Metadata closed. Even on error this means that the consumer is not
	 * responding or not found so either way a second close should NOT be
	 * emitted for this registry.
	 */
	registry->metadata_closed = 1;
end:
	pthread_mutex_unlock(&registry->lock);
	rcu_read_unlock();
	return ret;
}

/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_session_rcu(struct rcu_head *head)
{
	struct ust_app_session *ua_sess =
		caa_container_of(head, struct ust_app_session, rcu_head);

	ht_cleanup_push(ua_sess->channels);
	free(ua_sess);
}

/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	assert(!ua_sess->deleted);
	ua_sess->deleted = true;

	registry = get_session_registry(ua_sess);
	if (registry) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flagged the metadata registry
		 * to close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	if (ua_sess->handle != -1) {
		ret = ustctl_release_handle(sock, ua_sess->handle);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
	}
	pthread_mutex_unlock(&ua_sess->lock);

	consumer_output_put(ua_sess->consumer);

	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}

/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;

	/* Delete ust app sessions info */
	sock = app->sock;
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_objd);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
}

/*
 * URCU intermediate call to delete an UST app.
 */
static
void delete_ust_app_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_ulong *node =
		caa_container_of(head, struct lttng_ht_node_ulong, head);
	struct ust_app *app =
		caa_container_of(node, struct ust_app, pid_n);

	DBG3("Call RCU deleting app PID %d", app->pid);
	delete_ust_app(app);
}

/*
 * Delete the session from the application ht and delete the data structure by
 * freeing every object inside and releasing them.
 */
static void destroy_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess)
{
	int ret;
	struct lttng_ht_iter iter;

	assert(app);
	assert(ua_sess);

	iter.iter.node = &ua_sess->node.node;
	ret = lttng_ht_del(app->sessions, &iter);
	if (ret) {
		/* Already scheduled for teardown. */
		goto end;
	}

	/* Once deleted, free the data structure. */
	delete_ust_app_session(app->sock, ua_sess, app);

end:
	return;
}

/*
 * Alloc new UST app session.
 */
static
struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
{
	struct ust_app_session *ua_sess;

	/* Init most of the default value by allocating and zeroing */
	ua_sess = zmalloc(sizeof(struct ust_app_session));
	if (ua_sess == NULL) {
		PERROR("malloc");
		goto error_free;
	}

	ua_sess->handle = -1;
	ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	ua_sess->metadata_attr.type = LTTNG_UST_CHAN_METADATA;
	pthread_mutex_init(&ua_sess->lock, NULL);

	return ua_sess;

error_free:
	return NULL;
}

/*
 * Alloc new UST app channel.
 */
static
struct ust_app_channel *alloc_ust_app_channel(char *name,
		struct ust_app_session *ua_sess,
		struct lttng_ust_channel_attr *attr)
{
	struct ust_app_channel *ua_chan;

	/* Init most of the default value by allocating and zeroing */
	ua_chan = zmalloc(sizeof(struct ust_app_channel));
	if (ua_chan == NULL) {
		PERROR("malloc");
		goto error;
	}

	/* Setup channel name */
	strncpy(ua_chan->name, name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->enabled = 1;
	ua_chan->handle = -1;
	ua_chan->session = ua_sess;
	ua_chan->key = get_next_channel_key();
	ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);

	CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
	CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);

	/* Copy attributes */
	if (attr) {
		/* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
		ua_chan->attr.subbuf_size = attr->subbuf_size;
		ua_chan->attr.num_subbuf = attr->num_subbuf;
		ua_chan->attr.overwrite = attr->overwrite;
		ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
		ua_chan->attr.read_timer_interval = attr->read_timer_interval;
		ua_chan->attr.output = attr->output;
	}
	/* By default, the channel is a per cpu channel. */
	ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;

	DBG3("UST app channel %s allocated", ua_chan->name);

	return ua_chan;

error:
	return NULL;
}

/*
 * Allocate and initialize a UST app stream.
 *
 * Return newly allocated stream pointer or NULL on error.
 */
struct ust_app_stream *ust_app_alloc_stream(void)
{
	struct ust_app_stream *stream = NULL;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("zmalloc ust app stream");
		goto error;
	}

	/* Zero could be a valid value for a handle so set it to -1. */
	stream->handle = -1;

error:
	return stream;
}

/*
 * Alloc new UST app event.
 */
static
struct ust_app_event *alloc_ust_app_event(char *name,
		struct lttng_ust_event *attr)
{
	struct ust_app_event *ua_event;

	/* Init most of the default value by allocating and zeroing */
	ua_event = zmalloc(sizeof(struct ust_app_event));
	if (ua_event == NULL) {
		PERROR("malloc");
		goto error;
	}

	ua_event->enabled = 1;
	strncpy(ua_event->name, name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';
	lttng_ht_node_init_str(&ua_event->node, ua_event->name);

	/* Copy attributes */
	if (attr) {
		memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
	}

	DBG3("UST app event %s allocated", ua_event->name);

	return ua_event;

error:
	return NULL;
}

/*
 * Alloc new UST app context.
 */
static
struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context *uctx)
{
	struct ust_app_ctx *ua_ctx;

	ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
	if (ua_ctx == NULL) {
		goto error;
	}

	CDS_INIT_LIST_HEAD(&ua_ctx->list);

	if (uctx) {
		memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
	}

	DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);

error:
	return ua_ctx;
}

/*
 * Allocate a filter and copy the given original filter.
 *
 * Return allocated filter or NULL on error.
 */
static struct lttng_ust_filter_bytecode *alloc_copy_ust_app_filter(
		struct lttng_ust_filter_bytecode *orig_f)
{
	struct lttng_ust_filter_bytecode *filter = NULL;

	/* Copy filter bytecode */
	filter = zmalloc(sizeof(*filter) + orig_f->len);
	if (!filter) {
		PERROR("zmalloc alloc ust app filter");
		goto error;
	}

	memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);

error:
	return filter;
}

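/*
 * Note on the sizeof(*filter) + orig_f->len arithmetic above: the bytecode
 * bytes are stored inline right after the header fields (a flexible-array
 * style layout), so a single allocation and a single memcpy duplicate both
 * the header and the len bytes of bytecode.
 */
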
/*
 * Find an ust_app using the sock and return it. RCU read side lock must be
 * held before calling this helper function.
 */
struct ust_app *ust_app_find_by_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, sock_n);

error:
	return NULL;
}

/*
 * Find an ust_app using the notify sock and return it. RCU read side lock must
 * be held before calling this helper function.
 */
static struct ust_app *find_app_by_notify_sock(int sock)
{
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;

	lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
			&iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node == NULL) {
		DBG2("UST app find by notify sock %d not found", sock);
		goto error;
	}

	return caa_container_of(node, struct ust_app, notify_sock_n);

error:
	return NULL;
}

/*
 * Look up an ust app event based on the event name, filter bytecode, loglevel
 * and exclusions.
 *
 * Return an ust_app_event object or NULL on error.
 */
static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
		char *name, struct lttng_ust_filter_bytecode *filter, int loglevel,
		const struct lttng_event_exclusion *exclusion)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *node;
	struct ust_app_event *event = NULL;
	struct ust_app_ht_key key;

	assert(name);
	assert(ht);

	/* Setup key for event lookup. */
	key.name = name;
	key.filter = filter;
	key.loglevel = loglevel;
	/* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
	key.exclusion = (struct lttng_ust_event_exclusion *)exclusion;

	/* Lookup using the event name as hash and a custom match fct. */
	cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
			ht_match_ust_app_event, &key, &iter.iter);
	node = lttng_ht_iter_get_node_str(&iter);
	if (node == NULL) {
		goto end;
	}

	event = caa_container_of(node, struct ust_app_event, node);

end:
	return event;
}

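/*
 * Design note: the hash is computed on the event name alone, while
 * ht_match_ust_app_event() compares the full key (name, filter, loglevel,
 * exclusions). Events sharing a name therefore land in the same hash bucket
 * and are disambiguated by the match function, mirroring what
 * add_unique_ust_app_event() does on insertion.
 */
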
/*
 * Create the channel context on the tracer.
 *
 * Called with UST app session lock held.
 */
static
int create_ust_channel_context(struct ust_app_channel *ua_chan,
		struct ust_app_ctx *ua_ctx, struct ust_app *app)
{
	int ret;

	health_code_update();

	ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
			ua_chan->obj, &ua_ctx->obj);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app create channel context failed for app (pid: %d) "
					"with ret %d", app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app create channel context failed. Application is dead.");
		}
		goto error;
	}

	ua_ctx->handle = ua_ctx->obj->handle;

	DBG2("UST app context handle %d created successfully for channel %s",
			ua_ctx->handle, ua_chan->name);

error:
	health_code_update();
	return ret;
}

/*
 * Set the filter on the tracer.
 */
static
int set_ust_event_filter(struct ust_app_event *ua_event,
		struct ust_app *app)
{
	int ret;

	health_code_update();

	if (!ua_event->filter) {
		ret = 0;
		goto error;
	}

	ret = ustctl_set_filter(app->sock, ua_event->filter,
			ua_event->obj);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app event %s filter failed for app (pid: %d) "
					"with ret %d", ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app filter event failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST filter set successfully for event %s", ua_event->name);

error:
	health_code_update();
	return ret;
}

/*
 * Set event exclusions on the tracer.
 */
static
int set_ust_event_exclusion(struct ust_app_event *ua_event,
		struct ust_app *app)
{
	int ret;

	health_code_update();

	if (!ua_event->exclusion || !ua_event->exclusion->count) {
		ret = 0;
		goto error;
	}

	ret = ustctl_set_exclusion(app->sock, ua_event->exclusion,
			ua_event->obj);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app event %s exclusions failed for app (pid: %d) "
					"with ret %d", ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app event exclusion failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST exclusion set successfully for event %s", ua_event->name);

error:
	health_code_update();
	return ret;
}

/*
 * Disable the specified event on the UST tracer for the UST session.
 */
static int disable_ust_event(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
{
	int ret;

	health_code_update();

	ret = ustctl_disable(app->sock, ua_event->obj);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app event %s disable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_event->attr.name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app disable event failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app event %s disabled successfully for app (pid: %d)",
			ua_event->attr.name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Disable the specified channel on the UST tracer for the UST session.
 */
static int disable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	ret = ustctl_disable(app->sock, ua_chan->obj);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app channel %s disable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_chan->name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app disable channel failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app channel %s disabled successfully for app (pid: %d)",
			ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Enable the specified channel on the UST tracer for the UST session.
 */
static int enable_ust_channel(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;

	health_code_update();

	ret = ustctl_enable(app->sock, ua_chan->obj);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app channel %s enable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_chan->name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app enable channel failed. Application is dead.");
		}
		goto error;
	}

	ua_chan->enabled = 1;

	DBG2("UST app channel %s enabled successfully for app (pid: %d)",
			ua_chan->name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Enable the specified event on the UST tracer for the UST session.
 */
static int enable_ust_event(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
{
	int ret;

	health_code_update();

	ret = ustctl_enable(app->sock, ua_event->obj);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app event %s enable failed for app (pid: %d) "
					"and session handle %d with ret %d",
					ua_event->attr.name, app->pid, ua_sess->handle, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app enable event failed. Application is dead.");
		}
		goto error;
	}

	DBG2("UST app event %s enabled successfully for app (pid: %d)",
			ua_event->attr.name, app->pid);

error:
	health_code_update();
	return ret;
}

/*
 * Send channel and stream buffer to application.
 *
 * Return 0 on success. On error, a negative value is returned.
 */
static int send_channel_pid_to_ust(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_app_stream *stream, *stmp;

	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	health_code_update();

	DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
			app->sock);

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN;	/* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = -ENOTCONN;	/* Caused by app exiting. */
			goto error;
		} else if (ret < 0) {
			goto error;
		}
		/* We don't need the stream anymore once sent to the tracer. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream);
	}
	/* Flag the channel that it is sent to the application. */
	ua_chan->is_sent = 1;

error:
	health_code_update();
	return ret;
}

/*
 * Create the specified event onto the UST tracer for a UST session.
 *
 * Should be called with session mutex held.
 */
static
int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
{
	int ret = 0;

	health_code_update();

	/* Create UST event on tracer */
	ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
			&ua_event->obj);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error ustctl create event %s for app pid: %d with ret %d",
					ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app create event failed. Application is dead.");
		}
		goto error;
	}

	ua_event->handle = ua_event->obj->handle;

	DBG2("UST app event %s created successfully for pid:%d",
			ua_event->attr.name, app->pid);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event->filter) {
		ret = set_ust_event_filter(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set exclusions for the event */
	if (ua_event->exclusion) {
		ret = set_ust_event_exclusion(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/* Sync the enabled state: events are disabled at creation on the tracer. */
	if (ua_event->enabled) {
		/*
		 * We now need to explicitly enable the event, since it
		 * is now disabled at creation.
		 */
		ret = enable_ust_event(app, ua_sess, ua_event);
		if (ret < 0) {
			/*
			 * If we hit an EPERM, something is wrong with our enable call. If
			 * we get an EEXIST, there is a problem on the tracer side since we
			 * just created it.
			 */
			switch (ret) {
			case -LTTNG_UST_ERR_PERM:
				/* Code flow problem */
				assert(0);
			case -LTTNG_UST_ERR_EXIST:
				/* It's OK for our use case. */
				ret = 0;
				break;
			default:
				break;
			}
			goto error;
		}
	} else {
		ret = disable_ust_event(app, ua_sess, ua_event);
		if (ret < 0) {
			/*
			 * If we hit an EPERM, something is wrong with our disable call. If
			 * we get an EEXIST, there is a problem on the tracer side since we
			 * just created it.
			 */
			switch (ret) {
			case -LTTNG_UST_ERR_PERM:
				/* Code flow problem */
				assert(0);
			case -LTTNG_UST_ERR_EXIST:
				/* It's OK for our use case. */
				ret = 0;
				break;
			default:
				break;
			}
			goto error;
		}
	}

error:
	health_code_update();
	return ret;
}

/*
 * Copy data between an UST app event and an LTT event.
 */
static void shadow_copy_event(struct ust_app_event *ua_event,
		struct ltt_ust_event *uevent)
{
	size_t exclusion_alloc_size;

	strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';

	ua_event->enabled = uevent->enabled;

	/* Copy event attributes */
	memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));

	/* Copy filter bytecode */
	if (uevent->filter) {
		ua_event->filter = alloc_copy_ust_app_filter(uevent->filter);
		/* Filter might be NULL here in case of ENOMEM. */
	}

	/* Copy exclusion data */
	if (uevent->exclusion) {
		exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
				LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
		ua_event->exclusion = zmalloc(exclusion_alloc_size);
		if (ua_event->exclusion == NULL) {
			PERROR("malloc");
		} else {
			memcpy(ua_event->exclusion, uevent->exclusion,
					exclusion_alloc_size);
		}
	}
}

/*
 * Copy data between an UST app channel and an LTT channel.
 */
static void shadow_copy_channel(struct ust_app_channel *ua_chan,
		struct ltt_ust_channel *uchan)
{
	struct lttng_ht_iter iter;
	struct ltt_ust_event *uevent;
	struct ltt_ust_context *uctx;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;

	DBG2("UST app shadow copy of channel %s started", ua_chan->name);

	strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->tracefile_size = uchan->tracefile_size;
	ua_chan->tracefile_count = uchan->tracefile_count;

	/* Copy channel attributes since the layout is different. */
	ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
	ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
	ua_chan->attr.overwrite = uchan->attr.overwrite;
	ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
	ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
	ua_chan->attr.output = uchan->attr.output;
	/*
	 * Note that the attribute channel type is not set since the channel on the
	 * tracing registry side does not have this information.
	 */

	ua_chan->enabled = uchan->enabled;
	ua_chan->tracing_channel_id = uchan->id;

	cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
		ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
		if (ua_ctx == NULL) {
			continue;
		}
		lttng_ht_node_init_ulong(&ua_ctx->node,
				(unsigned long) ua_ctx->ctx.ctx);
		lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
		cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
	}

	/* Copy all events from ltt ust channel to ust app channel */
	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG2("UST event %s not found on shadow copy channel",
					uevent->attr.name);
			ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
			if (ua_event == NULL) {
				continue;
			}
			shadow_copy_event(ua_event, uevent);
			add_unique_ust_app_event(ua_chan, ua_event);
		}
	}

	DBG3("UST app shadow copy of channel %s done", ua_chan->name);
}

/*
 * Copy data between a UST app session and a regular LTT session.
 */
static void shadow_copy_session(struct ust_app_session *ua_sess,
		struct ltt_ust_session *usess, struct ust_app *app)
{
	struct lttng_ht_node_str *ua_chan_node;
	struct lttng_ht_iter iter;
	struct ltt_ust_channel *uchan;
	struct ust_app_channel *ua_chan;
	time_t rawtime;
	struct tm *timeinfo;
	char datetime[16];
	int ret;

	/* Get date and time for unique app path */
	time(&rawtime);
	timeinfo = localtime(&rawtime);
	strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);

	DBG2("Shadow copy of session handle %d", ua_sess->handle);

	ua_sess->tracing_id = usess->id;
	ua_sess->id = get_next_session_id();
	ua_sess->uid = app->uid;
	ua_sess->gid = app->gid;
	ua_sess->euid = usess->uid;
	ua_sess->egid = usess->gid;
	ua_sess->buffer_type = usess->buffer_type;
	ua_sess->bits_per_long = app->bits_per_long;

	/* There is only one consumer object per session possible. */
	consumer_output_get(usess->consumer);
	ua_sess->consumer = usess->consumer;

	ua_sess->output_traces = usess->output_traces;
	ua_sess->live_timer_interval = usess->live_timer_interval;
	copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
			&usess->metadata_attr);

	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
				datetime);
		break;
	case LTTNG_BUFFER_PER_UID:
		ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
				DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
		break;
	default:
		assert(0);
		goto error;
	}
	if (ret < 0) {
		PERROR("snprintf UST shadow copy session");
		assert(0);
		goto error;
	}

	/* Iterate over all channels in global domain. */
	cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
			uchan, node.node) {
		struct lttng_ht_iter uiter;

		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node != NULL) {
			/* Channel exists. Continuing. */
			continue;
		}

		DBG2("Channel %s not found on shadow session copy, creating it",
				uchan->name);
		ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
		if (ua_chan == NULL) {
			/* malloc failed. FIXME: Might want to handle ENOMEM. */
			continue;
		}
		shadow_copy_channel(ua_chan, uchan);
		/*
		 * The concept of metadata channel does not exist on the tracing
		 * registry side of the session daemon so this can only be a per CPU
		 * channel and not metadata.
		 */
		ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;

		lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
	}
	return;

error:
	consumer_output_put(ua_sess->consumer);
}

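/*
 * Reference-counting note: the consumer_output_get() above pairs with the
 * consumer_output_put() on the error path here, and with the put performed
 * in delete_ust_app_session() on the normal teardown path, so the consumer
 * object stays alive as long as any app session points to it.
 */
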
/*
 * Lookup session wrapper.
 */
static
void __lookup_session_by_app(struct ltt_ust_session *usess,
		struct ust_app *app, struct lttng_ht_iter *iter)
{
	/* Get right UST app session from app */
	lttng_ht_lookup(app->sessions, &usess->id, iter);
}

/*
 * Return ust app session from the app session hashtable using the UST session
 * id.
 */
static struct ust_app_session *lookup_session_by_app(
		struct ltt_ust_session *usess, struct ust_app *app)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	__lookup_session_by_app(usess, app, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node == NULL) {
		goto error;
	}

	return caa_container_of(node, struct ust_app_session, node);

error:
	return NULL;
}

/*
 * Setup buffer registry per PID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_pid **regp)
{
	int ret = 0;
	struct buffer_reg_pid *reg_pid;

	assert(ua_sess);
	assert(app);

	rcu_read_lock();

	reg_pid = buffer_reg_pid_find(ua_sess->id);
	if (!reg_pid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_pid_create(ua_sess->id, &reg_pid);
		if (ret < 0) {
			goto error;
		}
		buffer_reg_pid_add(reg_pid);
	} else {
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor);
	if (ret < 0) {
		goto error;
	}

	DBG3("UST app buffer registry per PID created successfully");

end:
	if (regp) {
		*regp = reg_pid;
	}
error:
	rcu_read_unlock();
	return ret;
}

/*
 * Setup buffer registry per UID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
		struct ust_app *app, struct buffer_reg_uid **regp)
{
	int ret = 0;
	struct buffer_reg_uid *reg_uid;

	assert(usess);
	assert(app);

	rcu_read_lock();

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	if (!reg_uid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
				LTTNG_DOMAIN_UST, &reg_uid);
		if (ret < 0) {
			goto error;
		}
		buffer_reg_uid_add(reg_uid);
	} else {
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor);
	if (ret < 0) {
		goto error;
	}
	/* Add node to teardown list of the session. */
	cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);

	DBG3("UST app buffer registry per UID created successfully");

end:
	if (regp) {
		*regp = reg_uid;
	}
error:
	rcu_read_unlock();
	return ret;
}

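/*
 * Note the contrast with setup_buffer_reg_pid(): the per-UID registry is
 * initialized with a NULL app pointer (it is shared by the applications of
 * that UID and bitness and outlives any single one of them), while the
 * per-PID registry is tied to one application instance.
 */
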
1890 /*
1891 * Create a session on the tracer side for the given app.
1892 *
1893 * On success, ua_sess_ptr is populated with the session pointer or else left
1894 * untouched. If the session was created, is_created is set to 1. On error,
1895 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
1896 * be NULL.
1897 *
1898 * Returns 0 on success or else a negative code which is either -ENOMEM or
1899 * -ENOTCONN which is the default code if the ustctl_create_session fails.
1900 */
1901 static int create_ust_app_session(struct ltt_ust_session *usess,
1902 struct ust_app *app, struct ust_app_session **ua_sess_ptr,
1903 int *is_created)
1904 {
1905 int ret, created = 0;
1906 struct ust_app_session *ua_sess;
1907
1908 assert(usess);
1909 assert(app);
1910 assert(ua_sess_ptr);
1911
1912 health_code_update();
1913
1914 ua_sess = lookup_session_by_app(usess, app);
1915 if (ua_sess == NULL) {
1916 DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
1917 app->pid, usess->id);
1918 ua_sess = alloc_ust_app_session(app);
1919 if (ua_sess == NULL) {
1920 /* Only malloc can failed so something is really wrong */
1921 ret = -ENOMEM;
1922 goto error;
1923 }
1924 shadow_copy_session(ua_sess, usess, app);
1925 created = 1;
1926 }
1927
1928 switch (usess->buffer_type) {
1929 case LTTNG_BUFFER_PER_PID:
1930 /* Init local registry. */
1931 ret = setup_buffer_reg_pid(ua_sess, app, NULL);
1932 if (ret < 0) {
1933 goto error;
1934 }
1935 break;
1936 case LTTNG_BUFFER_PER_UID:
1937 /* Look for a global registry. If none exists, create one. */
1938 ret = setup_buffer_reg_uid(usess, app, NULL);
1939 if (ret < 0) {
1940 goto error;
1941 }
1942 break;
1943 default:
1944 assert(0);
1945 ret = -EINVAL;
1946 goto error;
1947 }
1948
1949 health_code_update();
1950
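/*
 * A handle of -1 means no session exists yet on the tracer side for
 * this application; ask the tracer to create it now.
 */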
1951 if (ua_sess->handle == -1) {
1952 ret = ustctl_create_session(app->sock);
1953 if (ret < 0) {
1954 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1955 ERR("Creating session for app pid %d with ret %d",
1956 app->pid, ret);
1957 } else {
1958 DBG("UST app creating session failed. Application is dead");
1959 /*
1960 * This is normal behavior: an application can die during the
1961 * creation process. Don't report an error so the execution can
1962 * continue normally. The error is turned into -ENOTCONN below
1963 * and the caller handles it.
1964 */
1965 ret = 0;
1966 }
1967 delete_ust_app_session(-1, ua_sess, app);
1968 if (ret != -ENOMEM) {
1969 /*
1970 * The tracer is probably gone or hit an internal error, so
1971 * behave as if it will soon unregister or is no longer usable.
1972 */
1973 ret = -ENOTCONN;
1974 }
1975 goto error;
1976 }
1977
1978 ua_sess->handle = ret;
1979
1980 /* Add ust app session to app's HT */
1981 lttng_ht_node_init_u64(&ua_sess->node,
1982 ua_sess->tracing_id);
1983 lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
1984
1985 DBG2("UST app session created successfully with handle %d", ret);
1986 }
1987
1988 *ua_sess_ptr = ua_sess;
1989 if (is_created) {
1990 *is_created = created;
1991 }
1992
1993 /* Everything went well. */
1994 ret = 0;
1995
1996 error:
1997 health_code_update();
1998 return ret;
1999 }
2000
2001 /*
2002 * Match function for a hash table lookup of ust_app_ctx.
2003 *
2004 * It matches an ust app context based on the context type and, in the case
2005 * of perf counters, their name.
2006 */
2007 static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2008 {
2009 struct ust_app_ctx *ctx;
2010 const struct lttng_ust_context *key;
2011
2012 assert(node);
2013 assert(_key);
2014
2015 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2016 key = _key;
2017
2018 /* Context type */
2019 if (ctx->ctx.ctx != key->ctx) {
2020 goto no_match;
2021 }
2022
2023 /* Check the name in the case of perf thread counters. */
2024 if (key->ctx == LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER) {
2025 if (strncmp(key->u.perf_counter.name,
2026 ctx->ctx.u.perf_counter.name,
2027 sizeof(key->u.perf_counter.name))) {
2028 goto no_match;
2029 }
2030 }
2031
2032 /* Match. */
2033 return 1;
2034
2035 no_match:
2036 return 0;
2037 }
2038
2039 /*
2040 * Look up an ust app context from an lttng_ust_context.
2041 *
2042 * Must be called while holding the RCU read side lock.
2043 * Return an ust_app_ctx object or NULL if not found.
2044 */
2045 static
2046 struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
2047 struct lttng_ust_context *uctx)
2048 {
2049 struct lttng_ht_iter iter;
2050 struct lttng_ht_node_ulong *node;
2051 struct ust_app_ctx *app_ctx = NULL;
2052
2053 assert(uctx);
2054 assert(ht);
2055
2056 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2057 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2058 ht_match_ust_app_ctx, uctx, &iter.iter);
2059 node = lttng_ht_iter_get_node_ulong(&iter);
2060 if (!node) {
2061 goto end;
2062 }
2063
2064 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2065
2066 end:
2067 return app_ctx;
2068 }
2069
2070 /*
2071 * Create a context for the channel on the tracer.
2072 *
2073 * Called with UST app session lock held and a RCU read side lock.
2074 */
2075 static
2076 int create_ust_app_channel_context(struct ust_app_session *ua_sess,
2077 struct ust_app_channel *ua_chan, struct lttng_ust_context *uctx,
2078 struct ust_app *app)
2079 {
2080 int ret = 0;
2081 struct ust_app_ctx *ua_ctx;
2082
2083 DBG2("UST app adding context to channel %s", ua_chan->name);
2084
2085 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2086 if (ua_ctx) {
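/* A context of this type already exists on the channel; report -EEXIST. */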
2087 ret = -EEXIST;
2088 goto error;
2089 }
2090
2091 ua_ctx = alloc_ust_app_ctx(uctx);
2092 if (ua_ctx == NULL) {
2093 /* malloc failed */
2094 ret = -1;
2095 goto error;
2096 }
2097
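/* Index the context by its type, matching the lookup done in find_ust_app_context(). */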
2098 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
2099 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
2100 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
2101
2102 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2103 if (ret < 0) {
2104 goto error;
2105 }
2106
2107 error:
2108 return ret;
2109 }
2110
2111 /*
2112 * Enable on the tracer side a ust app event for the session and channel.
2113 *
2114 * Called with UST app session lock held.
2115 */
2116 static
2117 int enable_ust_app_event(struct ust_app_session *ua_sess,
2118 struct ust_app_event *ua_event, struct ust_app *app)
2119 {
2120 int ret;
2121
2122 ret = enable_ust_event(app, ua_sess, ua_event);
2123 if (ret < 0) {
2124 goto error;
2125 }
2126
2127 ua_event->enabled = 1;
2128
2129 error:
2130 return ret;
2131 }
2132
2133 /*
2134 * Disable on the tracer side a ust app event for the session and channel.
2135 */
2136 static int disable_ust_app_event(struct ust_app_session *ua_sess,
2137 struct ust_app_event *ua_event, struct ust_app *app)
2138 {
2139 int ret;
2140
2141 ret = disable_ust_event(app, ua_sess, ua_event);
2142 if (ret < 0) {
2143 goto error;
2144 }
2145
2146 ua_event->enabled = 0;
2147
2148 error:
2149 return ret;
2150 }
2151
2152 /*
2153 * Lookup ust app channel for session and disable it on the tracer side.
2154 */
2155 static
2156 int disable_ust_app_channel(struct ust_app_session *ua_sess,
2157 struct ust_app_channel *ua_chan, struct ust_app *app)
2158 {
2159 int ret;
2160
2161 ret = disable_ust_channel(app, ua_sess, ua_chan);
2162 if (ret < 0) {
2163 goto error;
2164 }
2165
2166 ua_chan->enabled = 0;
2167
2168 error:
2169 return ret;
2170 }
2171
2172 /*
2173 * Lookup ust app channel for session and enable it on the tracer side. This
2174 * MUST be called with a RCU read side lock acquired.
2175 */
2176 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2177 struct ltt_ust_channel *uchan, struct ust_app *app)
2178 {
2179 int ret = 0;
2180 struct lttng_ht_iter iter;
2181 struct lttng_ht_node_str *ua_chan_node;
2182 struct ust_app_channel *ua_chan;
2183
2184 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2185 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2186 if (ua_chan_node == NULL) {
2187 DBG2("Unable to find channel %s in ust session id %" PRIu64,
2188 uchan->name, ua_sess->tracing_id);
2189 goto error;
2190 }
2191
2192 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2193
2194 ret = enable_ust_channel(app, ua_sess, ua_chan);
2195 if (ret < 0) {
2196 goto error;
2197 }
2198
2199 error:
2200 return ret;
2201 }
2202
2203 /*
2204 * Ask the consumer to create a channel and get it if successful.
2205 *
2206 * Return 0 on success or else a negative value.
2207 */
2208 static int do_consumer_create_channel(struct ltt_ust_session *usess,
2209 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
2210 int bitness, struct ust_registry_session *registry)
2211 {
2212 int ret;
2213 unsigned int nb_fd = 0;
2214 struct consumer_socket *socket;
2215
2216 assert(usess);
2217 assert(ua_sess);
2218 assert(ua_chan);
2219 assert(registry);
2220
2221 rcu_read_lock();
2222 health_code_update();
2223
2224 /* Get the right consumer socket for the application. */
2225 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
2226 if (!socket) {
2227 ret = -EINVAL;
2228 goto error;
2229 }
2230
2231 health_code_update();
2232
2233 /* Need one fd for the channel. */
2234 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2235 if (ret < 0) {
2236 ERR("Exhausted number of available FD upon create channel");
2237 goto error;
2238 }
2239
2240 /*
2241 * Ask the consumer to create the channel. The consumer will return the
2242 * number of streams we have to expect.
2243 */
2244 ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
2245 registry);
2246 if (ret < 0) {
2247 goto error_ask;
2248 }
2249
2250 /*
2251 * Compute the number of fds needed before receiving them. It must be 2 per
2252 * stream (DEFAULT_UST_STREAM_FD_NUM).
2253 */
2254 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
2255
2256 /* Reserve the number of file descriptors we need. */
2257 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
2258 if (ret < 0) {
2259 ERR("Exhausted number of available FD upon create channel");
2260 goto error_fd_get_stream;
2261 }
2262
2263 health_code_update();
2264
2265 /*
2266 * Now get the channel from the consumer. This call will populate the stream
2267 * list of that channel and set the ust objects.
2268 */
2269 if (usess->consumer->enabled) {
2270 ret = ust_consumer_get_channel(socket, ua_chan);
2271 if (ret < 0) {
2272 goto error_destroy;
2273 }
2274 }
2275
2276 rcu_read_unlock();
2277 return 0;
2278
2279 error_destroy:
2280 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
2281 error_fd_get_stream:
2282 /*
2283 * Ask the consumer to destroy the channel since we had an error while
2284 * handling it on our side. The return value is of no importance since we
2285 * already have a ret value, set by the previous error, that we need to
2286 * return.
2287 */
2288 (void) ust_consumer_destroy_channel(socket, ua_chan);
2289 error_ask:
2290 lttng_fd_put(LTTNG_FD_APPS, 1);
2291 error:
2292 health_code_update();
2293 rcu_read_unlock();
2294 return ret;
2295 }
2296
2297 /*
2298 * Duplicate the ust data object of the ust app stream and save it in the
2299 * buffer registry stream.
2300 *
2301 * Return 0 on success or else a negative value.
2302 */
2303 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2304 struct ust_app_stream *stream)
2305 {
2306 int ret;
2307
2308 assert(reg_stream);
2309 assert(stream);
2310
2311 /* Reserve the number of file descriptors we need. */
2312 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2313 if (ret < 0) {
2314 ERR("Exhausted number of available FD upon duplicate stream");
2315 goto error;
2316 }
2317
2318 /* Duplicate object for stream once the original is in the registry. */
2319 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2320 reg_stream->obj.ust);
2321 if (ret < 0) {
2322 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2323 reg_stream->obj.ust, stream->obj, ret);
2324 lttng_fd_put(LTTNG_FD_APPS, 2);
2325 goto error;
2326 }
2327 stream->handle = stream->obj->handle;
2328
2329 error:
2330 return ret;
2331 }
2332
2333 /*
2334 * Duplicate the ust data object of the ust app channel and save it in the
2335 * buffer registry channel.
2336 *
2337 * Return 0 on success or else a negative value.
2338 */
2339 static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
2340 struct ust_app_channel *ua_chan)
2341 {
2342 int ret;
2343
2344 assert(reg_chan);
2345 assert(ua_chan);
2346
2347 /* Need one fd for the channel. */
2348 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2349 if (ret < 0) {
2350 ERR("Exhausted number of available FD upon duplicate channel");
2351 goto error_fd_get;
2352 }
2353
2354 /* Duplicate object for the channel once the original is in the registry. */
2355 ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
2356 if (ret < 0) {
2357 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
2358 reg_chan->obj.ust, ua_chan->obj, ret);
2359 goto error;
2360 }
2361 ua_chan->handle = ua_chan->obj->handle;
2362
2363 return 0;
2364
2365 error:
2366 lttng_fd_put(LTTNG_FD_APPS, 1);
2367 error_fd_get:
2368 return ret;
2369 }
2370
2371 /*
2372 * For a given channel buffer registry, setup all streams of the given ust
2373 * application channel.
2374 *
2375 * Return 0 on success or else a negative value.
2376 */
2377 static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
2378 struct ust_app_channel *ua_chan)
2379 {
2380 int ret = 0;
2381 struct ust_app_stream *stream, *stmp;
2382
2383 assert(reg_chan);
2384 assert(ua_chan);
2385
2386 DBG2("UST app setup buffer registry stream");
2387
2388 /* Move all streams from the application channel to the registry. */
2389 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2390 struct buffer_reg_stream *reg_stream;
2391
2392 ret = buffer_reg_stream_create(&reg_stream);
2393 if (ret < 0) {
2394 goto error;
2395 }
2396
2397 /*
2398 * Keep original pointer and nullify it in the stream so the delete
2399 * stream call does not release the object.
2400 */
2401 reg_stream->obj.ust = stream->obj;
2402 stream->obj = NULL;
2403 buffer_reg_stream_add(reg_stream, reg_chan);
2404
2405 /* We don't need the streams anymore. */
2406 cds_list_del(&stream->list);
2407 delete_ust_app_stream(-1, stream);
2408 }
2409
2410 error:
2411 return ret;
2412 }
2413
2414 /*
2415 * Create a buffer registry channel for the given session registry and
2416 * application channel object. If regp pointer is valid, it's set with the
2417 * created object. Important, the created object is NOT added to the session
2418 * registry hash table.
2419 *
2420 * Return 0 on success else a negative value.
2421 */
2422 static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2423 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
2424 {
2425 int ret;
2426 struct buffer_reg_channel *reg_chan = NULL;
2427
2428 assert(reg_sess);
2429 assert(ua_chan);
2430
2431 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
2432
2433 /* Create buffer registry channel. */
2434 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
2435 if (ret < 0) {
2436 goto error_create;
2437 }
2438 assert(reg_chan);
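/* Record the consumer key and buffer geometry of the application channel. */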
2439 reg_chan->consumer_key = ua_chan->key;
2440 reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
2441 reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
2442
2443 /* Create and add a channel registry to session. */
2444 ret = ust_registry_channel_add(reg_sess->reg.ust,
2445 ua_chan->tracing_channel_id);
2446 if (ret < 0) {
2447 goto error;
2448 }
2449 buffer_reg_channel_add(reg_sess, reg_chan);
2450
2451 if (regp) {
2452 *regp = reg_chan;
2453 }
2454
2455 return 0;
2456
2457 error:
2458 /* Safe because the registry channel object was not added to any HT. */
2459 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2460 error_create:
2461 return ret;
2462 }
2463
2464 /*
2465 * Setup buffer registry channel for the given session registry and application
2466 * channel object. If regp pointer is valid, it's set with the created object.
2467 *
2468 * Return 0 on success else a negative value.
2469 */
2470 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2471 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan)
2472 {
2473 int ret;
2474
2475 assert(reg_sess);
2476 assert(reg_chan);
2477 assert(ua_chan);
2478 assert(ua_chan->obj);
2479
2480 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
2481
2482 /* Setup all streams for the registry. */
2483 ret = setup_buffer_reg_streams(reg_chan, ua_chan);
2484 if (ret < 0) {
2485 goto error;
2486 }
2487
2488 reg_chan->obj.ust = ua_chan->obj;
2489 ua_chan->obj = NULL;
2490
2491 return 0;
2492
2493 error:
2494 buffer_reg_channel_remove(reg_sess, reg_chan);
2495 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2496 return ret;
2497 }
2498
2499 /*
2500 * Send buffer registry channel to the application.
2501 *
2502 * Return 0 on success else a negative value.
2503 */
2504 static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
2505 struct ust_app *app, struct ust_app_session *ua_sess,
2506 struct ust_app_channel *ua_chan)
2507 {
2508 int ret;
2509 struct buffer_reg_stream *reg_stream;
2510
2511 assert(reg_chan);
2512 assert(app);
2513 assert(ua_sess);
2514 assert(ua_chan);
2515
2516 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
2517
2518 ret = duplicate_channel_object(reg_chan, ua_chan);
2519 if (ret < 0) {
2520 goto error;
2521 }
2522
2523 /* Send channel to the application. */
2524 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
2525 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2526 ret = -ENOTCONN; /* Caused by app exiting. */
2527 goto error;
2528 } else if (ret < 0) {
2529 goto error;
2530 }
2531
2532 health_code_update();
2533
2534 /* Send all streams to application. */
2535 pthread_mutex_lock(&reg_chan->stream_list_lock);
2536 cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
2537 struct ust_app_stream stream;
2538
2539 ret = duplicate_stream_object(reg_stream, &stream);
2540 if (ret < 0) {
2541 goto error_stream_unlock;
2542 }
2543
2544 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
2545 if (ret < 0) {
2546 (void) release_ust_app_stream(-1, &stream);
2547 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2548 ret = -ENOTCONN; /* Caused by app exiting. */
2549 }
2550 goto error_stream_unlock;
2554 }
2555
2556 /*
2557 * The return value is not important here. This function will output an
2558 * error if needed.
2559 */
2560 (void) release_ust_app_stream(-1, &stream);
2561 }
2562 ua_chan->is_sent = 1;
2563
2564 error_stream_unlock:
2565 pthread_mutex_unlock(&reg_chan->stream_list_lock);
2566 error:
2567 return ret;
2568 }
2569
2570 /*
2571 * Create the channel with per UID buffers and send it to the application.
2572 *
2573 * Return 0 on success else a negative value.
2574 */
2575 static int create_channel_per_uid(struct ust_app *app,
2576 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2577 struct ust_app_channel *ua_chan)
2578 {
2579 int ret;
2580 struct buffer_reg_uid *reg_uid;
2581 struct buffer_reg_channel *reg_chan;
2582
2583 assert(app);
2584 assert(usess);
2585 assert(ua_sess);
2586 assert(ua_chan);
2587
2588 DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
2589
2590 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2591 /*
2592 * The session creation handles the creation of this global registry
2593 * object. If none can be found, there is a code flow problem or a
2594 * teardown race.
2595 */
2596 assert(reg_uid);
2597
2598 reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
2599 reg_uid);
2600 if (!reg_chan) {
2601 /* Create the buffer registry channel object. */
2602 ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
2603 if (ret < 0) {
2604 ERR("Error creating the UST channel \"%s\" registry instance",
2605 ua_chan->name);
2606 goto error;
2607 }
2608 assert(reg_chan);
2609
2610 /*
2611 * Create the buffers on the consumer side. This call populates the
2612 * ust app channel object with all streams and data object.
2613 */
2614 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2615 app->bits_per_long, reg_uid->registry->reg.ust);
2616 if (ret < 0) {
2617 ERR("Error creating UST channel \"%s\" on the consumer daemon",
2618 ua_chan->name);
2619
2620 /*
2621 * Let's remove the previously created buffer registry channel so
2622 * it's not visible anymore in the session registry.
2623 */
2624 ust_registry_channel_del_free(reg_uid->registry->reg.ust,
2625 ua_chan->tracing_channel_id);
2626 buffer_reg_channel_remove(reg_uid->registry, reg_chan);
2627 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2628 goto error;
2629 }
2630
2631 /*
2632 * Set up the streams and add them to the session registry.
2633 */
2634 ret = setup_buffer_reg_channel(reg_uid->registry, ua_chan, reg_chan);
2635 if (ret < 0) {
2636 ERR("Error setting up UST channel \"%s\"",
2637 ua_chan->name);
2638 goto error;
2639 }
2640
2641 }
2642
2643 /* Send buffers to the application. */
2644 ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
2645 if (ret < 0) {
2646 if (ret != -ENOTCONN) {
2647 ERR("Error sending channel to application");
2648 }
2649 goto error;
2650 }
2651
2652 error:
2653 return ret;
2654 }
2655
2656 /*
2657 * Create the channel with per PID buffers and send it to the application.
2658 *
2659 * Return 0 on success else a negative value.
2660 */
2661 static int create_channel_per_pid(struct ust_app *app,
2662 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2663 struct ust_app_channel *ua_chan)
2664 {
2665 int ret;
2666 struct ust_registry_session *registry;
2667
2668 assert(app);
2669 assert(usess);
2670 assert(ua_sess);
2671 assert(ua_chan);
2672
2673 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
2674
2675 rcu_read_lock();
2676
2677 registry = get_session_registry(ua_sess);
2678 assert(registry);
2679
2680 /* Create and add a new channel registry to session. */
2681 ret = ust_registry_channel_add(registry, ua_chan->key);
2682 if (ret < 0) {
2683 ERR("Error creating the UST channel \"%s\" registry instance",
2684 ua_chan->name);
2685 goto error;
2686 }
2687
2688 /* Create and get channel on the consumer side. */
2689 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2690 app->bits_per_long, registry);
2691 if (ret < 0) {
2692 ERR("Error creating UST channel \"%s\" on the consumer daemon",
2693 ua_chan->name);
2694 goto error;
2695 }
2696
2697 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
2698 if (ret < 0) {
2699 if (ret != -ENOTCONN) {
2700 ERR("Error sending channel to application");
2701 }
2702 goto error;
2703 }
2704
2705 error:
2706 rcu_read_unlock();
2707 return ret;
2708 }
2709
2710 /*
2711 * From an already allocated ust app channel, create the channel buffers if
2712 * needed and send them to the application. This MUST be called with a RCU
2713 * read side lock acquired.
2714 *
2715 * Return 0 on success or else a negative value. Returns -ENOTCONN if
2716 * the application exited concurrently.
2717 */
2718 static int do_create_channel(struct ust_app *app,
2719 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2720 struct ust_app_channel *ua_chan)
2721 {
2722 int ret;
2723
2724 assert(app);
2725 assert(usess);
2726 assert(ua_sess);
2727 assert(ua_chan);
2728
2729 /* Handle buffer type before sending the channel to the application. */
2730 switch (usess->buffer_type) {
2731 case LTTNG_BUFFER_PER_UID:
2732 {
2733 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
2734 if (ret < 0) {
2735 goto error;
2736 }
2737 break;
2738 }
2739 case LTTNG_BUFFER_PER_PID:
2740 {
2741 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
2742 if (ret < 0) {
2743 goto error;
2744 }
2745 break;
2746 }
2747 default:
2748 assert(0);
2749 ret = -EINVAL;
2750 goto error;
2751 }
2752
2753 /* Initialize ust objd object using the received handle and add it. */
2754 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
2755 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
2756
2757 /* If channel is not enabled, disable it on the tracer */
2758 if (!ua_chan->enabled) {
2759 ret = disable_ust_channel(app, ua_sess, ua_chan);
2760 if (ret < 0) {
2761 goto error;
2762 }
2763 }
2764
2765 error:
2766 return ret;
2767 }
2768
2769 /*
2770 * Create UST app channel and create it on the tracer. Set ua_chanp of the
2771 * newly created channel if not NULL.
2772 *
2773 * Called with UST app session lock and RCU read-side lock held.
2774 *
2775 * Return 0 on success or else a negative value. Returns -ENOTCONN if
2776 * the application exited concurrently.
2777 */
2778 static int create_ust_app_channel(struct ust_app_session *ua_sess,
2779 struct ltt_ust_channel *uchan, struct ust_app *app,
2780 enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
2781 struct ust_app_channel **ua_chanp)
2782 {
2783 int ret = 0;
2784 struct lttng_ht_iter iter;
2785 struct lttng_ht_node_str *ua_chan_node;
2786 struct ust_app_channel *ua_chan;
2787
2788 /* Lookup channel in the ust app session */
2789 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2790 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2791 if (ua_chan_node != NULL) {
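/* Channel already exists for this app session; reuse it. */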
2792 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2793 goto end;
2794 }
2795
2796 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
2797 if (ua_chan == NULL) {
2798 /* Only malloc can fail here */
2799 ret = -ENOMEM;
2800 goto error_alloc;
2801 }
2802 shadow_copy_channel(ua_chan, uchan);
2803
2804 /* Set channel type. */
2805 ua_chan->attr.type = type;
2806
2807 ret = do_create_channel(app, usess, ua_sess, ua_chan);
2808 if (ret < 0) {
2809 goto error;
2810 }
2811
2812 DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
2813 app->pid);
2814
2815 /* Only add the channel if successful on the tracer side. */
2816 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
2817
2818 end:
2819 if (ua_chanp) {
2820 *ua_chanp = ua_chan;
2821 }
2822
2823 /* Everything went well. */
2824 return 0;
2825
2826 error:
2827 delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
2828 error_alloc:
2829 return ret;
2830 }
2831
2832 /*
2833 * Create UST app event and create it on the tracer side.
2834 *
2835 * Called with ust app session mutex held.
2836 */
2837 static
2838 int create_ust_app_event(struct ust_app_session *ua_sess,
2839 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
2840 struct ust_app *app)
2841 {
2842 int ret = 0;
2843 struct ust_app_event *ua_event;
2844
2845 /* Get event node */
2846 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
2847 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
2848 if (ua_event != NULL) {
2849 ret = -EEXIST;
2850 goto end;
2851 }
2852
2853 /* Does not exist so create one */
2854 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
2855 if (ua_event == NULL) {
2856 /* Only malloc can fail here, so something is really wrong. */
2857 ret = -ENOMEM;
2858 goto end;
2859 }
2860 shadow_copy_event(ua_event, uevent);
2861
2862 /* Create it on the tracer side */
2863 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
2864 if (ret < 0) {
2865 /* Not found previously means that it does not exist on the tracer */
2866 assert(ret != -LTTNG_UST_ERR_EXIST);
2867 goto error;
2868 }
2869
2870 add_unique_ust_app_event(ua_chan, ua_event);
2871
2872 DBG2("UST app create event %s for PID %d completed", ua_event->name,
2873 app->pid);
2874
2875 end:
2876 return ret;
2877
2878 error:
2879 /* This is safe: the caller already holds an RCU read side lock. */
2880 delete_ust_app_event(-1, ua_event);
2881 return ret;
2882 }
2883
2884 /*
2885 * Create UST metadata and open it on the tracer side.
2886 *
2887 * Called with UST app session lock held and RCU read side lock.
2888 */
2889 static int create_ust_app_metadata(struct ust_app_session *ua_sess,
2890 struct ust_app *app, struct consumer_output *consumer)
2891 {
2892 int ret = 0;
2893 struct ust_app_channel *metadata;
2894 struct consumer_socket *socket;
2895 struct ust_registry_session *registry;
2896
2897 assert(ua_sess);
2898 assert(app);
2899 assert(consumer);
2900
2901 registry = get_session_registry(ua_sess);
2902 assert(registry);
2903
2904 pthread_mutex_lock(&registry->lock);
2905
2906 /* Metadata already exists for this registry or it was closed previously */
2907 if (registry->metadata_key || registry->metadata_closed) {
2908 ret = 0;
2909 goto error;
2910 }
2911
2912 /* Allocate UST metadata */
2913 metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
2914 if (!metadata) {
2915 /* malloc() failed */
2916 ret = -ENOMEM;
2917 goto error;
2918 }
2919
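/* Use the metadata channel attributes recorded for this session. */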
2920 memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));
2921
2922 /* Need one fd for the channel. */
2923 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2924 if (ret < 0) {
2925 ERR("Exhausted number of available FD upon create metadata");
2926 goto error;
2927 }
2928
2929 /* Get the right consumer socket for the application. */
2930 socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
2931 if (!socket) {
2932 ret = -EINVAL;
2933 goto error_consumer;
2934 }
2935
2936 /*
2937 * Keep the metadata key so we can identify it on the consumer side. Assign
2938 * it to the registry *before* we ask the consumer so we avoid the race
2939 * where the consumer requests the metadata before our ask_channel call
2940 * has returned.
2941 */
2942 registry->metadata_key = metadata->key;
2943
2944 /*
2945 * Ask the consumer to create the metadata channel. The metadata object
2946 * will be created by the consumer and kept there. However, the stream is
2947 * never added or monitored until we do a first push metadata to the
2948 * consumer.
2949 */
2950 ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
2951 registry);
2952 if (ret < 0) {
2953 /* Nullify the metadata key so we don't try to close it later on. */
2954 registry->metadata_key = 0;
2955 goto error_consumer;
2956 }
2957
2958 /*
2959 * The setup command sends the metadata stream to the relayd, if
2960 * applicable, and to the thread managing the metadata. This is important
2961 * because, after this point, if an error occurs, the only way the stream
2962 * can be deleted is through the consumer monitoring it.
2963 */
2964 ret = consumer_setup_metadata(socket, metadata->key);
2965 if (ret < 0) {
2966 /* Nullify the metadata key so we don't try to close it later on. */
2967 registry->metadata_key = 0;
2968 goto error_consumer;
2969 }
2970
2971 DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
2972 metadata->key, app->pid);
2973
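/*
 * Deliberately fall through on success: the local metadata object was
 * only needed to create the channel. The consumer now owns the metadata,
 * identified by registry->metadata_key.
 */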
2974 error_consumer:
2975 lttng_fd_put(LTTNG_FD_APPS, 1);
2976 delete_ust_app_channel(-1, metadata, app);
2977 error:
2978 pthread_mutex_unlock(&registry->lock);
2979 return ret;
2980 }
2981
2982 /*
2983 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
2984 * acquired before calling this function.
2985 */
2986 struct ust_app *ust_app_find_by_pid(pid_t pid)
2987 {
2988 struct ust_app *app = NULL;
2989 struct lttng_ht_node_ulong *node;
2990 struct lttng_ht_iter iter;
2991
2992 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
2993 node = lttng_ht_iter_get_node_ulong(&iter);
2994 if (node == NULL) {
2995 DBG2("UST app no found with pid %d", pid);
2996 goto error;
2997 }
2998
2999 DBG2("Found UST app by pid %d", pid);
3000
3001 app = caa_container_of(node, struct ust_app, pid_n);
3002
3003 error:
3004 return app;
3005 }
3006
3007 /*
3008 * Allocate and init an UST app object using the registration information and
3009 * the command socket. This is called when the command socket connects to the
3010 * session daemon.
3011 *
3012 * The object is returned on success or else NULL.
3013 */
3014 struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
3015 {
3016 struct ust_app *lta = NULL;
3017
3018 assert(msg);
3019 assert(sock >= 0);
3020
3021 DBG3("UST app creating application for socket %d", sock);
3022
3023 if ((msg->bits_per_long == 64 &&
3024 (uatomic_read(&ust_consumerd64_fd) == -EINVAL))
3025 || (msg->bits_per_long == 32 &&
3026 (uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
3027 ERR("Registration failed: application \"%s\" (pid: %d) has "
3028 "%d-bit long, but no consumerd for this size is available.\n",
3029 msg->name, msg->pid, msg->bits_per_long);
3030 goto error;
3031 }
3032
3033 lta = zmalloc(sizeof(struct ust_app));
3034 if (lta == NULL) {
3035 PERROR("malloc");
3036 goto error;
3037 }
3038
3039 lta->ppid = msg->ppid;
3040 lta->uid = msg->uid;
3041 lta->gid = msg->gid;
3042
3043 lta->bits_per_long = msg->bits_per_long;
3044 lta->uint8_t_alignment = msg->uint8_t_alignment;
3045 lta->uint16_t_alignment = msg->uint16_t_alignment;
3046 lta->uint32_t_alignment = msg->uint32_t_alignment;
3047 lta->uint64_t_alignment = msg->uint64_t_alignment;
3048 lta->long_alignment = msg->long_alignment;
3049 lta->byte_order = msg->byte_order;
3050
3051 lta->v_major = msg->major;
3052 lta->v_minor = msg->minor;
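/* The sessions HT is keyed by tracing session id (u64); ust_objd by UST object handle. */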
3053 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3054 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3055 lta->notify_sock = -1;
3056
3057 /* Copy name and make sure it's NULL terminated. */
3058 strncpy(lta->name, msg->name, sizeof(lta->name));
3059 lta->name[UST_APP_PROCNAME_LEN] = '\0';
3060
3061 /*
3062 * The application's compatibility is checked when its registration
3063 * information is received, before this function is called. So, at this
3064 * point, the application is known to work with this session daemon.
3065 */
3066 lta->compatible = 1;
3067
3068 lta->pid = msg->pid;
3069 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
3070 lta->sock = sock;
3071 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
3072
3073 CDS_INIT_LIST_HEAD(&lta->teardown_head);
3074
3075 error:
3076 return lta;
3077 }
3078
3079 /*
3080 * For a given application object, add it to every hash table.
3081 */
3082 void ust_app_add(struct ust_app *app)
3083 {
3084 assert(app);
3085 assert(app->notify_sock >= 0);
3086
3087 rcu_read_lock();
3088
3089 /*
3090 * On a re-registration, we want to kick out the previous registration of
3091 * that pid
3092 */
3093 lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
3094
3095 /*
3096 * The socket _should_ be unique until _we_ call close. So, an add_unique
3097 * for ust_app_ht_by_sock is used, which assert-fails if the entry was
3098 * already in the table.
3099 */
3100 lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
3101
3102 /* Add application to the notify socket hash table. */
3103 lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
3104 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
3105
3106 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
3107 "notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
3108 app->gid, app->sock, app->name, app->notify_sock, app->v_major,
3109 app->v_minor);
3110
3111 rcu_read_unlock();
3112 }
3113
3114 /*
3115 * Set the application version into the object.
3116 *
3117 * Return 0 on success or else a negative value, either an errno code or an
3118 * LTTng-UST error code.
3119 */
3120 int ust_app_version(struct ust_app *app)
3121 {
3122 int ret;
3123
3124 assert(app);
3125
3126 ret = ustctl_tracer_version(app->sock, &app->version);
3127 if (ret < 0) {
3128 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3129 ERR("UST app %d version failed with ret %d", app->sock, ret);
3130 } else {
3131 DBG3("UST app %d version failed. Application is dead", app->sock);
3132 }
3133 }
3134
3135 return ret;
3136 }
3137
3138 /*
3139 * Unregister app by removing it from the global traceable app list and freeing
3140 * the data struct.
3141 *
3142 * The socket is already closed at this point, so there is no need to close it.
3143 */
3144 void ust_app_unregister(int sock)
3145 {
3146 struct ust_app *lta;
3147 struct lttng_ht_node_ulong *node;
3148 struct lttng_ht_iter ust_app_sock_iter;
3149 struct lttng_ht_iter iter;
3150 struct ust_app_session *ua_sess;
3151 int ret;
3152
3153 rcu_read_lock();
3154
3155 /* Get the node reference for a call_rcu */
3156 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
3157 node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
3158 assert(node);
3159
3160 lta = caa_container_of(node, struct ust_app, sock_n);
3161 DBG("PID %d unregistering with sock %d", lta->pid, sock);
3162
3163 /*
3164 * For per-PID buffers, perform "push metadata" and flush all
3165 * application streams before removing app from hash tables,
3166 * ensuring proper behavior of data_pending check.
3167 * Remove sessions so they are not visible during deletion.
3168 */
3169 cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
3170 node.node) {
3171 struct ust_registry_session *registry;
3172
3173 ret = lttng_ht_del(lta->sessions, &iter);
3174 if (ret) {
3175 /* The session was already removed, so it is scheduled for teardown. */
3176 continue;
3177 }
3178
3179 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
3180 (void) ust_app_flush_app_session(lta, ua_sess);
3181 }
3182
3183 /*
3184 * Add session to list for teardown. This is safe since at this point we
3185 * are the only one using this list.
3186 */
3187 pthread_mutex_lock(&ua_sess->lock);
3188
3189 if (ua_sess->deleted) {
3190 pthread_mutex_unlock(&ua_sess->lock);
3191 continue;
3192 }
3193
3194 /*
3195 * Normally, this is done in the delete session process which is
3196 * executed in the call rcu below. However, upon unregistration we can't
3197 * afford to wait for the grace period before pushing data or else the
3198 * data pending feature can race between the unregistration and stop
3199 * command where the data pending command is sent *before* the grace
3200 * period ended.
3201 *
3202 * The close metadata below nullifies the metadata pointer in the
3203 * session so the delete session will NOT push/close a second time.
3204 */
3205 registry = get_session_registry(ua_sess);
3206 if (registry) {
3207 /* Push metadata for application before freeing the application. */
3208 (void) push_metadata(registry, ua_sess->consumer);
3209
3210 /*
3211 * Don't ask to close metadata for global per UID buffers. Close
3212 * metadata only on destroy trace session in this case. Also, the
3213 * previous push metadata could have flagged the metadata registry to
3214 * close, so don't send a close command if it is already closed.
3215 */
3216 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
3217 /* And ask to close it for this session registry. */
3218 (void) close_metadata(registry, ua_sess->consumer);
3219 }
3220 }
3221 cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
3222
3223 pthread_mutex_unlock(&ua_sess->lock);
3224 }
3225
3226 /* Remove application from socket hash table. */
3227 ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
3228 assert(!ret);
3229
3230 /*
3231 * Remove application from notify hash table. The thread handling the
3232 * notify socket could have deleted the node already, so ignore any error;
3233 * either way it's valid. The close of that socket is handled by the other
3234 * thread.
3235 */
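/* Point the iterator directly at the node since no lookup was done. */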
3236 iter.iter.node = &lta->notify_sock_n.node;
3237 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
3238
3239 /*
3240 * Ignore return value since the node might have been removed before by an
3241 * add replace during app registration because the PID can be reassigned by
3242 * the OS.
3243 */
3244 iter.iter.node = &lta->pid_n.node;
3245 ret = lttng_ht_del(ust_app_ht, &iter);
3246 if (ret) {
3247 DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
3248 lta->pid);
3249 }
3250
3251 /* Free memory */
3252 call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
3253
3254 rcu_read_unlock();
3255 return;
3256 }
3257
3258 /*
3259 * Fill the events array with the names of all events of all registered apps.
3260 */
3261 int ust_app_list_events(struct lttng_event **events)
3262 {
3263 int ret, handle;
3264 size_t nbmem, count = 0;
3265 struct lttng_ht_iter iter;
3266 struct ust_app *app;
3267 struct lttng_event *tmp_event;
3268
3269 nbmem = UST_APP_EVENT_LIST_SIZE;
3270 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
3271 if (tmp_event == NULL) {
3272 PERROR("zmalloc ust app events");
3273 ret = -ENOMEM;
3274 goto error;
3275 }
3276
3277 rcu_read_lock();
3278
3279 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3280 struct lttng_ust_tracepoint_iter uiter;
3281
3282 health_code_update();
3283
3284 if (!app->compatible) {
3285 /*
3286 * TODO: In time, we should notify the caller of this error by
3287 * telling them that this is a version error.
3288 */
3289 continue;
3290 }
3291 handle = ustctl_tracepoint_list(app->sock);
3292 if (handle < 0) {
3293 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3294 ERR("UST app list events getting handle failed for app pid %d",
3295 app->pid);
3296 }
3297 continue;
3298 }
3299
3300 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
3301 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3302 /* Handle ustctl error. */
3303 if (ret < 0) {
3304 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3305 ERR("UST app tp list get failed for app %d with ret %d",
3306 app->sock, ret);
3307 } else {
3308 DBG3("UST app tp list get failed. Application is dead");
3309 /*
3310 * This is normal behavior: an application can die while we list
3311 * its events. Don't report an error so the execution can
3312 * continue normally.
3313 */
3314 break;
3315 }
3316 free(tmp_event);
3317 goto rcu_error;
3318 }
3319
3320 health_code_update();
3321 if (count >= nbmem) {
3322 /* In case the realloc fails, we free the memory */
3323 struct lttng_event *new_tmp_event;
3324 size_t new_nbmem;
3325
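/* Double the capacity; geometric growth keeps the number of reallocations low. */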
3326 new_nbmem = nbmem << 1;
3327 DBG2("Reallocating event list from %zu to %zu entries",
3328 nbmem, new_nbmem);
3329 new_tmp_event = realloc(tmp_event,
3330 new_nbmem * sizeof(struct lttng_event));
3331 if (new_tmp_event == NULL) {
3332 PERROR("realloc ust app events");
3333 free(tmp_event);
3334 ret = -ENOMEM;
3335 goto rcu_error;
3336 }
3337 /* Zero the new memory */
3338 memset(new_tmp_event + nbmem, 0,
3339 (new_nbmem - nbmem) * sizeof(struct lttng_event));
3340 nbmem = new_nbmem;
3341 tmp_event = new_tmp_event;
3342 }
3343 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
3344 tmp_event[count].loglevel = uiter.loglevel;
3345 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
3346 tmp_event[count].pid = app->pid;
3347 tmp_event[count].enabled = -1;
3348 count++;
3349 }
3350 }
3351
3352 ret = count;
3353 *events = tmp_event;
3354
3355 DBG2("UST app list events done (%zu events)", count);
3356
3357 rcu_error:
3358 rcu_read_unlock();
3359 error:
3360 health_code_update();
3361 return ret;
3362 }
3363
3364 /*
3365 * Fill the fields array with all event fields of all registered apps.
3366 */
3367 int ust_app_list_event_fields(struct lttng_event_field **fields)
3368 {
3369 int ret, handle;
3370 size_t nbmem, count = 0;
3371 struct lttng_ht_iter iter;
3372 struct ust_app *app;
3373 struct lttng_event_field *tmp_event;
3374
3375 nbmem = UST_APP_EVENT_LIST_SIZE;
3376 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3377 if (tmp_event == NULL) {
3378 PERROR("zmalloc ust app event fields");
3379 ret = -ENOMEM;
3380 goto error;
3381 }
3382
3383 rcu_read_lock();
3384
3385 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3386 struct lttng_ust_field_iter uiter;
3387
3388 health_code_update();
3389
3390 if (!app->compatible) {
3391 /*
3392 * TODO: In time, we should notify the caller of this error by
3393 * telling them that this is a version error.
3394 */
3395 continue;
3396 }
3397 handle = ustctl_tracepoint_field_list(app->sock);
3398 if (handle < 0) {
3399 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3400 ERR("UST app list field getting handle failed for app pid %d",
3401 app->pid);
3402 }
3403 continue;
3404 }
3405
3406 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
3407 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3408 /* Handle ustctl error. */
3409 if (ret < 0) {
3410 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3411 ERR("UST app tp list field failed for app %d with ret %d",
3412 app->sock, ret);
3413 } else {
3414 DBG3("UST app tp list field failed. Application is dead");
3415 /*
3416 * This is normal behavior: an application can die while we list
3417 * its event fields. Don't report an error so the execution can
3418 * continue normally and move on to the next application.
3419 */
3420 break;
3421 }
3422 free(tmp_event);
3423 goto rcu_error;
3424 }
3425
3426 health_code_update();
3427 if (count >= nbmem) {
3428 /* In case the realloc fails, we free the memory */
3429 struct lttng_event_field *new_tmp_event;
3430 size_t new_nbmem;
3431
3432 new_nbmem = nbmem << 1;
3433 DBG2("Reallocating event field list from %zu to %zu entries",
3434 nbmem, new_nbmem);
3435 new_tmp_event = realloc(tmp_event,
3436 new_nbmem * sizeof(struct lttng_event_field));
3437 if (new_tmp_event == NULL) {
3438 PERROR("realloc ust app event fields");
3439 free(tmp_event);
3440 ret = -ENOMEM;
3441 goto rcu_error;
3442 }
3443 /* Zero the new memory */
3444 memset(new_tmp_event + nbmem, 0,
3445 (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
3446 nbmem = new_nbmem;
3447 tmp_event = new_tmp_event;
3448 }
3449
3450 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
3451 /* Mapping between these enums matches 1 to 1. */
3452 tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
3453 tmp_event[count].nowrite = uiter.nowrite;
3454
3455 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
3456 tmp_event[count].event.loglevel = uiter.loglevel;
3457 tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
3458 tmp_event[count].event.pid = app->pid;
3459 tmp_event[count].event.enabled = -1;
3460 count++;
3461 }
3462 }
3463
3464 ret = count;
3465 *fields = tmp_event;
3466
3467 DBG2("UST app list event fields done (%zu events)", count);
3468
3469 rcu_error:
3470 rcu_read_unlock();
3471 error:
3472 health_code_update();
3473 return ret;
3474 }
3475
3476 /*
3477 * Free and clean all traceable apps of the global list.
3478 *
3479 * Should _NOT_ be called with RCU read-side lock held.
3480 */
3481 void ust_app_clean_list(void)
3482 {
3483 int ret;
3484 struct ust_app *app;
3485 struct lttng_ht_iter iter;
3486
3487 DBG2("UST app cleaning registered apps hash table");
3488
3489 rcu_read_lock();
3490
3491 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3492 ret = lttng_ht_del(ust_app_ht, &iter);
3493 assert(!ret);
3494 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
3495 }
3496
3497 /* Cleanup socket hash table */
3498 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
3499 sock_n.node) {
3500 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
3501 assert(!ret);
3502 }
3503
3504 /* Cleanup notify socket hash table */
3505 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
3506 notify_sock_n.node) {
3507 ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
3508 assert(!ret);
3509 }
3510 rcu_read_unlock();
3511
3512 /* Destroy is done only when the ht is empty */
3513 ht_cleanup_push(ust_app_ht);
3514 ht_cleanup_push(ust_app_ht_by_sock);
3515 ht_cleanup_push(ust_app_ht_by_notify_sock);
3516 }
3517
3518 /*
3519 * Init UST app hash tables.
3520 */
3521 void ust_app_ht_alloc(void)
3522 {
3523 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3524 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3525 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3526 }
3527
3528 /*
3529 * For a specific UST session, disable the channel for all registered apps.
3530 */
3531 int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
3532 struct ltt_ust_channel *uchan)
3533 {
3534 int ret = 0;
3535 struct lttng_ht_iter iter;
3536 struct lttng_ht_node_str *ua_chan_node;
3537 struct ust_app *app;
3538 struct ust_app_session *ua_sess;
3539 struct ust_app_channel *ua_chan;
3540
3541 if (usess == NULL || uchan == NULL) {
3542 ERR("Disabling UST global channel with NULL values");
3543 ret = -1;
3544 goto error;
3545 }
3546
3547 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
3548 uchan->name, usess->id);
3549
3550 rcu_read_lock();
3551
3552 /* For every registered application. */
3553 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3554 struct lttng_ht_iter uiter;
3555 if (!app->compatible) {
3556 /*
3557 * TODO: In time, we should notify the caller of this error by
3558 * telling them that this is a version error.
3559 */
3560 continue;
3561 }
3562 ua_sess = lookup_session_by_app(usess, app);
3563 if (ua_sess == NULL) {
3564 continue;
3565 }
3566
3567 /* Get channel */
3568 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3569 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3570 /* If the session is found for the app, the channel must be there. */
3571 assert(ua_chan_node);
3572
3573 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3574 /* The channel must not be already disabled */
3575 assert(ua_chan->enabled == 1);
3576
3577 /* Disable channel onto application */
3578 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
3579 if (ret < 0) {
3580 /* XXX: We might want to report this error at some point... */
3581 continue;
3582 }
3583 }
3584
3585 rcu_read_unlock();
3586
3587 error:
3588 return ret;
3589 }
3590
3591 /*
3592 * For a specific UST session, enable the channel for all registered apps.
3593 */
3594 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
3595 struct ltt_ust_channel *uchan)
3596 {
3597 int ret = 0;
3598 struct lttng_ht_iter iter;
3599 struct ust_app *app;
3600 struct ust_app_session *ua_sess;
3601
3602 if (usess == NULL || uchan == NULL) {
3603 ERR("Adding UST global channel to NULL values");
3604 ret = -1;
3605 goto error;
3606 }
3607
3608 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
3609 uchan->name, usess->id);
3610
3611 rcu_read_lock();
3612
3613 /* For every registered application. */
3614 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3615 if (!app->compatible) {
3616 /*
3617 * TODO: In time, we should notify the caller of this error by
3618 * telling them that this is a version error.
3619 */
3620 continue;
3621 }
3622 ua_sess = lookup_session_by_app(usess, app);
3623 if (ua_sess == NULL) {
3624 continue;
3625 }
3626
3627 /* Enable channel onto application */
3628 ret = enable_ust_app_channel(ua_sess, uchan, app);
3629 if (ret < 0) {
3630 /* XXX: We might want to report this error at some point... */
3631 continue;
3632 }
3633 }
3634
3635 rcu_read_unlock();
3636
3637 error:
3638 return ret;
3639 }
3640
3641 /*
3642 * Disable an event in a channel and for a specific session.
3643 */
3644 int ust_app_disable_event_glb(struct ltt_ust_session *usess,
3645 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3646 {
3647 int ret = 0;
3648 struct lttng_ht_iter iter, uiter;
3649 struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
3650 struct ust_app *app;
3651 struct ust_app_session *ua_sess;
3652 struct ust_app_channel *ua_chan;
3653 struct ust_app_event *ua_event;
3654
3655 DBG("UST app disabling event %s for all apps in channel "
3656 "%s for session id %" PRIu64,
3657 uevent->attr.name, uchan->name, usess->id);
3658
3659 rcu_read_lock();
3660
3661 /* For all registered applications */
3662 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3663 if (!app->compatible) {
3664 /*
3665 * TODO: In time, we should notify the caller of this error by
3666 * telling them that this is a version error.
3667 */
3668 continue;
3669 }
3670 ua_sess = lookup_session_by_app(usess, app);
3671 if (ua_sess == NULL) {
3672 /* Next app */
3673 continue;
3674 }
3675
3676 /* Lookup channel in the ust app session */
3677 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3678 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3679 if (ua_chan_node == NULL) {
3680 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
3681 "Skipping", uchan->name, usess->id, app->pid);
3682 continue;
3683 }
3684 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3685
3686 lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &uiter);
3687 ua_event_node = lttng_ht_iter_get_node_str(&uiter);
3688 if (ua_event_node == NULL) {
3689 DBG2("Event %s not found in channel %s for app pid %d."
3690 "Skipping", uevent->attr.name, uchan->name, app->pid);
3691 continue;
3692 }
3693 ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);
3694
3695 ret = disable_ust_app_event(ua_sess, ua_event, app);
3696 if (ret < 0) {
3697 /* XXX: Report error someday... */
3698 continue;
3699 }
3700 }
3701
3702 rcu_read_unlock();
3703
3704 return ret;
3705 }
3706
3707 /*
3708 * For a specific UST session, create the channel for all registered apps.
3709 */
3710 int ust_app_create_channel_glb(struct ltt_ust_session *usess,
3711 struct ltt_ust_channel *uchan)
3712 {
3713 int ret = 0, created;
3714 struct lttng_ht_iter iter;
3715 struct ust_app *app;
3716 struct ust_app_session *ua_sess = NULL;
3717
3718 /* Very wrong code flow */
3719 assert(usess);
3720 assert(uchan);
3721
3722 DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
3723 uchan->name, usess->id);
3724
3725 rcu_read_lock();
3726
3727 /* For every registered application. */
3728 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3729 if (!app->compatible) {
3730 /*
3731 * TODO: In time, we should notify the caller of this error by
3732 * telling them that this is a version error.
3733 */
3734 continue;
3735 }
3736 /*
3737 * Create session on the tracer side and add it to app session HT. Note
3738 * that if the session already exists, it will simply return a pointer to
3739 * the ust app session.
3740 */
3741 ret = create_ust_app_session(usess, app, &ua_sess, &created);
3742 if (ret < 0) {
3743 switch (ret) {
3744 case -ENOTCONN:
3745 /*
3746 * The application's socket is not valid. Either a bad socket
3747 * or a timeout on it. We can't inform the caller that for a
3748 * specific app, the session failed so let's continue here.
3749 */
3750 ret = 0; /* Not an error. */
3751 continue;
3752 case -ENOMEM:
3753 default:
3754 goto error_rcu_unlock;
3755 }
3756 }
3757 assert(ua_sess);
3758
3759 pthread_mutex_lock(&ua_sess->lock);
3760
3761 if (ua_sess->deleted) {
3762 pthread_mutex_unlock(&ua_sess->lock);
3763 continue;
3764 }
3765
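/*
 * The metadata channel is never created here; only record the requested
 * attributes. The channel itself is created later by
 * create_ust_app_metadata() when tracing starts.
 */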
3766 if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
3767 sizeof(uchan->name))) {
3768 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, &uchan->attr);
3769 ret = 0;
3770 } else {
3771 /* Create channel onto application. We don't need the chan ref. */
3772 ret = create_ust_app_channel(ua_sess, uchan, app,
3773 LTTNG_UST_CHAN_PER_CPU, usess, NULL);
3774 }
3775 pthread_mutex_unlock(&ua_sess->lock);
3776 if (ret < 0) {
3777 /* Clean up the session if we created it above. */
3778 if (created) {
3779 destroy_app_session(app, ua_sess);
3780 }
3781 switch (ret) {
3782 case -ENOTCONN:
3783 /*
3784 * The application's socket is not valid. Either a bad socket
3785 * or a timeout on it. We can't inform the caller that for a
3786 * specific app, the session failed so let's continue here.
3787 */
3788 ret = 0; /* Not an error. */
3789 continue;
3790 case -ENOMEM:
3791 default:
3792 goto error_rcu_unlock;
3793 }
3794 }
3795 }
3796
3797 error_rcu_unlock:
3798 rcu_read_unlock();
3799 return ret;
3800 }
3801
3802 /*
3803 * Enable event for a specific session and channel on the tracer.
3804 */
3805 int ust_app_enable_event_glb(struct ltt_ust_session *usess,
3806 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3807 {
3808 int ret = 0;
3809 struct lttng_ht_iter iter, uiter;
3810 struct lttng_ht_node_str *ua_chan_node;
3811 struct ust_app *app;
3812 struct ust_app_session *ua_sess;
3813 struct ust_app_channel *ua_chan;
3814 struct ust_app_event *ua_event;
3815
3816 DBG("UST app enabling event %s for all apps for session id %" PRIu64,
3817 uevent->attr.name, usess->id);
3818
3819 /*
3820 * NOTE: At this point, this function is called only if the session and
3821 * channel passed are already created for all apps and enabled on the
3822 * tracer as well.
3823 */
3824
3825 rcu_read_lock();
3826
3827 /* For all registered applications */
3828 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3829 if (!app->compatible) {
3830 /*
3831 * TODO: In time, we should notify the caller of this error by
3832 * telling them that this is a version error.
3833 */
3834 continue;
3835 }
3836 ua_sess = lookup_session_by_app(usess, app);
3837 if (!ua_sess) {
3838 /* The application has a problem or is probably dead. */
3839 continue;
3840 }
3841
3842 pthread_mutex_lock(&ua_sess->lock);
3843
3844 if (ua_sess->deleted) {
3845 pthread_mutex_unlock(&ua_sess->lock);
3846 continue;
3847 }
3848
3849 /* Lookup channel in the ust app session */
3850 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3851 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3852 /*
3853 * It is possible that the channel cannot be found if
3854 * the channel/event creation occurs concurrently with
3855 * an application exit.
3856 */
3857 if (!ua_chan_node) {
3858 pthread_mutex_unlock(&ua_sess->lock);
3859 continue;
3860 }
3861
3862 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3863
3864 /* Get event node */
3865 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
3866 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
3867 if (ua_event == NULL) {
3868 DBG3("UST app enable event %s not found for app PID %d."
3869 "Skipping app", uevent->attr.name, app->pid);
3870 goto next_app;
3871 }
3872
3873 ret = enable_ust_app_event(ua_sess, ua_event, app);
3874 if (ret < 0) {
3875 pthread_mutex_unlock(&ua_sess->lock);
3876 goto error;
3877 }
3878 next_app:
3879 pthread_mutex_unlock(&ua_sess->lock);
3880 }
3881
3882 error:
3883 rcu_read_unlock();
3884 return ret;
3885 }
3886
3887 /*
3888 * For a specific existing UST session and UST channel, creates the event for
3889 * all registered apps.
3890 */
3891 int ust_app_create_event_glb(struct ltt_ust_session *usess,
3892 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3893 {
3894 int ret = 0;
3895 struct lttng_ht_iter iter, uiter;
3896 struct lttng_ht_node_str *ua_chan_node;
3897 struct ust_app *app;
3898 struct ust_app_session *ua_sess;
3899 struct ust_app_channel *ua_chan;
3900
3901 DBG("UST app creating event %s for all apps for session id %" PRIu64,
3902 uevent->attr.name, usess->id);
3903
3904 rcu_read_lock();
3905
3906 /* For all registered applications */
3907 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3908 if (!app->compatible) {
3909 /*
3910 * TODO: In time, we should notify the caller of this error by
3911 * telling them that this is a version error.
3912 */
3913 continue;
3914 }
3915 ua_sess = lookup_session_by_app(usess, app);
3916 if (!ua_sess) {
3917 /* The application has a problem or is probably dead. */
3918 continue;
3919 }
3920
3921 pthread_mutex_lock(&ua_sess->lock);
3922
3923 if (ua_sess->deleted) {
3924 pthread_mutex_unlock(&ua_sess->lock);
3925 continue;
3926 }
3927
3928 /* Lookup channel in the ust app session */
3929 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3930 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3931 /* If the channel is not found, there is a code flow error */
3932 assert(ua_chan_node);
3933
3934 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3935
3936 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
3937 pthread_mutex_unlock(&ua_sess->lock);
3938 if (ret < 0) {
3939 if (ret != -LTTNG_UST_ERR_EXIST) {
3940 /* Possible value at this point: -ENOMEM. If so, we stop! */
3941 break;
3942 }
3943 DBG2("UST app event %s already exist on app PID %d",
3944 uevent->attr.name, app->pid);
3945 continue;
3946 }
3947 }
3948
3949 rcu_read_unlock();
3950
3951 return ret;
3952 }
3953
3954 /*
3955 * Start tracing for a specific UST session and app.
3956 */
3957 static
3958 int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
3959 {
3960 int ret = 0;
3961 struct ust_app_session *ua_sess;
3962
3963 DBG("Starting tracing for ust app pid %d", app->pid);
3964
3965 rcu_read_lock();
3966
3967 if (!app->compatible) {
3968 goto end;
3969 }
3970
3971 ua_sess = lookup_session_by_app(usess, app);
3972 if (ua_sess == NULL) {
3973 /* The session is being torn down. Ignore and continue. */
3974 goto end;
3975 }
3976
3977 pthread_mutex_lock(&ua_sess->lock);
3978
3979 if (ua_sess->deleted) {
3980 pthread_mutex_unlock(&ua_sess->lock);
3981 goto end;
3982 }
3983
3984 /* Upon restart, skip the setup; it was already done. */
3985 if (ua_sess->started) {
3986 goto skip_setup;
3987 }
3988
3989 /* Create directories if consumer is LOCAL and has a path defined. */
3990 if (usess->consumer->type == CONSUMER_DST_LOCAL &&
3991 strlen(usess->consumer->dst.trace_path) > 0) {
3992 ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
3993 S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
3994 if (ret < 0) {
3995 if (errno != EEXIST) {
3996 ERR("Trace directory creation error");
3997 goto error_unlock;
3998 }
3999 }
4000 }
4001
4002 /*
4003 * Create the metadata for the application. This returns gracefully if
4004 * the metadata was already set for the session.
4005 */
4006 ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
4007 if (ret < 0) {
4008 goto error_unlock;
4009 }
4010
4011 health_code_update();
4012
4013 skip_setup:
4014 /* This starts the UST tracing */
4015 ret = ustctl_start_session(app->sock, ua_sess->handle);
4016 if (ret < 0) {
4017 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4018 ERR("Error starting tracing for app pid: %d (ret: %d)",
4019 app->pid, ret);
4020 } else {
4021 DBG("UST app start session failed. Application is dead.");
4022 /*
4023 * This is normal behavior: an application can die during the
4024 * creation process. Don't report an error so the execution can
4025 * continue normally.
4026 */
4027 pthread_mutex_unlock(&ua_sess->lock);
4028 goto end;
4029 }
4030 goto error_unlock;
4031 }
4032
4033 /* Indicate that the session has been started once */
4034 ua_sess->started = 1;
4035
4036 pthread_mutex_unlock(&ua_sess->lock);
4037
4038 health_code_update();
4039
4040 /* Quiescent wait after starting trace */
4041 ret = ustctl_wait_quiescent(app->sock);
4042 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4043 ERR("UST app wait quiescent failed for app pid %d ret %d",
4044 app->pid, ret);
4045 }
4046
4047 end:
4048 rcu_read_unlock();
4049 health_code_update();
4050 return 0;
4051
4052 error_unlock:
4053 pthread_mutex_unlock(&ua_sess->lock);
4054 rcu_read_unlock();
4055 health_code_update();
4056 return -1;
4057 }
4058
4059 /*
4060 * Stop tracing for a specific UST session and app.
4061 */
4062 static
4063 int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
4064 {
4065 int ret = 0;
4066 struct ust_app_session *ua_sess;
4067 struct ust_registry_session *registry;
4068
4069 DBG("Stopping tracing for ust app pid %d", app->pid);
4070
4071 rcu_read_lock();
4072
4073 if (!app->compatible) {
4074 goto end_no_session;
4075 }
4076
4077 ua_sess = lookup_session_by_app(usess, app);
4078 if (ua_sess == NULL) {
4079 goto end_no_session;
4080 }
4081
4082 pthread_mutex_lock(&ua_sess->lock);
4083
4084 if (ua_sess->deleted) {
4085 pthread_mutex_unlock(&ua_sess->lock);
4086 goto end_no_session;
4087 }
4088
4089 /*
4090 * If started = 0, it means that stop trace has been called for a session
4091 * that was never started. It's possible since we can have a failed start
4092 * from either the application manager thread or the command thread. Simply
4093 * indicate that this is a stop error.
4094 */
4095 if (!ua_sess->started) {
4096 goto error_rcu_unlock;
4097 }
4098
4099 health_code_update();
4100
4101 /* This inhibits UST tracing */
4102 ret = ustctl_stop_session(app->sock, ua_sess->handle);
4103 if (ret < 0) {
4104 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4105 ERR("Error stopping tracing for app pid: %d (ret: %d)",
4106 app->pid, ret);
4107 } else {
4108 DBG("UST app stop session failed. Application is dead.");
4109 /*
4110 * This is normal behavior: an application can die during the
4111 * creation process. Don't report an error so the execution can
4112 * continue normally.
4113 */
4114 goto end_unlock;
4115 }
4116 goto error_rcu_unlock;
4117 }
4118
4119 health_code_update();
4120
4121 /* Quiescent wait after stopping trace */
4122 ret = ustctl_wait_quiescent(app->sock);
4123 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4124 ERR("UST app wait quiescent failed for app pid %d ret %d",
4125 app->pid, ret);
4126 }
4127
4128 health_code_update();
4129
4130 registry = get_session_registry(ua_sess);
4131 assert(registry);
4132
4133 /* Push metadata for the application before freeing the application. */
4134 (void) push_metadata(registry, ua_sess->consumer);
4135
4136 end_unlock:
4137 pthread_mutex_unlock(&ua_sess->lock);
4138 end_no_session:
4139 rcu_read_unlock();
4140 health_code_update();
4141 return 0;
4142
4143 error_rcu_unlock:
4144 pthread_mutex_unlock(&ua_sess->lock);
4145 rcu_read_unlock();
4146 health_code_update();
4147 return -1;
4148 }
4149
4150 static
4151 int ust_app_flush_app_session(struct ust_app *app,
4152 struct ust_app_session *ua_sess)
4153 {
4154 int ret, retval = 0;
4155 struct lttng_ht_iter iter;
4156 struct ust_app_channel *ua_chan;
4157 struct consumer_socket *socket;
4158
4159 DBG("Flushing app session buffers for ust app pid %d", app->pid);
4160
4161 rcu_read_lock();
4162
4163 if (!app->compatible) {
4164 goto end_not_compatible;
4165 }
4166
4167 pthread_mutex_lock(&ua_sess->lock);
4168
4169 if (ua_sess->deleted) {
4170 goto end_deleted;
4171 }
4172
4173 health_code_update();
4174
4175 /* Flushing buffers */
4176 socket = consumer_find_socket_by_bitness(app->bits_per_long,
4177 ua_sess->consumer);
4178
4179 /* Flush buffers and push metadata. */
4180 switch (ua_sess->buffer_type) {
4181 case LTTNG_BUFFER_PER_PID:
4182 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
4183 node.node) {
4184 health_code_update();
4185 assert(ua_chan->is_sent);
4186 ret = consumer_flush_channel(socket, ua_chan->key);
4187 if (ret) {
4188 ERR("Error flushing consumer channel");
4189 retval = -1;
4190 continue;
4191 }
4192 }
4193 break;
4194 case LTTNG_BUFFER_PER_UID:
4195 default:
4196 assert(0);
4197 break;
4198 }
4199
4200 health_code_update();
4201
4202 end_deleted:
4203 pthread_mutex_unlock(&ua_sess->lock);
4204
4205 end_not_compatible:
4206 rcu_read_unlock();
4207 health_code_update();
4208 return retval;
4209 }
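
/*
 * Usage sketch (illustrative only): flushing one application's session
 * buffers requires looking the session up under the RCU read-side lock
 * first, which is exactly what ust_app_flush_session() below does in its
 * per-PID branch. Error handling elided.
 */
#if 0
rcu_read_lock();
ua_sess = lookup_session_by_app(usess, app);
if (ua_sess) {
	(void) ust_app_flush_app_session(app, ua_sess);
}
rcu_read_unlock();
#endif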
4210
4211 /*
4212 * Flush buffers for all applications for a specific UST session.
4213 * Called with UST session lock held.
4214 */
4215 static
4216 int ust_app_flush_session(struct ltt_ust_session *usess)
4217 {
4219 int ret = 0;
4220
4221 DBG("Flushing session buffers for all ust apps");
4222
4223 rcu_read_lock();
4224
4225 /* Flush buffers and push metadata. */
4226 switch (usess->buffer_type) {
4227 case LTTNG_BUFFER_PER_UID:
4228 {
4229 struct buffer_reg_uid *reg;
4230 struct lttng_ht_iter iter;
4231
4232 /* Flush all per UID buffers associated to that session. */
4233 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
4234 struct ust_registry_session *ust_session_reg;
4235 struct buffer_reg_channel *reg_chan;
4236 struct consumer_socket *socket;
4237
4238 /* Get consumer socket to use to push the metadata. */
4239 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
4240 usess->consumer);
4241 if (!socket) {
4242 /* Ignore request if no consumer is found for the session. */
4243 continue;
4244 }
4245
4246 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
4247 reg_chan, node.node) {
4248 /*
4249 * The following call will print error values, so the return
4250 * code is of little importance: whatever happens, we have to
4251 * try them all.
4252 */
4253 (void) consumer_flush_channel(socket, reg_chan->consumer_key);
4254 }
4255
4256 ust_session_reg = reg->registry->reg.ust;
4257 /* Push metadata. */
4258 (void) push_metadata(ust_session_reg, usess->consumer);
4259 }
4260 break;
4261 }
4262 case LTTNG_BUFFER_PER_PID:
4263 {
4264 struct ust_app_session *ua_sess;
4265 struct lttng_ht_iter iter;
4266 struct ust_app *app;
4267
4268 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4269 ua_sess = lookup_session_by_app(usess, app);
4270 if (ua_sess == NULL) {
4271 continue;
4272 }
4273 (void) ust_app_flush_app_session(app, ua_sess);
4274 }
4275 break;
4276 }
4277 default:
4278 ret = -1;
4279 assert(0);
4280 break;
4281 }
4282
4283 rcu_read_unlock();
4284 health_code_update();
4285 return ret;
4286 }
4287
4288 /*
4289 * Destroy a specific UST session in apps.
4290 */
4291 static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
4292 {
4293 int ret;
4294 struct ust_app_session *ua_sess;
4295 struct lttng_ht_iter iter;
4296 struct lttng_ht_node_u64 *node;
4297
4298 DBG("Destroy tracing for ust app pid %d", app->pid);
4299
4300 rcu_read_lock();
4301
4302 if (!app->compatible) {
4303 goto end;
4304 }
4305
4306 __lookup_session_by_app(usess, app, &iter);
4307 node = lttng_ht_iter_get_node_u64(&iter);
4308 if (node == NULL) {
4309 /* Session is being deleted or is already deleted. */
4310 goto end;
4311 }
4312 ua_sess = caa_container_of(node, struct ust_app_session, node);
4313
4314 health_code_update();
4315 destroy_app_session(app, ua_sess);
4316
4317 health_code_update();
4318
4319 /* Quiescent wait after stopping trace */
4320 ret = ustctl_wait_quiescent(app->sock);
4321 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4322 ERR("UST app wait quiescent failed for app pid %d ret %d",
4323 app->pid, ret);
4324 }
4325 end:
4326 rcu_read_unlock();
4327 health_code_update();
4328 return 0;
4329 }
4330
4331 /*
4332 * Start tracing for the UST session.
4333 */
4334 int ust_app_start_trace_all(struct ltt_ust_session *usess)
4335 {
4336 int ret = 0;
4337 struct lttng_ht_iter iter;
4338 struct ust_app *app;
4339
4340 DBG("Starting all UST traces");
4341
4342 rcu_read_lock();
4343
4344 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4345 ret = ust_app_start_trace(usess, app);
4346 if (ret < 0) {
4347 /* Continue to the next app even on error */
4348 continue;
4349 }
4350 }
4351
4352 rcu_read_unlock();
4353
4354 return 0;
4355 }
4356
4357 /*
4358 * Stop tracing for the UST session.
4359 * Called with UST session lock held.
4360 */
4361 int ust_app_stop_trace_all(struct ltt_ust_session *usess)
4362 {
4363 int ret = 0;
4364 struct lttng_ht_iter iter;
4365 struct ust_app *app;
4366
4367 DBG("Stopping all UST traces");
4368
4369 rcu_read_lock();
4370
4371 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4372 ret = ust_app_stop_trace(usess, app);
4373 if (ret < 0) {
4374 /* Continue to the next app even on error */
4375 continue;
4376 }
4377 }
4378
4379 (void) ust_app_flush_session(usess);
4380
4381 rcu_read_unlock();
4382
4383 return 0;
4384 }
4385
4386 /*
4387 * Destroy app UST session.
4388 */
4389 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
4390 {
4391 int ret = 0;
4392 struct lttng_ht_iter iter;
4393 struct ust_app *app;
4394
4395 DBG("Destroy all UST traces");
4396
4397 rcu_read_lock();
4398
4399 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4400 ret = destroy_trace(usess, app);
4401 if (ret < 0) {
4402 /* Continue to the next app even on error */
4403 continue;
4404 }
4405 }
4406
4407 rcu_read_unlock();
4408
4409 return 0;
4410 }
4411
4412 /*
4413 * Add channels/events from UST global domain to registered apps at sock.
4414 */
4415 void ust_app_global_update(struct ltt_ust_session *usess, int sock)
4416 {
4417 int ret = 0;
4418 struct lttng_ht_iter iter, uiter;
4419 struct ust_app *app;
4420 struct ust_app_session *ua_sess = NULL;
4421 struct ust_app_channel *ua_chan;
4422 struct ust_app_event *ua_event;
4423 struct ust_app_ctx *ua_ctx;
4424
4425 assert(usess);
4426 assert(sock >= 0);
4427
4428 DBG2("UST app global update for app sock %d for session id %" PRIu64, sock,
4429 usess->id);
4430
4431 rcu_read_lock();
4432
4433 app = ust_app_find_by_sock(sock);
4434 if (app == NULL) {
4435 /*
4436 * The application can have been unregistered before this point,
4437 * so simply stop the update.
4438 */
4439 DBG3("UST app update failed to find app sock %d", sock);
4440 goto error;
4441 }
4442
4443 if (!app->compatible) {
4444 goto error;
4445 }
4446
4447 ret = create_ust_app_session(usess, app, &ua_sess, NULL);
4448 if (ret < 0) {
4449 /* Tracer is probably gone or ENOMEM. */
4450 goto error;
4451 }
4452 assert(ua_sess);
4453
4454 pthread_mutex_lock(&ua_sess->lock);
4455
4456 if (ua_sess->deleted) {
4457 pthread_mutex_unlock(&ua_sess->lock);
4458 goto error;
4459 }
4460
4461 /*
4462 * We can safely iterate here over all UST app sessions since the create ust
4463 * app session above made a shadow copy of the UST global domain from the
4464 * ltt ust session.
4465 */
4466 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
4467 node.node) {
4468 ret = do_create_channel(app, usess, ua_sess, ua_chan);
4469 if (ret < 0 && ret != -ENOTCONN) {
4470 /*
4471 * Stop everything. On error, the application
4472 * failed, no more file descriptors are available,
4473 * or we hit ENOMEM, so stopping here is the only
4474 * thing we can do for now. The only exception is
4475 * -ENOTCONN, which indicates that the application
4476 * has exited.
4477 */
4478 goto error_unlock;
4479 }
4480
4481 /*
4482 * Add context using the list so they are enabled in the same order the
4483 * user added them.
4484 */
4485 cds_list_for_each_entry(ua_ctx, &ua_chan->ctx_list, list) {
4486 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
4487 if (ret < 0) {
4488 goto error_unlock;
4489 }
4490 }
4491
4492 /* For each event */
4494 cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
4495 node.node) {
4496 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
4497 if (ret < 0) {
4498 goto error_unlock;
4499 }
4500 }
4501 }
4502
4503 pthread_mutex_unlock(&ua_sess->lock);
4504
4505 if (usess->active) {
4506 ret = ust_app_start_trace(usess, app);
4507 if (ret < 0) {
4508 goto error;
4509 }
4510
4511 DBG2("UST trace started for app pid %d", app->pid);
4512 }
4513
4514 /* Everything went well at this point. */
4515 rcu_read_unlock();
4516 return;
4517
4518 error_unlock:
4519 pthread_mutex_unlock(&ua_sess->lock);
4520 error:
4521 if (ua_sess) {
4522 destroy_app_session(app, ua_sess);
4523 }
4524 rcu_read_unlock();
4525 return;
4526 }
4527
4528 /*
4529 * Add context to a specific channel for global UST domain.
4530 */
4531 int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
4532 struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
4533 {
4534 int ret = 0;
4535 struct lttng_ht_node_str *ua_chan_node;
4536 struct lttng_ht_iter iter, uiter;
4537 struct ust_app_channel *ua_chan = NULL;
4538 struct ust_app_session *ua_sess;
4539 struct ust_app *app;
4540
4541 rcu_read_lock();
4542
4543 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4544 if (!app->compatible) {
4545 /*
4546 * TODO: In time, we should notify the caller of this error by
4547 * telling them that this is a version error.
4548 */
4549 continue;
4550 }
4551 ua_sess = lookup_session_by_app(usess, app);
4552 if (ua_sess == NULL) {
4553 continue;
4554 }
4555
4556 pthread_mutex_lock(&ua_sess->lock);
4557
4558 if (ua_sess->deleted) {
4559 pthread_mutex_unlock(&ua_sess->lock);
4560 continue;
4561 }
4562
4563 /* Lookup channel in the ust app session */
4564 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4565 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4566 if (ua_chan_node == NULL) {
4567 goto next_app;
4568 }
4569 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
4570 node);
4571 ret = create_ust_app_channel_context(ua_sess, ua_chan, &uctx->ctx, app);
4572 if (ret < 0) {
4573 goto next_app;
4574 }
4575 next_app:
4576 pthread_mutex_unlock(&ua_sess->lock);
4577 }
4578
4579 rcu_read_unlock();
4580 return ret;
4581 }
4582
4583 /*
4584 * Enable event for a channel from a UST session for a specific PID.
4585 */
4586 int ust_app_enable_event_pid(struct ltt_ust_session *usess,
4587 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
4588 {
4589 int ret = 0;
4590 struct lttng_ht_iter iter;
4591 struct lttng_ht_node_str *ua_chan_node;
4592 struct ust_app *app;
4593 struct ust_app_session *ua_sess;
4594 struct ust_app_channel *ua_chan;
4595 struct ust_app_event *ua_event;
4596
4597 DBG("UST app enabling event %s for PID %d", uevent->attr.name, pid);
4598
4599 rcu_read_lock();
4600
4601 app = ust_app_find_by_pid(pid);
4602 if (app == NULL) {
4603 ERR("UST app enable event per PID %d not found", pid);
4604 ret = -1;
4605 goto end;
4606 }
4607
4608 if (!app->compatible) {
4609 ret = 0;
4610 goto end;
4611 }
4612
4613 ua_sess = lookup_session_by_app(usess, app);
4614 if (!ua_sess) {
4615 /* The application has a problem or is probably dead. */
4616 ret = 0;
4617 goto end;
4618 }
4619
4620 pthread_mutex_lock(&ua_sess->lock);
4621
4622 if (ua_sess->deleted) {
4623 ret = 0;
4624 goto end_unlock;
4625 }
4626
4627 /* Lookup channel in the ust app session */
4628 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
4629 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
4630 /* If the channel is not found, there is a code flow error */
4631 assert(ua_chan_node);
4632
4633 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4634
4635 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4636 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
4637 if (ua_event == NULL) {
4638 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
4639 if (ret < 0) {
4640 goto end_unlock;
4641 }
4642 } else {
4643 ret = enable_ust_app_event(ua_sess, ua_event, app);
4644 if (ret < 0) {
4645 goto end_unlock;
4646 }
4647 }
4648
4649 end_unlock:
4650 pthread_mutex_unlock(&ua_sess->lock);
4651 end:
4652 rcu_read_unlock();
4653 return ret;
4654 }
4655
4656 /*
4657 * Calibrate registered applications.
4658 */
4659 int ust_app_calibrate_glb(struct lttng_ust_calibrate *calibrate)
4660 {
4661 int ret = 0;
4662 struct lttng_ht_iter iter;
4663 struct ust_app *app;
4664
4665 rcu_read_lock();
4666
4667 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4668 if (!app->compatible) {
4669 /*
4670 * TODO: In time, we should notify the caller of this error by
4671 * telling them that this is a version error.
4672 */
4673 continue;
4674 }
4675
4676 health_code_update();
4677
4678 ret = ustctl_calibrate(app->sock, calibrate);
4679 if (ret < 0) {
4680 switch (ret) {
4681 case -ENOSYS:
4682 /* Means that it's not implemented on the tracer side. */
4683 ret = 0;
4684 break;
4685 default:
4686 DBG2("Calibrate app PID %d returned with error %d",
4687 app->pid, ret);
4688 break;
4689 }
4690 }
4691 }
4692
4693 DBG("UST app global domain calibration finished");
4694
4695 rcu_read_unlock();
4696
4697 health_code_update();
4698
4699 return ret;
4700 }
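
/*
 * Caller sketch (hypothetical): issue a function calibration to every
 * registered application. LTTNG_UST_CALIBRATE_FUNCTION is assumed from
 * the lttng-ust ABI header of this era.
 */
#if 0
struct lttng_ust_calibrate calibrate;

memset(&calibrate, 0, sizeof(calibrate));
calibrate.type = LTTNG_UST_CALIBRATE_FUNCTION;
(void) ust_app_calibrate_glb(&calibrate);
#endif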
4701
4702 /*
4703 * Receive registration and populate the given msg structure.
4704 *
4705 * On success return 0 else a negative value returned by the ustctl call.
4706 */
4707 int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
4708 {
4709 int ret;
4710 uint32_t pid, ppid, uid, gid;
4711
4712 assert(msg);
4713
4714 ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
4715 &pid, &ppid, &uid, &gid,
4716 &msg->bits_per_long,
4717 &msg->uint8_t_alignment,
4718 &msg->uint16_t_alignment,
4719 &msg->uint32_t_alignment,
4720 &msg->uint64_t_alignment,
4721 &msg->long_alignment,
4722 &msg->byte_order,
4723 msg->name);
4724 if (ret < 0) {
4725 switch (-ret) {
4726 case EPIPE:
4727 case ECONNRESET:
4728 case LTTNG_UST_ERR_EXITING:
4729 DBG3("UST app recv reg message failed. Application died");
4730 break;
4731 case LTTNG_UST_ERR_UNSUP_MAJOR:
4732 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
4733 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
4734 LTTNG_UST_ABI_MINOR_VERSION);
4735 break;
4736 default:
4737 ERR("UST app recv reg message failed with ret %d", ret);
4738 break;
4739 }
4740 goto error;
4741 }
4742 msg->pid = (pid_t) pid;
4743 msg->ppid = (pid_t) ppid;
4744 msg->uid = (uid_t) uid;
4745 msg->gid = (gid_t) gid;
4746
4747 error:
4748 return ret;
4749 }
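
/*
 * Caller sketch (hypothetical; the real dispatch lives in the registration
 * thread): receive the registration message, then either create the
 * ust_app object or reject the socket.
 */
#if 0
struct ust_register_msg msg;

if (ust_app_recv_registration(sock, &msg) < 0) {
	/* Bad socket, dead application or unsupported ABI version. */
	close(sock);
} else {
	DBG("App registered: pid %d, name %s, ABI %u.%u",
			msg.pid, msg.name, msg.major, msg.minor);
}
#endif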
4750
4751 /*
4752 * Return a ust app channel object using the application object and the channel
4753 * object descriptor as a key. If not found, NULL is returned. An RCU read-side
4754 * lock MUST be acquired before calling this function.
4755 */
4756 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
4757 int objd)
4758 {
4759 struct lttng_ht_node_ulong *node;
4760 struct lttng_ht_iter iter;
4761 struct ust_app_channel *ua_chan = NULL;
4762
4763 assert(app);
4764
4765 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
4766 node = lttng_ht_iter_get_node_ulong(&iter);
4767 if (node == NULL) {
4768 DBG2("UST app channel find by objd %d not found", objd);
4769 goto error;
4770 }
4771
4772 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
4773
4774 error:
4775 return ua_chan;
4776 }
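
/*
 * Usage sketch (illustrative): the lookup and any use of the returned
 * channel must happen inside the same RCU read-side critical section.
 */
#if 0
rcu_read_lock();
ua_chan = find_channel_by_objd(app, objd);
if (ua_chan) {
	/* ... use ua_chan while the read-side lock is held ... */
}
rcu_read_unlock();
#endif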
4777
4778 /*
4779 * Reply to a register channel notification from an application on the notify
4780 * socket. The channel metadata is also created.
4781 *
4782 * The session UST registry lock is acquired in this function.
4783 *
4784 * On success 0 is returned else a negative value.
4785 */
4786 static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
4787 size_t nr_fields, struct ustctl_field *fields)
4788 {
4789 int ret, ret_code = 0;
4790 uint32_t chan_id, reg_count;
4791 uint64_t chan_reg_key;
4792 enum ustctl_channel_header type;
4793 struct ust_app *app;
4794 struct ust_app_channel *ua_chan;
4795 struct ust_app_session *ua_sess;
4796 struct ust_registry_session *registry;
4797 struct ust_registry_channel *chan_reg;
4798
4799 rcu_read_lock();
4800
4801 /* Lookup application. If not found, there is a code flow error. */
4802 app = find_app_by_notify_sock(sock);
4803 if (!app) {
4804 DBG("Application socket %d is being teardown. Abort event notify",
4805 sock);
4806 ret = 0;
4807 free(fields);
4808 goto error_rcu_unlock;
4809 }
4810
4811 /* Lookup channel by UST object descriptor. */
4812 ua_chan = find_channel_by_objd(app, cobjd);
4813 if (!ua_chan) {
4814 DBG("Application channel is being teardown. Abort event notify");
4815 ret = 0;
4816 free(fields);
4817 goto error_rcu_unlock;
4818 }
4819
4820 assert(ua_chan->session);
4821 ua_sess = ua_chan->session;
4822
4823 /* Get right session registry depending on the session buffer type. */
4824 registry = get_session_registry(ua_sess);
4825 assert(registry);
4826
4827 /* Depending on the buffer type, a different channel key is used. */
4828 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
4829 chan_reg_key = ua_chan->tracing_channel_id;
4830 } else {
4831 chan_reg_key = ua_chan->key;
4832 }
4833
4834 pthread_mutex_lock(&registry->lock);
4835
4836 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
4837 assert(chan_reg);
4838
4839 if (!chan_reg->register_done) {
4840 reg_count = ust_registry_get_event_count(chan_reg);
4841 if (reg_count < 31) {
4842 type = USTCTL_CHANNEL_HEADER_COMPACT;
4843 } else {
4844 type = USTCTL_CHANNEL_HEADER_LARGE;
4845 }
4846
4847 chan_reg->nr_ctx_fields = nr_fields;
4848 chan_reg->ctx_fields = fields;
4849 chan_reg->header_type = type;
4850 } else {
4851 /* Get current already assigned values. */
4852 type = chan_reg->header_type;
4853 free(fields);
4854 /* Set to NULL so the error path does not do a double free. */
4855 fields = NULL;
4856 }
4857 /* Channel id is set during the object creation. */
4858 chan_id = chan_reg->chan_id;
4859
4860 /* Append to metadata */
4861 if (!chan_reg->metadata_dumped) {
4862 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
4863 if (ret_code) {
4864 ERR("Error appending channel metadata (errno = %d)", ret_code);
4865 goto reply;
4866 }
4867 }
4868
4869 reply:
4870 DBG3("UST app replying to register channel key %" PRIu64
4871 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
4872 ret_code);
4873
4874 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
4875 if (ret < 0) {
4876 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4877 ERR("UST app reply channel failed with ret %d", ret);
4878 } else {
4879 DBG3("UST app reply channel failed. Application died");
4880 }
4881 goto error;
4882 }
4883
4884 /* This channel registry registration is completed. */
4885 chan_reg->register_done = 1;
4886
4887 error:
4888 pthread_mutex_unlock(&registry->lock);
4889 error_rcu_unlock:
4890 rcu_read_unlock();
4891 if (ret) {
4892 free(fields);
4893 }
4894 return ret;
4895 }
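
/*
 * Worked example for the header-type decision above: a channel with 12
 * events registered at reply time gets USTCTL_CHANNEL_HEADER_COMPACT,
 * while one with 40 events gets USTCTL_CHANNEL_HEADER_LARGE. The
 * threshold of 31 presumably corresponds to the compact header's 5-bit
 * event id space, with one value reserved to escape to the large header.
 */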
4896
4897 /*
4898 * Add event to the UST channel registry. When the event is added to the
4899 * registry, the metadata is also created. Once done, this replies to the
4900 * application with the appropriate error code.
4901 *
4902 * The session UST registry lock is acquired in the function.
4903 *
4904 * On success 0 is returned else a negative value.
4905 */
4906 static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
4907 char *sig, size_t nr_fields, struct ustctl_field *fields, int loglevel,
4908 char *model_emf_uri)
4909 {
4910 int ret, ret_code;
4911 uint32_t event_id = 0;
4912 uint64_t chan_reg_key;
4913 struct ust_app *app;
4914 struct ust_app_channel *ua_chan;
4915 struct ust_app_session *ua_sess;
4916 struct ust_registry_session *registry;
4917
4918 rcu_read_lock();
4919
4920 /* Lookup application. If not found, there is a code flow error. */
4921 app = find_app_by_notify_sock(sock);
4922 if (!app) {
4923 DBG("Application socket %d is being teardown. Abort event notify",
4924 sock);
4925 ret = 0;
4926 free(sig);
4927 free(fields);
4928 free(model_emf_uri);
4929 goto error_rcu_unlock;
4930 }
4931
4932 /* Lookup channel by UST object descriptor. */
4933 ua_chan = find_channel_by_objd(app, cobjd);
4934 if (!ua_chan) {
4935 DBG("Application channel is being teardown. Abort event notify");
4936 ret = 0;
4937 free(sig);
4938 free(fields);
4939 free(model_emf_uri);
4940 goto error_rcu_unlock;
4941 }
4942
4943 assert(ua_chan->session);
4944 ua_sess = ua_chan->session;
4945
4946 registry = get_session_registry(ua_sess);
4947 assert(registry);
4948
4949 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
4950 chan_reg_key = ua_chan->tracing_channel_id;
4951 } else {
4952 chan_reg_key = ua_chan->key;
4953 }
4954
4955 pthread_mutex_lock(&registry->lock);
4956
4957 /*
4958 * From this point on, this call acquires the ownership of the sig, fields
4959 * and model_emf_uri, meaning any needed free is done inside it. These
4960 * three variables MUST NOT be read or written after this.
4961 */
4962 ret_code = ust_registry_create_event(registry, chan_reg_key,
4963 sobjd, cobjd, name, sig, nr_fields, fields, loglevel,
4964 model_emf_uri, ua_sess->buffer_type, &event_id,
4965 app);
4966
4967 /*
4968 * The return value is sent back through ustctl so that, in case of an
4969 * error, the application can be notified. It's important not to return
4970 * a negative error, or else the application will get closed.
4971 */
4972 ret = ustctl_reply_register_event(sock, event_id, ret_code);
4973 if (ret < 0) {
4974 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4975 ERR("UST app reply event failed with ret %d", ret);
4976 } else {
4977 DBG3("UST app reply event failed. Application died");
4978 }
4979 /*
4980 * No need to wipe the created event since the application socket will
4981 * get closed on error, hence cleaning up everything by itself.
4982 */
4983 goto error;
4984 }
4985
4986 DBG3("UST registry event %s with id %" PRId32 " added successfully",
4987 name, event_id);
4988
4989 error:
4990 pthread_mutex_unlock(&registry->lock);
4991 error_rcu_unlock:
4992 rcu_read_unlock();
4993 return ret;
4994 }
4995
4996 /*
4997 * Handle application notification through the given notify socket.
4998 *
4999 * Return 0 on success or else a negative value.
5000 */
5001 int ust_app_recv_notify(int sock)
5002 {
5003 int ret;
5004 enum ustctl_notify_cmd cmd;
5005
5006 DBG3("UST app receiving notify from sock %d", sock);
5007
5008 ret = ustctl_recv_notify(sock, &cmd);
5009 if (ret < 0) {
5010 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5011 ERR("UST app recv notify failed with ret %d", ret);
5012 } else {
5013 DBG3("UST app recv notify failed. Application died");
5014 }
5015 goto error;
5016 }
5017
5018 switch (cmd) {
5019 case USTCTL_NOTIFY_CMD_EVENT:
5020 {
5021 int sobjd, cobjd, loglevel;
5022 char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
5023 size_t nr_fields;
5024 struct ustctl_field *fields;
5025
5026 DBG2("UST app ustctl register event received");
5027
5028 ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name, &loglevel,
5029 &sig, &nr_fields, &fields, &model_emf_uri);
5030 if (ret < 0) {
5031 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5032 ERR("UST app recv event failed with ret %d", ret);
5033 } else {
5034 DBG3("UST app recv event failed. Application died");
5035 }
5036 goto error;
5037 }
5038
5039 /*
5040 * Add event to the UST registry coming from the notify socket. This
5041 * call will free, if needed, the sig, fields and model_emf_uri. This
5042 * code path loses the ownership of these variables and transfers it
5043 * to that function.
5044 */
5045 ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
5046 fields, loglevel, model_emf_uri);
5047 if (ret < 0) {
5048 goto error;
5049 }
5050
5051 break;
5052 }
5053 case USTCTL_NOTIFY_CMD_CHANNEL:
5054 {
5055 int sobjd, cobjd;
5056 size_t nr_fields;
5057 struct ustctl_field *fields;
5058
5059 DBG2("UST app ustctl register channel received");
5060
5061 ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
5062 &fields);
5063 if (ret < 0) {
5064 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5065 ERR("UST app recv channel failed with ret %d", ret);
5066 } else {
5067 DBG3("UST app recv channel failed. Application died");
5068 }
5069 goto error;
5070 }
5071
5072 /*
5073 * Ownership of the fields is transferred to this function call, meaning
5074 * that they will be freed if needed. After this, it's invalid to access
5075 * the fields or clean them up.
5076 */
5077 ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,
5078 fields);
5079 if (ret < 0) {
5080 goto error;
5081 }
5082
5083 break;
5084 }
5085 default:
5086 /* Should NEVER happen. */
5087 assert(0);
5088 }
5089
5090 error:
5091 return ret;
5092 }
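
/*
 * Caller sketch (hypothetical; the real loop lives in the notify thread):
 * one call handles one command on a readable notify socket, and a negative
 * return leads to the socket being unregistered.
 */
#if 0
if (ust_app_recv_notify(sock) < 0) {
	ust_app_notify_sock_unregister(sock);
}
#endif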
5093
5094 /*
5095 * Once the notify socket hangs up, this is called. First, it tries to find the
5096 * corresponding application. On failure, the call_rcu to close the socket is
5097 * executed. If an application is found, it tries to delete it from the notify
5098 * socket hash table. Whatever the result, it proceeds to the call_rcu.
5099 *
5100 * Note that an object needs to be allocated here, so on ENOMEM failure
5101 * the call_rcu is not done but the rest of the cleanup is.
5102 */
5103 void ust_app_notify_sock_unregister(int sock)
5104 {
5105 int err_enomem = 0;
5106 struct lttng_ht_iter iter;
5107 struct ust_app *app;
5108 struct ust_app_notify_sock_obj *obj;
5109
5110 assert(sock >= 0);
5111
5112 rcu_read_lock();
5113
5114 obj = zmalloc(sizeof(*obj));
5115 if (!obj) {
5116 /*
5117 * An ENOMEM is kind of uncool. If this strikes, we continue the
5118 * procedure but the call_rcu will not be called. In this case, we
5119 * accept the fd leak rather than possibly creating an unsynchronized
5120 * state between threads.
5121 *
5122 * TODO: The notify object should be created once the notify socket is
5123 * registered and stored independently from the ust app object. The
5124 * tricky part is to synchronize the teardown of the application and
5125 * this notify object. Let's keep that in mind so we can avoid this
5126 * kind of shenanigans with ENOMEM in the teardown path.
5127 */
5128 err_enomem = 1;
5129 } else {
5130 obj->fd = sock;
5131 }
5132
5133 DBG("UST app notify socket unregister %d", sock);
5134
5135 /*
5136 * Lookup application by notify socket. If this fails, this means that the
5137 * hash table delete has already been done by the application
5138 * unregistration process so we can safely close the notify socket in a
5139 * call RCU.
5140 */
5141 app = find_app_by_notify_sock(sock);
5142 if (!app) {
5143 goto close_socket;
5144 }
5145
5146 iter.iter.node = &app->notify_sock_n.node;
5147
5148 /*
5149 * Whatever happens here, whether we fail or succeed, in both cases we
5150 * have to close the socket after a grace period to continue to the call
5151 * RCU here. If the deletion is successful, the application is no longer
5152 * visible to other threads, and if it fails it means that it was already
5153 * deleted from the hash table, so either way we just have to close the
5154 * socket.
5155 */
5156 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
5157
5158 close_socket:
5159 rcu_read_unlock();
5160
5161 /*
5162 * Close the socket after a grace period to avoid the socket being reused
5163 * before the application object is freed, creating a potential race between
5164 * threads trying to add a unique entry to the global hash table.
5165 */
5166 if (!err_enomem) {
5167 call_rcu(&obj->head, close_notify_sock_rcu);
5168 }
5169 }
5170
5171 /*
5172 * Destroy a ust app data structure and free its memory.
5173 */
5174 void ust_app_destroy(struct ust_app *app)
5175 {
5176 if (!app) {
5177 return;
5178 }
5179
5180 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
5181 }
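
/*
 * Illustrative only: why ust_app_destroy() defers the free with call_rcu()
 * instead of freeing immediately. A concurrent reader may still hold a
 * reference obtained under the RCU read-side lock:
 */
#if 0
rcu_read_lock();
app = ust_app_find_by_pid(pid);	/* another thread */
/* ... app must remain valid here even if ust_app_destroy() ran ... */
rcu_read_unlock();
/* The deferred delete_ust_app_rcu() runs only after such readers finish. */
#endif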
5182
5183 /*
5184 * Take a snapshot for a given UST session. The snapshot is sent to the given
5185 * output.
5186 *
5187 * Return 0 on success or else a negative value.
5188 */
5189 int ust_app_snapshot_record(struct ltt_ust_session *usess,
5190 struct snapshot_output *output, int wait,
5191 uint64_t nb_packets_per_stream)
5192 {
5193 int ret = 0;
5194 unsigned int snapshot_done = 0;
5195 struct lttng_ht_iter iter;
5196 struct ust_app *app;
5197 char pathname[PATH_MAX];
5198
5199 assert(usess);
5200 assert(output);
5201
5202 rcu_read_lock();
5203
5204 switch (usess->buffer_type) {
5205 case LTTNG_BUFFER_PER_UID:
5206 {
5207 struct buffer_reg_uid *reg;
5208
5209 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5210 struct buffer_reg_channel *reg_chan;
5211 struct consumer_socket *socket;
5212
5213 /* Get consumer socket to use to push the metadata. */
5214 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
5215 usess->consumer);
5216 if (!socket) {
5217 ret = -EINVAL;
5218 goto error;
5219 }
5220
5221 memset(pathname, 0, sizeof(pathname));
5222 ret = snprintf(pathname, sizeof(pathname),
5223 DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
5224 reg->uid, reg->bits_per_long);
5225 if (ret < 0) {
5226 PERROR("snprintf snapshot path");
5227 goto error;
5228 }
5229
5230 /* Add the UST default trace dir to path. */
5231 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5232 reg_chan, node.node) {
5233 ret = consumer_snapshot_channel(socket, reg_chan->consumer_key,
5234 output, 0, usess->uid, usess->gid, pathname, wait,
5235 nb_packets_per_stream);
5236 if (ret < 0) {
5237 goto error;
5238 }
5239 }
5240 ret = consumer_snapshot_channel(socket,
5241 reg->registry->reg.ust->metadata_key, output, 1,
5242 usess->uid, usess->gid, pathname, wait, 0);
5243 if (ret < 0) {
5244 goto error;
5245 }
5246 snapshot_done = 1;
5247 }
5248 break;
5249 }
5250 case LTTNG_BUFFER_PER_PID:
5251 {
5252 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5253 struct consumer_socket *socket;
5254 struct lttng_ht_iter chan_iter;
5255 struct ust_app_channel *ua_chan;
5256 struct ust_app_session *ua_sess;
5257 struct ust_registry_session *registry;
5258
5259 ua_sess = lookup_session_by_app(usess, app);
5260 if (!ua_sess) {
5261 /* Session not associated with this app. */
5262 continue;
5263 }
5264
5265 /* Get the right consumer socket for the application. */
5266 socket = consumer_find_socket_by_bitness(app->bits_per_long,
5267 output->consumer);
5268 if (!socket) {
5269 ret = -EINVAL;
5270 goto error;
5271 }
5272
5273 /* Add the UST default trace dir to path. */
5274 memset(pathname, 0, sizeof(pathname));
5275 ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
5276 ua_sess->path);
5277 if (ret < 0) {
5278 PERROR("snprintf snapshot path");
5279 goto error;
5280 }
5281
5282 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
5283 ua_chan, node.node) {
5284 ret = consumer_snapshot_channel(socket, ua_chan->key, output,
5285 0, ua_sess->euid, ua_sess->egid, pathname, wait,
5286 nb_packets_per_stream);
5287 if (ret < 0) {
5288 goto error;
5289 }
5290 }
5291
5292 registry = get_session_registry(ua_sess);
5293 assert(registry);
5294 ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
5295 1, ua_sess->euid, ua_sess->egid, pathname, wait, 0);
5296 if (ret < 0) {
5297 goto error;
5298 }
5299 snapshot_done = 1;
5300 }
5301 break;
5302 }
5303 default:
5304 assert(0);
5305 break;
5306 }
5307
5308 if (!snapshot_done) {
5309 /*
5310 * If no snapshot was made and we are not in the error path, this means
5311 * that there are no buffers, thus no (prior) application to snapshot
5312 * data from, so we simply have NO data.
5313 */
5314 ret = -ENODATA;
5315 }
5316
5317 error:
5318 rcu_read_unlock();
5319 return ret;
5320 }
5321
5322 /*
5323 * Return the size taken by one more packet per stream.
5324 */
5325 uint64_t ust_app_get_size_one_more_packet_per_stream(struct ltt_ust_session *usess,
5326 uint64_t cur_nr_packets)
5327 {
5328 uint64_t tot_size = 0;
5329 struct ust_app *app;
5330 struct lttng_ht_iter iter;
5331
5332 assert(usess);
5333
5334 switch (usess->buffer_type) {
5335 case LTTNG_BUFFER_PER_UID:
5336 {
5337 struct buffer_reg_uid *reg;
5338
5339 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5340 struct buffer_reg_channel *reg_chan;
5341
5342 rcu_read_lock();
5343 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5344 reg_chan, node.node) {
5345 if (cur_nr_packets >= reg_chan->num_subbuf) {
5346 /*
5347 * Don't take the channel into account if we
5348 * already grabbed all its packets.
5349 */
5350 continue;
5351 }
5352 tot_size += reg_chan->subbuf_size * reg_chan->stream_count;
5353 }
5354 rcu_read_unlock();
5355 }
5356 break;
5357 }
5358 case LTTNG_BUFFER_PER_PID:
5359 {
5360 rcu_read_lock();
5361 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5362 struct ust_app_channel *ua_chan;
5363 struct ust_app_session *ua_sess;
5364 struct lttng_ht_iter chan_iter;
5365
5366 ua_sess = lookup_session_by_app(usess, app);
5367 if (!ua_sess) {
5368 /* Session not associated with this app. */
5369 continue;
5370 }
5371
5372 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
5373 ua_chan, node.node) {
5374 if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
5375 /*
5376 * Don't take the channel into account if we
5377 * already grabbed all its packets.
5378 */
5379 continue;
5380 }
5381 tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
5382 }
5383 }
5384 rcu_read_unlock();
5385 break;
5386 }
5387 default:
5388 assert(0);
5389 break;
5390 }
5391
5392 return tot_size;
5393 }
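
/*
 * Worked example (hypothetical numbers): a per-PID channel with 8 streams
 * and 4 sub-buffers of 64 KiB each. While cur_nr_packets < 4, one more
 * packet per stream adds subbuf_size * stream_count = 64 KiB * 8 = 512 KiB
 * to tot_size; once cur_nr_packets reaches 4, the channel is skipped since
 * every sub-buffer is already accounted for.
 */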