Fix: add UST context in the same order the user enabled them
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2 only,
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18 #define _GNU_SOURCE
19 #include <errno.h>
20 #include <inttypes.h>
21 #include <pthread.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <sys/stat.h>
26 #include <sys/types.h>
27 #include <unistd.h>
28 #include <urcu/compiler.h>
29 #include <lttng/ust-error.h>
30 #include <signal.h>
31
32 #include <common/common.h>
33 #include <common/sessiond-comm/sessiond-comm.h>
34
35 #include "buffer-registry.h"
36 #include "fd-limit.h"
37 #include "health.h"
38 #include "ust-app.h"
39 #include "ust-consumer.h"
40 #include "ust-ctl.h"
41 #include "utils.h"
42
43 /* Next available channel key. Access under next_channel_key_lock. */
44 static uint64_t _next_channel_key;
45 static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
46
47 /* Next available session ID. Access under next_session_id_lock. */
48 static uint64_t _next_session_id;
49 static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
50
51 /*
52 * Return the incremented value of next_channel_key.
53 */
54 static uint64_t get_next_channel_key(void)
55 {
56 uint64_t ret;
57
58 pthread_mutex_lock(&next_channel_key_lock);
59 ret = ++_next_channel_key;
60 pthread_mutex_unlock(&next_channel_key_lock);
61 return ret;
62 }
63
64 /*
65 * Return the atomically incremented value of next_session_id.
66 */
67 static uint64_t get_next_session_id(void)
68 {
69 uint64_t ret;
70
71 pthread_mutex_lock(&next_session_id_lock);
72 ret = ++_next_session_id;
73 pthread_mutex_unlock(&next_session_id_lock);
74 return ret;
75 }
76
/*
 * Copy the channel attributes shared between the lttng_ust layout and the
 * ustctl consumer layout. 'attr' is the destination; only the fields below
 * are transferred, the rest of 'attr' is left untouched.
 */
static void copy_channel_attr_to_ustctl(
		struct ustctl_consumer_channel_attr *attr,
		struct lttng_ust_channel_attr *uattr)
{
	/* Copy event attributes since the layout is different. */
	attr->subbuf_size = uattr->subbuf_size;
	attr->num_subbuf = uattr->num_subbuf;
	attr->overwrite = uattr->overwrite;
	attr->switch_timer_interval = uattr->switch_timer_interval;
	attr->read_timer_interval = uattr->read_timer_interval;
	attr->output = uattr->output;
}
89
/*
 * Match function for the hash table lookup.
 *
 * It matches an ust app event based on three attributes which are the event
 * name, the filter bytecode and the loglevel.
 *
 * Return 1 if the node matches the key, 0 otherwise.
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;

	assert(node);
	assert(_key);

	event = caa_container_of(node, struct ust_app_event, node.node);
	key = _key;

	/* Match the 3 elements of the key: name, filter and loglevel. */

	/* Event name */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (event->attr.loglevel != key->loglevel) {
		if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
				&& key->loglevel == 0 && event->attr.loglevel == -1) {
			/*
			 * Match is accepted. This is because on event creation, the
			 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
			 * -1 are accepted for this loglevel type since 0 is the one set by
			 * the API when receiving an enable event.
			 */
		} else {
			goto no_match;
		}
	}

	/* One of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exists, check length followed by the bytecode. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* Match. */
	return 1;

no_match:
	return 0;
}
149
/*
 * Unique add of an ust app event in the given ht. This uses the custom
 * ht_match_ust_app_event match function and the event name as hash.
 *
 * The caller must guarantee that no event with the same (name, filter,
 * loglevel) triple is already in the table: if cds_lfht_add_unique() finds a
 * duplicate it returns the existing node and the assert below fires.
 */
static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
		struct ust_app_event *event)
{
	struct cds_lfht_node *node_ptr;
	struct ust_app_ht_key key;
	struct lttng_ht *ht;

	assert(ua_chan);
	assert(ua_chan->events);
	assert(event);

	ht = ua_chan->events;
	/* Key mirrors the three attributes checked by ht_match_ust_app_event. */
	key.name = event->attr.name;
	key.filter = event->filter;
	key.loglevel = event->attr.loglevel;

	node_ptr = cds_lfht_add_unique(ht->ht,
			ht->hash_fct(event->node.key, lttng_ht_seed),
			ht_match_ust_app_event, &key, &event->node.node);
	/* add_unique returns the inserted node only when no duplicate existed. */
	assert(node_ptr == &event->node.node);
}
175
176 /*
177 * Close the notify socket from the given RCU head object. This MUST be called
178 * through a call_rcu().
179 */
180 static void close_notify_sock_rcu(struct rcu_head *head)
181 {
182 int ret;
183 struct ust_app_notify_sock_obj *obj =
184 caa_container_of(head, struct ust_app_notify_sock_obj, head);
185
186 /* Must have a valid fd here. */
187 assert(obj->fd >= 0);
188
189 ret = close(obj->fd);
190 if (ret) {
191 ERR("close notify sock %d RCU", obj->fd);
192 }
193 lttng_fd_put(LTTNG_FD_APPS, 1);
194
195 free(obj);
196 }
197
198 /*
199 * Return the session registry according to the buffer type of the given
200 * session.
201 *
202 * A registry per UID object MUST exists before calling this function or else
203 * it assert() if not found. RCU read side lock must be acquired.
204 */
205 static struct ust_registry_session *get_session_registry(
206 struct ust_app_session *ua_sess)
207 {
208 struct ust_registry_session *registry = NULL;
209
210 assert(ua_sess);
211
212 switch (ua_sess->buffer_type) {
213 case LTTNG_BUFFER_PER_PID:
214 {
215 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
216 if (!reg_pid) {
217 goto error;
218 }
219 registry = reg_pid->registry->reg.ust;
220 break;
221 }
222 case LTTNG_BUFFER_PER_UID:
223 {
224 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
225 ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
226 if (!reg_uid) {
227 goto error;
228 }
229 registry = reg_uid->registry->reg.ust;
230 break;
231 }
232 default:
233 assert(0);
234 };
235
236 error:
237 return registry;
238 }
239
240 /*
241 * Delete ust context safely. RCU read lock must be held before calling
242 * this function.
243 */
244 static
245 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx)
246 {
247 int ret;
248
249 assert(ua_ctx);
250
251 if (ua_ctx->obj) {
252 ret = ustctl_release_object(sock, ua_ctx->obj);
253 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
254 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
255 sock, ua_ctx->obj->handle, ret);
256 }
257 free(ua_ctx->obj);
258 }
259 free(ua_ctx);
260 }
261
262 /*
263 * Delete ust app event safely. RCU read lock must be held before calling
264 * this function.
265 */
266 static
267 void delete_ust_app_event(int sock, struct ust_app_event *ua_event)
268 {
269 int ret;
270
271 assert(ua_event);
272
273 free(ua_event->filter);
274
275 if (ua_event->obj != NULL) {
276 ret = ustctl_release_object(sock, ua_event->obj);
277 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
278 ERR("UST app sock %d release event obj failed with ret %d",
279 sock, ret);
280 }
281 free(ua_event->obj);
282 }
283 free(ua_event);
284 }
285
286 /*
287 * Release ust data object of the given stream.
288 *
289 * Return 0 on success or else a negative value.
290 */
291 static int release_ust_app_stream(int sock, struct ust_app_stream *stream)
292 {
293 int ret = 0;
294
295 assert(stream);
296
297 if (stream->obj) {
298 ret = ustctl_release_object(sock, stream->obj);
299 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
300 ERR("UST app sock %d release stream obj failed with ret %d",
301 sock, ret);
302 }
303 lttng_fd_put(LTTNG_FD_APPS, 2);
304 free(stream->obj);
305 }
306
307 return ret;
308 }
309
/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 *
 * The tracer-side object is released first (return value deliberately
 * ignored; failures are logged inside release_ust_app_stream), then the
 * stream structure itself is freed.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream)
{
	assert(stream);

	(void) release_ust_app_stream(sock, stream);
	free(stream);
}
322
/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_channel().
 */
static
void delete_ust_app_channel_rcu(struct rcu_head *head)
{
	struct ust_app_channel *ua_chan =
		caa_container_of(head, struct ust_app_channel, rcu_head);

	/* Defer hash table destruction to the dedicated ht-cleanup thread. */
	ht_cleanup_push(ua_chan->ctx);
	ht_cleanup_push(ua_chan->events);
	free(ua_chan);
}
339
/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * Teardown order matters: streams, then contexts, then events, then the
 * per-PID registry entry, and finally the tracer-side channel object. The
 * channel structure and its hash tables are reclaimed later through
 * call_rcu().
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		/* Contexts are on both a list and a hash table; unlink from both. */
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		lttng_ht_del(app->ust_objd, &iter);
		ret = ustctl_release_object(sock, ua_chan->obj);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		/* One fd slot was taken by the channel object. */
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	/* Free the structure (and destroy its hts) after a grace period. */
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}
403
/*
 * Push metadata to consumer socket.
 *
 * The socket lock MUST be acquired.
 * The ust app session lock MUST be acquired.
 *
 * On success, return the len of metadata pushed or else a negative value.
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset;
	ssize_t ret_val;

	assert(registry);
	assert(socket);

	/*
	 * On a push metadata error either the consumer is dead or the metadata
	 * channel has been destroyed because its endpoint might have died (e.g:
	 * relayd). If so, the metadata closed flag is set to 1 so we deny pushing
	 * metadata again which is not valid anymore on the consumer side.
	 *
	 * The ust app session mutex locked allows us to make this check without
	 * the registry lock.
	 */
	if (registry->metadata_closed) {
		return -EPIPE;
	}

	pthread_mutex_lock(&registry->lock);

	/* Only the not-yet-sent tail of the metadata cache is pushed. */
	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			/* Caller asked for an empty push (metadata flush semantic). */
			DBG("No metadata to push");
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't send out. */
	memcpy(metadata_str, registry->metadata + offset, len);
	registry->metadata_len_sent += len;

push_data:
	/* Registry lock is released before talking to the consumer. */
	pthread_mutex_unlock(&registry->lock);
	ret = consumer_push_metadata(socket, registry->metadata_key,
			metadata_str, len, offset);
	if (ret < 0) {
		ret_val = ret;
		goto error_push;
	}

	free(metadata_str);
	return len;

end:
error:
	pthread_mutex_unlock(&registry->lock);
error_push:
	/* metadata_str may be NULL here (len == 0 paths); free(NULL) is fine. */
	free(metadata_str);
	return ret_val;
}
481
/*
 * For a given application and session, push metadata to consumer. The session
 * lock MUST be acquired here before calling this.
 * Either sock or consumer is required : if sock is NULL, the default
 * socket to send the metadata is retrieved from consumer, if sock
 * is not NULL we use it to send the metadata.
 *
 * Return 0 on success else a negative error.
 */
static int push_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret_val;
	ssize_t ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	/*
	 * Means that no metadata was assigned to the session. This can happens if
	 * no start has been done previously.
	 */
	if (!registry->metadata_key) {
		ret_val = 0;
		goto end_rcu_unlock;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret_val = -1;
		goto error_rcu_unlock;
	}

	/*
	 * TODO: Currently, we hold the socket lock around sampling of the next
	 * metadata segment to ensure we send metadata over the consumer socket in
	 * the correct order. This makes the registry lock nest inside the socket
	 * lock.
	 *
	 * Please note that this is a temporary measure: we should move this lock
	 * back into ust_consumer_push_metadata() when the consumer gets the
	 * ability to reorder the metadata it receives.
	 */
	pthread_mutex_lock(socket->lock);
	ret = ust_app_push_metadata(registry, socket, 0);
	pthread_mutex_unlock(socket->lock);
	if (ret < 0) {
		ret_val = ret;
		goto error_rcu_unlock;
	}

	rcu_read_unlock();
	return 0;

error_rcu_unlock:
	/*
	 * On error, flag the registry that the metadata is closed. We were unable
	 * to push anything and this means that either the consumer is not
	 * responding or the metadata cache has been destroyed on the consumer.
	 */
	registry->metadata_closed = 1;
end_rcu_unlock:
	rcu_read_unlock();
	return ret_val;
}
552
/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be acquired here unless the application is
 * in the destroy path.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	/* Nothing to close: never assigned, or already closed. */
	if (!registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto error;
	}

	ret = consumer_close_metadata(socket, registry->metadata_key);
	if (ret < 0) {
		goto error;
	}

	/* NOTE: success also falls through to the error label on purpose. */
error:
	/*
	 * Metadata closed. Even on error this means that the consumer is not
	 * responding or not found so either way a second close should NOT be emit
	 * for this registry.
	 */
	registry->metadata_closed = 1;
end:
	rcu_read_unlock();
	return ret;
}
601
/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_session().
 */
static
void delete_ust_app_session_rcu(struct rcu_head *head)
{
	struct ust_app_session *ua_sess =
		caa_container_of(head, struct ust_app_session, rcu_head);

	/* Defer channels ht destruction to the dedicated ht-cleanup thread. */
	ht_cleanup_push(ua_sess->channels);
	free(ua_sess);
}
617
/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * Flushes remaining metadata (and optionally closes it), deletes every
 * channel, drops the per-PID buffer registry if any, releases the tracer
 * session handle and finally schedules the structure for RCU reclamation.
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	registry = get_session_registry(ua_sess);
	if (registry && !registry->metadata_closed) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flag the metadata registry to
		 * close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
				!registry->metadata_closed) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	/* Zero could be a valid handle, hence the -1 sentinel. */
	if (ua_sess->handle != -1) {
		ret = ustctl_release_handle(sock, ua_sess->handle);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
	}
	pthread_mutex_unlock(&ua_sess->lock);

	/* Structure is freed (and channels ht destroyed) after a grace period. */
	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
680
/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;

	/* Delete ust app sessions info */
	sock = app->sock;
	/* Invalidate the stored sock so nobody reuses it during teardown. */
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/* Defer hash table destruction to the ht-cleanup thread. */
	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_objd);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	/* Return the fd slot to the apps fd budget. */
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
}
731
/*
 * URCU intermediate call to delete an UST app.
 *
 * The rcu_head is embedded in the app's pid hash table node, hence the
 * double caa_container_of() below.
 */
static
void delete_ust_app_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_ulong *node =
		caa_container_of(head, struct lttng_ht_node_ulong, head);
	struct ust_app *app =
		caa_container_of(node, struct ust_app, pid_n);

	DBG3("Call RCU deleting app PID %d", app->pid);
	delete_ust_app(app);
}
746
747 /*
748 * Delete the session from the application ht and delete the data structure by
749 * freeing every object inside and releasing them.
750 */
751 static void destroy_app_session(struct ust_app *app,
752 struct ust_app_session *ua_sess)
753 {
754 int ret;
755 struct lttng_ht_iter iter;
756
757 assert(app);
758 assert(ua_sess);
759
760 iter.iter.node = &ua_sess->node.node;
761 ret = lttng_ht_del(app->sessions, &iter);
762 if (ret) {
763 /* Already scheduled for teardown. */
764 goto end;
765 }
766
767 /* Once deleted, free the data structure. */
768 delete_ust_app_session(app->sock, ua_sess, app);
769
770 end:
771 return;
772 }
773
774 /*
775 * Alloc new UST app session.
776 */
777 static
778 struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
779 {
780 struct ust_app_session *ua_sess;
781
782 /* Init most of the default value by allocating and zeroing */
783 ua_sess = zmalloc(sizeof(struct ust_app_session));
784 if (ua_sess == NULL) {
785 PERROR("malloc");
786 goto error_free;
787 }
788
789 ua_sess->handle = -1;
790 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
791 pthread_mutex_init(&ua_sess->lock, NULL);
792
793 return ua_sess;
794
795 error_free:
796 return NULL;
797 }
798
799 /*
800 * Alloc new UST app channel.
801 */
802 static
803 struct ust_app_channel *alloc_ust_app_channel(char *name,
804 struct ust_app_session *ua_sess,
805 struct lttng_ust_channel_attr *attr)
806 {
807 struct ust_app_channel *ua_chan;
808
809 /* Init most of the default value by allocating and zeroing */
810 ua_chan = zmalloc(sizeof(struct ust_app_channel));
811 if (ua_chan == NULL) {
812 PERROR("malloc");
813 goto error;
814 }
815
816 /* Setup channel name */
817 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
818 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
819
820 ua_chan->enabled = 1;
821 ua_chan->handle = -1;
822 ua_chan->session = ua_sess;
823 ua_chan->key = get_next_channel_key();
824 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
825 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
826 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
827
828 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
829 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
830
831 /* Copy attributes */
832 if (attr) {
833 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
834 ua_chan->attr.subbuf_size = attr->subbuf_size;
835 ua_chan->attr.num_subbuf = attr->num_subbuf;
836 ua_chan->attr.overwrite = attr->overwrite;
837 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
838 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
839 ua_chan->attr.output = attr->output;
840 }
841 /* By default, the channel is a per cpu channel. */
842 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
843
844 DBG3("UST app channel %s allocated", ua_chan->name);
845
846 return ua_chan;
847
848 error:
849 return NULL;
850 }
851
852 /*
853 * Allocate and initialize a UST app stream.
854 *
855 * Return newly allocated stream pointer or NULL on error.
856 */
857 struct ust_app_stream *ust_app_alloc_stream(void)
858 {
859 struct ust_app_stream *stream = NULL;
860
861 stream = zmalloc(sizeof(*stream));
862 if (stream == NULL) {
863 PERROR("zmalloc ust app stream");
864 goto error;
865 }
866
867 /* Zero could be a valid value for a handle so flag it to -1. */
868 stream->handle = -1;
869
870 error:
871 return stream;
872 }
873
874 /*
875 * Alloc new UST app event.
876 */
877 static
878 struct ust_app_event *alloc_ust_app_event(char *name,
879 struct lttng_ust_event *attr)
880 {
881 struct ust_app_event *ua_event;
882
883 /* Init most of the default value by allocating and zeroing */
884 ua_event = zmalloc(sizeof(struct ust_app_event));
885 if (ua_event == NULL) {
886 PERROR("malloc");
887 goto error;
888 }
889
890 ua_event->enabled = 1;
891 strncpy(ua_event->name, name, sizeof(ua_event->name));
892 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
893 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
894
895 /* Copy attributes */
896 if (attr) {
897 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
898 }
899
900 DBG3("UST app event %s allocated", ua_event->name);
901
902 return ua_event;
903
904 error:
905 return NULL;
906 }
907
908 /*
909 * Alloc new UST app context.
910 */
911 static
912 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context *uctx)
913 {
914 struct ust_app_ctx *ua_ctx;
915
916 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
917 if (ua_ctx == NULL) {
918 goto error;
919 }
920
921 CDS_INIT_LIST_HEAD(&ua_ctx->list);
922
923 if (uctx) {
924 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
925 }
926
927 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
928
929 error:
930 return ua_ctx;
931 }
932
933 /*
934 * Allocate a filter and copy the given original filter.
935 *
936 * Return allocated filter or NULL on error.
937 */
938 static struct lttng_ust_filter_bytecode *alloc_copy_ust_app_filter(
939 struct lttng_ust_filter_bytecode *orig_f)
940 {
941 struct lttng_ust_filter_bytecode *filter = NULL;
942
943 /* Copy filter bytecode */
944 filter = zmalloc(sizeof(*filter) + orig_f->len);
945 if (!filter) {
946 PERROR("zmalloc alloc ust app filter");
947 goto error;
948 }
949
950 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
951
952 error:
953 return filter;
954 }
955
956 /*
957 * Find an ust_app using the sock and return it. RCU read side lock must be
958 * held before calling this helper function.
959 */
960 static
961 struct ust_app *find_app_by_sock(int sock)
962 {
963 struct lttng_ht_node_ulong *node;
964 struct lttng_ht_iter iter;
965
966 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
967 node = lttng_ht_iter_get_node_ulong(&iter);
968 if (node == NULL) {
969 DBG2("UST app find by sock %d not found", sock);
970 goto error;
971 }
972
973 return caa_container_of(node, struct ust_app, sock_n);
974
975 error:
976 return NULL;
977 }
978
979 /*
980 * Find an ust_app using the notify sock and return it. RCU read side lock must
981 * be held before calling this helper function.
982 */
983 static struct ust_app *find_app_by_notify_sock(int sock)
984 {
985 struct lttng_ht_node_ulong *node;
986 struct lttng_ht_iter iter;
987
988 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
989 &iter);
990 node = lttng_ht_iter_get_node_ulong(&iter);
991 if (node == NULL) {
992 DBG2("UST app find by notify sock %d not found", sock);
993 goto error;
994 }
995
996 return caa_container_of(node, struct ust_app, notify_sock_n);
997
998 error:
999 return NULL;
1000 }
1001
1002 /*
1003 * Lookup for an ust app event based on event name, filter bytecode and the
1004 * event loglevel.
1005 *
1006 * Return an ust_app_event object or NULL on error.
1007 */
1008 static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
1009 char *name, struct lttng_ust_filter_bytecode *filter, int loglevel)
1010 {
1011 struct lttng_ht_iter iter;
1012 struct lttng_ht_node_str *node;
1013 struct ust_app_event *event = NULL;
1014 struct ust_app_ht_key key;
1015
1016 assert(name);
1017 assert(ht);
1018
1019 /* Setup key for event lookup. */
1020 key.name = name;
1021 key.filter = filter;
1022 key.loglevel = loglevel;
1023
1024 /* Lookup using the event name as hash and a custom match fct. */
1025 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1026 ht_match_ust_app_event, &key, &iter.iter);
1027 node = lttng_ht_iter_get_node_str(&iter);
1028 if (node == NULL) {
1029 goto end;
1030 }
1031
1032 event = caa_container_of(node, struct ust_app_event, node);
1033
1034 end:
1035 return event;
1036 }
1037
1038 /*
1039 * Create the channel context on the tracer.
1040 *
1041 * Called with UST app session lock held.
1042 */
1043 static
1044 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1045 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1046 {
1047 int ret;
1048
1049 health_code_update();
1050
1051 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1052 ua_chan->obj, &ua_ctx->obj);
1053 if (ret < 0) {
1054 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1055 ERR("UST app create channel context failed for app (pid: %d) "
1056 "with ret %d", app->pid, ret);
1057 } else {
1058 DBG3("UST app disable event failed. Application is dead.");
1059 }
1060 goto error;
1061 }
1062
1063 ua_ctx->handle = ua_ctx->obj->handle;
1064
1065 DBG2("UST app context handle %d created successfully for channel %s",
1066 ua_ctx->handle, ua_chan->name);
1067
1068 error:
1069 health_code_update();
1070 return ret;
1071 }
1072
1073 /*
1074 * Set the filter on the tracer.
1075 */
1076 static
1077 int set_ust_event_filter(struct ust_app_event *ua_event,
1078 struct ust_app *app)
1079 {
1080 int ret;
1081
1082 health_code_update();
1083
1084 if (!ua_event->filter) {
1085 ret = 0;
1086 goto error;
1087 }
1088
1089 ret = ustctl_set_filter(app->sock, ua_event->filter,
1090 ua_event->obj);
1091 if (ret < 0) {
1092 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1093 ERR("UST app event %s filter failed for app (pid: %d) "
1094 "with ret %d", ua_event->attr.name, app->pid, ret);
1095 } else {
1096 DBG3("UST app filter event failed. Application is dead.");
1097 }
1098 goto error;
1099 }
1100
1101 DBG2("UST filter set successfully for event %s", ua_event->name);
1102
1103 error:
1104 health_code_update();
1105 return ret;
1106 }
1107
1108 /*
1109 * Disable the specified event on to UST tracer for the UST session.
1110 */
1111 static int disable_ust_event(struct ust_app *app,
1112 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1113 {
1114 int ret;
1115
1116 health_code_update();
1117
1118 ret = ustctl_disable(app->sock, ua_event->obj);
1119 if (ret < 0) {
1120 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1121 ERR("UST app event %s disable failed for app (pid: %d) "
1122 "and session handle %d with ret %d",
1123 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1124 } else {
1125 DBG3("UST app disable event failed. Application is dead.");
1126 }
1127 goto error;
1128 }
1129
1130 DBG2("UST app event %s disabled successfully for app (pid: %d)",
1131 ua_event->attr.name, app->pid);
1132
1133 error:
1134 health_code_update();
1135 return ret;
1136 }
1137
1138 /*
1139 * Disable the specified channel on to UST tracer for the UST session.
1140 */
1141 static int disable_ust_channel(struct ust_app *app,
1142 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1143 {
1144 int ret;
1145
1146 health_code_update();
1147
1148 ret = ustctl_disable(app->sock, ua_chan->obj);
1149 if (ret < 0) {
1150 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1151 ERR("UST app channel %s disable failed for app (pid: %d) "
1152 "and session handle %d with ret %d",
1153 ua_chan->name, app->pid, ua_sess->handle, ret);
1154 } else {
1155 DBG3("UST app disable channel failed. Application is dead.");
1156 }
1157 goto error;
1158 }
1159
1160 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1161 ua_chan->name, app->pid);
1162
1163 error:
1164 health_code_update();
1165 return ret;
1166 }
1167
1168 /*
1169 * Enable the specified channel on to UST tracer for the UST session.
1170 */
1171 static int enable_ust_channel(struct ust_app *app,
1172 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1173 {
1174 int ret;
1175
1176 health_code_update();
1177
1178 ret = ustctl_enable(app->sock, ua_chan->obj);
1179 if (ret < 0) {
1180 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1181 ERR("UST app channel %s enable failed for app (pid: %d) "
1182 "and session handle %d with ret %d",
1183 ua_chan->name, app->pid, ua_sess->handle, ret);
1184 } else {
1185 DBG3("UST app enable channel failed. Application is dead.");
1186 }
1187 goto error;
1188 }
1189
1190 ua_chan->enabled = 1;
1191
1192 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1193 ua_chan->name, app->pid);
1194
1195 error:
1196 health_code_update();
1197 return ret;
1198 }
1199
1200 /*
1201 * Enable the specified event on to UST tracer for the UST session.
1202 */
1203 static int enable_ust_event(struct ust_app *app,
1204 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1205 {
1206 int ret;
1207
1208 health_code_update();
1209
1210 ret = ustctl_enable(app->sock, ua_event->obj);
1211 if (ret < 0) {
1212 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1213 ERR("UST app event %s enable failed for app (pid: %d) "
1214 "and session handle %d with ret %d",
1215 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1216 } else {
1217 DBG3("UST app enable event failed. Application is dead.");
1218 }
1219 goto error;
1220 }
1221
1222 DBG2("UST app event %s enabled successfully for app (pid: %d)",
1223 ua_event->attr.name, app->pid);
1224
1225 error:
1226 health_code_update();
1227 return ret;
1228 }
1229
1230 /*
1231 * Send channel and stream buffer to application.
1232 *
1233 * Return 0 on success. On error, a negative value is returned.
1234 */
1235 static int send_channel_pid_to_ust(struct ust_app *app,
1236 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1237 {
1238 int ret;
1239 struct ust_app_stream *stream, *stmp;
1240
1241 assert(app);
1242 assert(ua_sess);
1243 assert(ua_chan);
1244
1245 health_code_update();
1246
1247 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
1248 app->sock);
1249
1250 /* Send channel to the application. */
1251 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
1252 if (ret < 0) {
1253 goto error;
1254 }
1255
1256 health_code_update();
1257
1258 /* Send all streams to application. */
1259 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
1260 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
1261 if (ret < 0) {
1262 goto error;
1263 }
1264 /* We don't need the stream anymore once sent to the tracer. */
1265 cds_list_del(&stream->list);
1266 delete_ust_app_stream(-1, stream);
1267 }
1268 /* Flag the channel that it is sent to the application. */
1269 ua_chan->is_sent = 1;
1270
1271 error:
1272 health_code_update();
1273 return ret;
1274 }
1275
1276 /*
1277 * Create the specified event onto the UST tracer for a UST session.
1278 *
1279 * Should be called with session mutex held.
1280 */
static
int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
{
	int ret = 0;

	health_code_update();

	/* Create UST event on tracer */
	ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
			&ua_event->obj);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error ustctl create event %s for app pid: %d with ret %d",
					ua_event->attr.name, app->pid, ret);
		} else {
			/* The application exited; not an error on our side. */
			DBG3("UST app create event failed. Application is dead.");
		}
		goto error;
	}

	/* Keep the tracer-assigned handle for later operations on this event. */
	ua_event->handle = ua_event->obj->handle;

	DBG2("UST app event %s created successfully for pid:%d",
			ua_event->attr.name, app->pid);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event->filter) {
		ret = set_ust_event_filter(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/*
	 * If event not enabled, disable it on the tracer. Events are created
	 * enabled by the tracer, so this reconciles the tracer state with ours.
	 */
	if (ua_event->enabled == 0) {
		ret = disable_ust_event(app, ua_sess, ua_event);
		if (ret < 0) {
			/*
			 * If we hit an EPERM, something is wrong with our disable call. If
			 * we get an EEXIST, there is a problem on the tracer side since we
			 * just created it.
			 */
			switch (ret) {
			case -LTTNG_UST_ERR_PERM:
				/* Code flow problem */
				assert(0);
				/*
				 * NOTE(review): no break here — when assertions are compiled
				 * out (NDEBUG) this falls through to the EXIST case and
				 * swallows the error as success. Confirm this is intended.
				 */
			case -LTTNG_UST_ERR_EXIST:
				/* It's OK for our use case. */
				ret = 0;
				break;
			default:
				break;
			}
			goto error;
		}
	}

error:
	health_code_update();
	return ret;
}
1345
1346 /*
1347 * Copy data between an UST app event and a LTT event.
1348 */
1349 static void shadow_copy_event(struct ust_app_event *ua_event,
1350 struct ltt_ust_event *uevent)
1351 {
1352 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
1353 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1354
1355 ua_event->enabled = uevent->enabled;
1356
1357 /* Copy event attributes */
1358 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
1359
1360 /* Copy filter bytecode */
1361 if (uevent->filter) {
1362 ua_event->filter = alloc_copy_ust_app_filter(uevent->filter);
1363 /* Filter might be NULL here in case of ENONEM. */
1364 }
1365 }
1366
1367 /*
1368 * Copy data between an UST app channel and a LTT channel.
1369 */
static void shadow_copy_channel(struct ust_app_channel *ua_chan,
		struct ltt_ust_channel *uchan)
{
	struct lttng_ht_iter iter;
	struct ltt_ust_event *uevent;
	struct ltt_ust_context *uctx;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;

	DBG2("UST app shadow copy of channel %s started", ua_chan->name);

	/* Bounded copy of the channel name; guarantee NUL termination. */
	strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->tracefile_size = uchan->tracefile_size;
	ua_chan->tracefile_count = uchan->tracefile_count;

	/* Copy event attributes since the layout is different. */
	ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
	ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
	ua_chan->attr.overwrite = uchan->attr.overwrite;
	ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
	ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
	ua_chan->attr.output = uchan->attr.output;
	/*
	 * Note that the attribute channel type is not set since the channel on the
	 * tracing registry side does not have this information.
	 */

	ua_chan->enabled = uchan->enabled;
	ua_chan->tracing_channel_id = uchan->id;

	/*
	 * Walk the ordered context list (not the hash table) so contexts are
	 * copied in the same order the user enabled them.
	 */
	cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
		ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
		if (ua_ctx == NULL) {
			/* NOTE(review): allocation failure silently skips this context. */
			continue;
		}
		lttng_ht_node_init_ulong(&ua_ctx->node,
				(unsigned long) ua_ctx->ctx.ctx);
		lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
		/* Keep the list append so the user's enable order is preserved. */
		cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
	}

	/* Copy all events from ltt ust channel to ust app channel */
	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel);
		if (ua_event == NULL) {
			DBG2("UST event %s not found on shadow copy channel",
					uevent->attr.name);
			ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
			if (ua_event == NULL) {
				/* NOTE(review): allocation failure silently skips this event. */
				continue;
			}
			shadow_copy_event(ua_event, uevent);
			add_unique_ust_app_event(ua_chan, ua_event);
		}
	}

	DBG3("UST app shadow copy of channel %s done", ua_chan->name);
}
1431
1432 /*
1433 * Copy data between a UST app session and a regular LTT session.
1434 */
1435 static void shadow_copy_session(struct ust_app_session *ua_sess,
1436 struct ltt_ust_session *usess, struct ust_app *app)
1437 {
1438 struct lttng_ht_node_str *ua_chan_node;
1439 struct lttng_ht_iter iter;
1440 struct ltt_ust_channel *uchan;
1441 struct ust_app_channel *ua_chan;
1442 time_t rawtime;
1443 struct tm *timeinfo;
1444 char datetime[16];
1445 int ret;
1446
1447 /* Get date and time for unique app path */
1448 time(&rawtime);
1449 timeinfo = localtime(&rawtime);
1450 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
1451
1452 DBG2("Shadow copy of session handle %d", ua_sess->handle);
1453
1454 ua_sess->tracing_id = usess->id;
1455 ua_sess->id = get_next_session_id();
1456 ua_sess->uid = app->uid;
1457 ua_sess->gid = app->gid;
1458 ua_sess->euid = usess->uid;
1459 ua_sess->egid = usess->gid;
1460 ua_sess->buffer_type = usess->buffer_type;
1461 ua_sess->bits_per_long = app->bits_per_long;
1462 /* There is only one consumer object per session possible. */
1463 ua_sess->consumer = usess->consumer;
1464
1465 switch (ua_sess->buffer_type) {
1466 case LTTNG_BUFFER_PER_PID:
1467 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1468 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
1469 datetime);
1470 break;
1471 case LTTNG_BUFFER_PER_UID:
1472 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1473 DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
1474 break;
1475 default:
1476 assert(0);
1477 goto error;
1478 }
1479 if (ret < 0) {
1480 PERROR("asprintf UST shadow copy session");
1481 assert(0);
1482 goto error;
1483 }
1484
1485 /* Iterate over all channels in global domain. */
1486 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
1487 uchan, node.node) {
1488 struct lttng_ht_iter uiter;
1489
1490 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
1491 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
1492 if (ua_chan_node != NULL) {
1493 /* Session exist. Contiuing. */
1494 continue;
1495 }
1496
1497 DBG2("Channel %s not found on shadow session copy, creating it",
1498 uchan->name);
1499 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
1500 if (ua_chan == NULL) {
1501 /* malloc failed FIXME: Might want to do handle ENOMEM .. */
1502 continue;
1503 }
1504 shadow_copy_channel(ua_chan, uchan);
1505 /*
1506 * The concept of metadata channel does not exist on the tracing
1507 * registry side of the session daemon so this can only be a per CPU
1508 * channel and not metadata.
1509 */
1510 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
1511
1512 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
1513 }
1514
1515 error:
1516 return;
1517 }
1518
1519 /*
1520 * Lookup sesison wrapper.
1521 */
static
void __lookup_session_by_app(struct ltt_ust_session *usess,
		struct ust_app *app, struct lttng_ht_iter *iter)
{
	/*
	 * Get right UST app session from app. The app's session hash table is
	 * keyed by the tracing session id (usess->id); iter is left positioned
	 * on the match (or empty) for the caller to extract.
	 */
	lttng_ht_lookup(app->sessions, &usess->id, iter);
}
1529
1530 /*
1531 * Return ust app session from the app session hashtable using the UST session
1532 * id.
1533 */
1534 static struct ust_app_session *lookup_session_by_app(
1535 struct ltt_ust_session *usess, struct ust_app *app)
1536 {
1537 struct lttng_ht_iter iter;
1538 struct lttng_ht_node_u64 *node;
1539
1540 __lookup_session_by_app(usess, app, &iter);
1541 node = lttng_ht_iter_get_node_u64(&iter);
1542 if (node == NULL) {
1543 goto error;
1544 }
1545
1546 return caa_container_of(node, struct ust_app_session, node);
1547
1548 error:
1549 return NULL;
1550 }
1551
1552 /*
1553 * Setup buffer registry per PID for the given session and application. If none
1554 * is found, a new one is created, added to the global registry and
1555 * initialized. If regp is valid, it's set with the newly created object.
1556 *
1557 * Return 0 on success or else a negative value.
1558 */
static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_pid **regp)
{
	int ret = 0;
	struct buffer_reg_pid *reg_pid;

	assert(ua_sess);
	assert(app);

	rcu_read_lock();

	reg_pid = buffer_reg_pid_find(ua_sess->id);
	if (!reg_pid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_pid_create(ua_sess->id, &reg_pid);
		if (ret < 0) {
			goto error;
		}
		buffer_reg_pid_add(reg_pid);
	} else {
		/* Already set up by a previous call; just report it back. */
		goto end;
	}

	/*
	 * Initialize registry with the app's ABI description. Only done for a
	 * freshly created registry.
	 *
	 * NOTE(review): on failure the registry was already added to the global
	 * list above and is not removed here — confirm teardown handles it.
	 */
	ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor);
	if (ret < 0) {
		goto error;
	}

	DBG3("UST app buffer registry per PID created successfully");

end:
	/* Hand back the (new or existing) registry if the caller wants it. */
	if (regp) {
		*regp = reg_pid;
	}
error:
	rcu_read_unlock();
	return ret;
}
1606
1607 /*
1608 * Setup buffer registry per UID for the given session and application. If none
1609 * is found, a new one is created, added to the global registry and
1610 * initialized. If regp is valid, it's set with the newly created object.
1611 *
1612 * Return 0 on success or else a negative value.
1613 */
static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
		struct ust_app *app, struct buffer_reg_uid **regp)
{
	int ret = 0;
	struct buffer_reg_uid *reg_uid;

	assert(usess);
	assert(app);

	rcu_read_lock();

	/* Per-UID registries are keyed by session, bitness and uid. */
	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	if (!reg_uid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
				LTTNG_DOMAIN_UST, &reg_uid);
		if (ret < 0) {
			goto error;
		}
		buffer_reg_uid_add(reg_uid);
	} else {
		/* Already set up by a previous app of this uid/bitness. */
		goto end;
	}

	/*
	 * Initialize registry with the app's ABI description. The app pointer is
	 * NULL since this registry is shared by all apps of the same uid/bitness.
	 *
	 * NOTE(review): on failure the registry was already added to the global
	 * list above and is not removed here — confirm teardown handles it.
	 */
	ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor);
	if (ret < 0) {
		goto error;
	}
	/* Add node to teardown list of the session. */
	cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);

	DBG3("UST app buffer registry per UID created successfully");

end:
	/* Hand back the (new or existing) registry if the caller wants it. */
	if (regp) {
		*regp = reg_uid;
	}
error:
	rcu_read_unlock();
	return ret;
}
1664
1665 /*
1666 * Create a session on the tracer side for the given app.
1667 *
1668 * On success, ua_sess_ptr is populated with the session pointer or else left
1669 * untouched. If the session was created, is_created is set to 1. On error,
1670 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
1671 * be NULL.
1672 *
1673 * Returns 0 on success or else a negative code which is either -ENOMEM or
1674 * -ENOTCONN which is the default code if the ustctl_create_session fails.
1675 */
static int create_ust_app_session(struct ltt_ust_session *usess,
		struct ust_app *app, struct ust_app_session **ua_sess_ptr,
		int *is_created)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess;

	assert(usess);
	assert(app);
	assert(ua_sess_ptr);

	health_code_update();

	/* Reuse the app session if one already exists for this tracing session. */
	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
				app->pid, usess->id);
		ua_sess = alloc_ust_app_session(app);
		if (ua_sess == NULL) {
			/* Only malloc can failed so something is really wrong */
			ret = -ENOMEM;
			goto error;
		}
		shadow_copy_session(ua_sess, usess, app);
		created = 1;
	}

	/* Make sure the buffer registry matching the buffer scheme exists. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		/* Init local registry. */
		ret = setup_buffer_reg_pid(ua_sess, app, NULL);
		if (ret < 0) {
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_UID:
		/* Look for a global registry. If none exists, create one. */
		ret = setup_buffer_reg_uid(usess, app, NULL);
		if (ret < 0) {
			goto error;
		}
		break;
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Handle -1 means the tracer-side session was never created. */
	if (ua_sess->handle == -1) {
		ret = ustctl_create_session(app->sock);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("Creating session for app pid %d with ret %d",
						app->pid, ret);
			} else {
				DBG("UST app creating session failed. Application is dead");
			}
			delete_ust_app_session(-1, ua_sess, app);
			if (ret != -ENOMEM) {
				/*
				 * Tracer is probably gone or got an internal error so let's
				 * behave like it will soon unregister or not usable.
				 */
				ret = -ENOTCONN;
			}
			goto error;
		}

		/* On success, ustctl_create_session returns the session handle. */
		ua_sess->handle = ret;

		/* Add ust app session to app's HT, keyed by the tracing session id. */
		lttng_ht_node_init_u64(&ua_sess->node,
				ua_sess->tracing_id);
		lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);

		DBG2("UST app session created successfully with handle %d", ret);
	}

	*ua_sess_ptr = ua_sess;
	if (is_created) {
		*is_created = created;
	}

	/* Everything went well. */
	ret = 0;

error:
	health_code_update();
	return ret;
}
1768
1769 /*
1770 * Create a context for the channel on the tracer.
1771 *
1772 * Called with UST app session lock held and a RCU read side lock.
1773 */
static
int create_ust_app_channel_context(struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct lttng_ust_context *uctx,
		struct ust_app *app)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_ulong *node;
	struct ust_app_ctx *ua_ctx;

	DBG2("UST app adding context to channel %s", ua_chan->name);

	/* Refuse duplicates: one context of a given type per channel. */
	lttng_ht_lookup(ua_chan->ctx, (void *)((unsigned long)uctx->ctx), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	if (node != NULL) {
		ret = -EEXIST;
		goto error;
	}

	ua_ctx = alloc_ust_app_ctx(uctx);
	if (ua_ctx == NULL) {
		/* malloc failed */
		ret = -1;
		goto error;
	}

	lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
	lttng_ht_add_unique_ulong(ua_chan->ctx, &ua_ctx->node);
	/*
	 * Also append to the ordered list so contexts are later created on the
	 * tracer in the same order the user enabled them.
	 */
	cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);

	/*
	 * NOTE(review): if this fails, ua_ctx stays in the HT and list — confirm
	 * it is cleaned up by channel/session teardown.
	 */
	ret = create_ust_channel_context(ua_chan, ua_ctx, app);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}
1812
1813 /*
1814 * Enable on the tracer side a ust app event for the session and channel.
1815 *
1816 * Called with UST app session lock held.
1817 */
1818 static
1819 int enable_ust_app_event(struct ust_app_session *ua_sess,
1820 struct ust_app_event *ua_event, struct ust_app *app)
1821 {
1822 int ret;
1823
1824 ret = enable_ust_event(app, ua_sess, ua_event);
1825 if (ret < 0) {
1826 goto error;
1827 }
1828
1829 ua_event->enabled = 1;
1830
1831 error:
1832 return ret;
1833 }
1834
1835 /*
1836 * Disable on the tracer side a ust app event for the session and channel.
1837 */
1838 static int disable_ust_app_event(struct ust_app_session *ua_sess,
1839 struct ust_app_event *ua_event, struct ust_app *app)
1840 {
1841 int ret;
1842
1843 ret = disable_ust_event(app, ua_sess, ua_event);
1844 if (ret < 0) {
1845 goto error;
1846 }
1847
1848 ua_event->enabled = 0;
1849
1850 error:
1851 return ret;
1852 }
1853
1854 /*
1855 * Lookup ust app channel for session and disable it on the tracer side.
1856 */
1857 static
1858 int disable_ust_app_channel(struct ust_app_session *ua_sess,
1859 struct ust_app_channel *ua_chan, struct ust_app *app)
1860 {
1861 int ret;
1862
1863 ret = disable_ust_channel(app, ua_sess, ua_chan);
1864 if (ret < 0) {
1865 goto error;
1866 }
1867
1868 ua_chan->enabled = 0;
1869
1870 error:
1871 return ret;
1872 }
1873
1874 /*
1875 * Lookup ust app channel for session and enable it on the tracer side. This
1876 * MUST be called with a RCU read side lock acquired.
1877 */
1878 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
1879 struct ltt_ust_channel *uchan, struct ust_app *app)
1880 {
1881 int ret = 0;
1882 struct lttng_ht_iter iter;
1883 struct lttng_ht_node_str *ua_chan_node;
1884 struct ust_app_channel *ua_chan;
1885
1886 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
1887 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
1888 if (ua_chan_node == NULL) {
1889 DBG2("Unable to find channel %s in ust session id %" PRIu64,
1890 uchan->name, ua_sess->tracing_id);
1891 goto error;
1892 }
1893
1894 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
1895
1896 ret = enable_ust_channel(app, ua_sess, ua_chan);
1897 if (ret < 0) {
1898 goto error;
1899 }
1900
1901 error:
1902 return ret;
1903 }
1904
1905 /*
1906 * Ask the consumer to create a channel and get it if successful.
1907 *
1908 * Return 0 on success or else a negative value.
1909 */
static int do_consumer_create_channel(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
		int bitness, struct ust_registry_session *registry)
{
	int ret;
	unsigned int nb_fd = 0;
	struct consumer_socket *socket;

	assert(usess);
	assert(ua_sess);
	assert(ua_chan);
	assert(registry);

	rcu_read_lock();
	health_code_update();

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error;
	}

	/*
	 * Ask consumer to create channel. The consumer will return the number of
	 * stream we have to expect.
	 */
	ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
			registry);
	if (ret < 0) {
		goto error_ask;
	}

	/*
	 * Compute the number of fd needed before receiving them. It must be 2 per
	 * stream (2 being the default value here).
	 */
	nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error_fd_get_stream;
	}

	health_code_update();

	/*
	 * Now get the channel from the consumer. This call will populate the
	 * stream list of that channel and set the ust objects.
	 */
	if (usess->consumer->enabled) {
		ret = ust_consumer_get_channel(socket, ua_chan);
		if (ret < 0) {
			goto error_destroy;
		}
	}

	rcu_read_unlock();
	return 0;

	/* Error labels fall through intentionally to unwind in reverse order. */
error_destroy:
	/* Release the per-stream fd reservation made above. */
	lttng_fd_put(LTTNG_FD_APPS, nb_fd);
error_fd_get_stream:
	/*
	 * Initiate a destroy channel on the consumer since we had an error
	 * handling it on our side. The return value is of no importance since we
	 * already have a ret value set by the previous error that we need to
	 * return.
	 */
	(void) ust_consumer_destroy_channel(socket, ua_chan);
error_ask:
	/* Release the single channel fd reservation. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
error:
	health_code_update();
	rcu_read_unlock();
	return ret;
}
1998
1999 /*
2000 * Duplicate the ust data object of the ust app stream and save it in the
2001 * buffer registry stream.
2002 *
2003 * Return 0 on success or else a negative value.
2004 */
2005 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2006 struct ust_app_stream *stream)
2007 {
2008 int ret;
2009
2010 assert(reg_stream);
2011 assert(stream);
2012
2013 /* Reserve the amount of file descriptor we need. */
2014 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2015 if (ret < 0) {
2016 ERR("Exhausted number of available FD upon duplicate stream");
2017 goto error;
2018 }
2019
2020 /* Duplicate object for stream once the original is in the registry. */
2021 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2022 reg_stream->obj.ust);
2023 if (ret < 0) {
2024 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2025 reg_stream->obj.ust, stream->obj, ret);
2026 lttng_fd_put(LTTNG_FD_APPS, 2);
2027 goto error;
2028 }
2029 stream->handle = stream->obj->handle;
2030
2031 error:
2032 return ret;
2033 }
2034
2035 /*
2036 * Duplicate the ust data object of the ust app. channel and save it in the
2037 * buffer registry channel.
2038 *
2039 * Return 0 on success or else a negative value.
2040 */
static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan)
{
	int ret;

	assert(reg_chan);
	assert(ua_chan);

	/* Need one fd for the channel. (Comment previously said two.) */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate channel");
		goto error_fd_get;
	}

	/* Duplicate object for stream once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
	if (ret < 0) {
		ERR("Duplicate channel obj from %p to %p failed with ret: %d",
				reg_chan->obj.ust, ua_chan->obj, ret);
		goto error;
	}
	ua_chan->handle = ua_chan->obj->handle;

	return 0;

error:
	/* Give back the reserved descriptor on duplication failure. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
error_fd_get:
	return ret;
}
2072
2073 /*
2074 * For a given channel buffer registry, setup all streams of the given ust
2075 * application channel.
2076 *
2077 * Return 0 on success or else a negative value.
2078 */
2079 static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
2080 struct ust_app_channel *ua_chan)
2081 {
2082 int ret = 0;
2083 struct ust_app_stream *stream, *stmp;
2084
2085 assert(reg_chan);
2086 assert(ua_chan);
2087
2088 DBG2("UST app setup buffer registry stream");
2089
2090 /* Send all streams to application. */
2091 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2092 struct buffer_reg_stream *reg_stream;
2093
2094 ret = buffer_reg_stream_create(&reg_stream);
2095 if (ret < 0) {
2096 goto error;
2097 }
2098
2099 /*
2100 * Keep original pointer and nullify it in the stream so the delete
2101 * stream call does not release the object.
2102 */
2103 reg_stream->obj.ust = stream->obj;
2104 stream->obj = NULL;
2105 buffer_reg_stream_add(reg_stream, reg_chan);
2106
2107 /* We don't need the streams anymore. */
2108 cds_list_del(&stream->list);
2109 delete_ust_app_stream(-1, stream);
2110 }
2111
2112 error:
2113 return ret;
2114 }
2115
2116 /*
2117 * Create a buffer registry channel for the given session registry and
2118 * application channel object. If regp pointer is valid, it's set with the
2119 * created object. Important, the created object is NOT added to the session
2120 * registry hash table.
2121 *
2122 * Return 0 on success else a negative value.
2123 */
2124 static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2125 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
2126 {
2127 int ret;
2128 struct buffer_reg_channel *reg_chan = NULL;
2129
2130 assert(reg_sess);
2131 assert(ua_chan);
2132
2133 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
2134
2135 /* Create buffer registry channel. */
2136 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
2137 if (ret < 0) {
2138 goto error_create;
2139 }
2140 assert(reg_chan);
2141 reg_chan->consumer_key = ua_chan->key;
2142
2143 /* Create and add a channel registry to session. */
2144 ret = ust_registry_channel_add(reg_sess->reg.ust,
2145 ua_chan->tracing_channel_id);
2146 if (ret < 0) {
2147 goto error;
2148 }
2149 buffer_reg_channel_add(reg_sess, reg_chan);
2150
2151 if (regp) {
2152 *regp = reg_chan;
2153 }
2154
2155 return 0;
2156
2157 error:
2158 /* Safe because the registry channel object was not added to any HT. */
2159 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2160 error_create:
2161 return ret;
2162 }
2163
2164 /*
2165 * Setup buffer registry channel for the given session registry and application
2166 * channel object. If regp pointer is valid, it's set with the created object.
2167 *
2168 * Return 0 on success else a negative value.
2169 */
2170 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2171 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan)
2172 {
2173 int ret;
2174
2175 assert(reg_sess);
2176 assert(reg_chan);
2177 assert(ua_chan);
2178 assert(ua_chan->obj);
2179
2180 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
2181
2182 /* Setup all streams for the registry. */
2183 ret = setup_buffer_reg_streams(reg_chan, ua_chan);
2184 if (ret < 0) {
2185 goto error;
2186 }
2187
2188 reg_chan->obj.ust = ua_chan->obj;
2189 ua_chan->obj = NULL;
2190
2191 return 0;
2192
2193 error:
2194 buffer_reg_channel_remove(reg_sess, reg_chan);
2195 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2196 return ret;
2197 }
2198
2199 /*
2200 * Send buffer registry channel to the application.
2201 *
2202 * Return 0 on success else a negative value.
2203 */
static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
		struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_stream *reg_stream;

	assert(reg_chan);
	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app sending buffer registry channel to ust sock %d", app->sock);

	/* Give the app its own copy of the shared channel object. */
	ret = duplicate_channel_object(reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/*
	 * Send all streams to application. The lock protects the registry's
	 * stream list and is held across the consumer sends.
	 */
	pthread_mutex_lock(&reg_chan->stream_list_lock);
	cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
		/* Stack-allocated; only holds a duplicated ust object per iteration. */
		struct ust_app_stream stream;

		ret = duplicate_stream_object(reg_stream, &stream);
		if (ret < 0) {
			goto error_stream_unlock;
		}

		ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
		if (ret < 0) {
			(void) release_ust_app_stream(-1, &stream);
			goto error_stream_unlock;
		}

		/*
		 * The return value is not important here. This function will output an
		 * error if needed.
		 */
		(void) release_ust_app_stream(-1, &stream);
	}
	/* Flag the channel as owned by the application. */
	ua_chan->is_sent = 1;

error_stream_unlock:
	pthread_mutex_unlock(&reg_chan->stream_list_lock);
error:
	return ret;
}
2260
/*
 * Create and send to the application the created buffers with per UID buffers.
 *
 * The first application of a given UID/bitness creates the buffer registry
 * channel and the consumer-side buffers; subsequent applications reuse the
 * existing registry channel and only get the buffers sent to them.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_uid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *reg_chan;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be found, there is a code flow problem or a
	 * teardown race.
	 */
	assert(reg_uid);

	reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
			reg_uid);
	if (!reg_chan) {
		/* Create the buffer registry channel object. */
		ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
		if (ret < 0) {
			goto error;
		}
		assert(reg_chan);

		/*
		 * Create the buffers on the consumer side. This call populates the
		 * ust app channel object with all streams and data object.
		 */
		ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
				app->bits_per_long, reg_uid->registry->reg.ust);
		if (ret < 0) {
			/*
			 * Let's remove the previously created buffer registry channel so
			 * it's not visible anymore in the session registry.
			 */
			ust_registry_channel_del_free(reg_uid->registry->reg.ust,
					ua_chan->tracing_channel_id);
			buffer_reg_channel_remove(reg_uid->registry, reg_chan);
			buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
			goto error;
		}

		/*
		 * Setup the streams and add it to the session registry.
		 */
		ret = setup_buffer_reg_channel(reg_uid->registry, ua_chan, reg_chan);
		if (ret < 0) {
			goto error;
		}

	}

	/* Send buffers to the application. */
	ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}
2336
/*
 * Create and send to the application the created buffers with per PID buffers.
 *
 * Unlike the per UID case, the channel registry entry and consumer buffers
 * are created for this application alone.
 *
 * Return 0 on success else a negative value.
 */
static int create_channel_per_pid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_registry_session *registry;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per PID buffers", ua_chan->name);

	rcu_read_lock();

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Create and add a new channel registry to session. */
	ret = ust_registry_channel_add(registry, ua_chan->key);
	if (ret < 0) {
		goto error;
	}

	/* Create and get channel on the consumer side. */
	ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
			app->bits_per_long, registry);
	if (ret < 0) {
		goto error;
	}

	/* Send the channel and its streams to the application. */
	ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

error:
	rcu_read_unlock();
	return ret;
}
2383
2384 /*
2385 * From an already allocated ust app channel, create the channel buffers if
2386 * need and send it to the application. This MUST be called with a RCU read
2387 * side lock acquired.
2388 *
2389 * Return 0 on success or else a negative value.
2390 */
2391 static int do_create_channel(struct ust_app *app,
2392 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2393 struct ust_app_channel *ua_chan)
2394 {
2395 int ret;
2396
2397 assert(app);
2398 assert(usess);
2399 assert(ua_sess);
2400 assert(ua_chan);
2401
2402 /* Handle buffer type before sending the channel to the application. */
2403 switch (usess->buffer_type) {
2404 case LTTNG_BUFFER_PER_UID:
2405 {
2406 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
2407 if (ret < 0) {
2408 goto error;
2409 }
2410 break;
2411 }
2412 case LTTNG_BUFFER_PER_PID:
2413 {
2414 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
2415 if (ret < 0) {
2416 goto error;
2417 }
2418 break;
2419 }
2420 default:
2421 assert(0);
2422 ret = -EINVAL;
2423 goto error;
2424 }
2425
2426 /* Initialize ust objd object using the received handle and add it. */
2427 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
2428 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
2429
2430 /* If channel is not enabled, disable it on the tracer */
2431 if (!ua_chan->enabled) {
2432 ret = disable_ust_channel(app, ua_sess, ua_chan);
2433 if (ret < 0) {
2434 goto error;
2435 }
2436 }
2437
2438 error:
2439 return ret;
2440 }
2441
/*
 * Create UST app channel and create it on the tracer. Set ua_chanp of the
 * newly created channel if not NULL.
 *
 * If the channel already exists in the app session, it is simply returned
 * through ua_chanp without touching the tracer.
 *
 * Called with UST app session lock and RCU read-side lock held.
 *
 * Return 0 on success or else a negative value.
 */
static int create_ust_app_channel(struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app,
		enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
		struct ust_app_channel **ua_chanp)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node != NULL) {
		/* Channel already created; reuse it. */
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		goto end;
	}

	ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
	if (ua_chan == NULL) {
		/* Only malloc can fail here */
		ret = -ENOMEM;
		goto error_alloc;
	}
	shadow_copy_channel(ua_chan, uchan);

	/* Set channel type. */
	ua_chan->attr.type = type;

	/* Create buffers on the consumer and send the channel to the app. */
	ret = do_create_channel(app, usess, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
			app->pid);

	/* Only add the channel if successful on the tracer side. */
	lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);

end:
	if (ua_chanp) {
		*ua_chanp = ua_chan;
	}

	/* Everything went well. */
	return 0;

error:
	/* Close the app-side object only if it was actually sent. */
	delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
error_alloc:
	return ret;
}
2503
2504 /*
2505 * Create UST app event and create it on the tracer side.
2506 *
2507 * Called with ust app session mutex held.
2508 */
2509 static
2510 int create_ust_app_event(struct ust_app_session *ua_sess,
2511 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
2512 struct ust_app *app)
2513 {
2514 int ret = 0;
2515 struct ust_app_event *ua_event;
2516
2517 /* Get event node */
2518 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
2519 uevent->filter, uevent->attr.loglevel);
2520 if (ua_event != NULL) {
2521 ret = -EEXIST;
2522 goto end;
2523 }
2524
2525 /* Does not exist so create one */
2526 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
2527 if (ua_event == NULL) {
2528 /* Only malloc can failed so something is really wrong */
2529 ret = -ENOMEM;
2530 goto end;
2531 }
2532 shadow_copy_event(ua_event, uevent);
2533
2534 /* Create it on the tracer side */
2535 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
2536 if (ret < 0) {
2537 /* Not found previously means that it does not exist on the tracer */
2538 assert(ret != -LTTNG_UST_ERR_EXIST);
2539 goto error;
2540 }
2541
2542 add_unique_ust_app_event(ua_chan, ua_event);
2543
2544 DBG2("UST app create event %s for PID %d completed", ua_event->name,
2545 app->pid);
2546
2547 end:
2548 return ret;
2549
2550 error:
2551 /* Valid. Calling here is already in a read side lock */
2552 delete_ust_app_event(-1, ua_event);
2553 return ret;
2554 }
2555
/*
 * Create UST metadata and open it on the tracer side.
 *
 * The metadata channel object is only used to negotiate the creation with
 * the consumer; it is always deleted locally before returning, since the
 * consumer keeps the metadata object from then on.
 *
 * Called with UST app session lock held and RCU read side lock.
 */
static int create_ust_app_metadata(struct ust_app_session *ua_sess,
		struct ust_app *app, struct consumer_output *consumer,
		struct ustctl_consumer_channel_attr *attr)
{
	int ret = 0;
	struct ust_app_channel *metadata;
	struct consumer_socket *socket;
	struct ust_registry_session *registry;

	assert(ua_sess);
	assert(app);
	assert(consumer);

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Metadata already exists for this registry or it was closed previously */
	if (registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto error;
	}

	/* Allocate UST metadata */
	metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
	if (!metadata) {
		/* malloc() failed */
		ret = -ENOMEM;
		goto error;
	}

	if (!attr) {
		/* Set default attributes for metadata. */
		metadata->attr.overwrite = DEFAULT_CHANNEL_OVERWRITE;
		metadata->attr.subbuf_size = default_get_metadata_subbuf_size();
		metadata->attr.num_subbuf = DEFAULT_METADATA_SUBBUF_NUM;
		metadata->attr.switch_timer_interval = DEFAULT_METADATA_SWITCH_TIMER;
		metadata->attr.read_timer_interval = DEFAULT_METADATA_READ_TIMER;
		metadata->attr.output = LTTNG_UST_MMAP;
		metadata->attr.type = LTTNG_UST_CHAN_METADATA;
	} else {
		/* Caller-provided attributes; force metadata output and type. */
		memcpy(&metadata->attr, attr, sizeof(metadata->attr));
		metadata->attr.output = LTTNG_UST_MMAP;
		metadata->attr.type = LTTNG_UST_CHAN_METADATA;
	}

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create metadata");
		goto error;
	}

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error_consumer;
	}

	/*
	 * Keep metadata key so we can identify it on the consumer side. Assign it
	 * to the registry *before* we ask the consumer so we avoid the race of the
	 * consumer requesting the metadata and the ask_channel call on our side
	 * did not returned yet.
	 */
	registry->metadata_key = metadata->key;

	/*
	 * Ask the metadata channel creation to the consumer. The metadata object
	 * will be created by the consumer and kept their. However, the stream is
	 * never added or monitored until we do a first push metadata to the
	 * consumer.
	 */
	ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
			registry);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	/*
	 * The setup command will make the metadata stream be sent to the relayd,
	 * if applicable, and the thread managing the metadatas. This is important
	 * because after this point, if an error occurs, the only way the stream
	 * can be deleted is to be monitored in the consumer.
	 */
	ret = consumer_setup_metadata(socket, metadata->key);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
			metadata->key, app->pid);

error_consumer:
	/* Release the fd and the local channel object even on success. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
	delete_ust_app_channel(-1, metadata, app);
error:
	return ret;
}
2664
/*
 * Return pointer to the global hash table of traceable applications.
 */
struct lttng_ht *ust_app_get_ht(void)
{
	return ust_app_ht;
}
2672
2673 /*
2674 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
2675 * acquired before calling this function.
2676 */
2677 struct ust_app *ust_app_find_by_pid(pid_t pid)
2678 {
2679 struct ust_app *app = NULL;
2680 struct lttng_ht_node_ulong *node;
2681 struct lttng_ht_iter iter;
2682
2683 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
2684 node = lttng_ht_iter_get_node_ulong(&iter);
2685 if (node == NULL) {
2686 DBG2("UST app no found with pid %d", pid);
2687 goto error;
2688 }
2689
2690 DBG2("Found UST app by pid %d", pid);
2691
2692 app = caa_container_of(node, struct ust_app, pid_n);
2693
2694 error:
2695 return app;
2696 }
2697
/*
 * Allocate and init an UST app object using the registration information and
 * the command socket. This is called when the command socket connects to the
 * session daemon.
 *
 * The object is returned on success or else NULL.
 */
struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
{
	struct ust_app *lta = NULL;

	assert(msg);
	assert(sock >= 0);

	DBG3("UST app creating application for socket %d", sock);

	/*
	 * Refuse the registration if no consumer daemon matching the
	 * application's bitness is available.
	 */
	if ((msg->bits_per_long == 64 &&
			(uatomic_read(&ust_consumerd64_fd) == -EINVAL))
			|| (msg->bits_per_long == 32 &&
			(uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
		ERR("Registration failed: application \"%s\" (pid: %d) has "
				"%d-bit long, but no consumerd for this size is available.\n",
				msg->name, msg->pid, msg->bits_per_long);
		goto error;
	}

	lta = zmalloc(sizeof(struct ust_app));
	if (lta == NULL) {
		PERROR("malloc");
		goto error;
	}

	/* Copy identity and ABI layout information from the registration. */
	lta->ppid = msg->ppid;
	lta->uid = msg->uid;
	lta->gid = msg->gid;

	lta->bits_per_long = msg->bits_per_long;
	lta->uint8_t_alignment = msg->uint8_t_alignment;
	lta->uint16_t_alignment = msg->uint16_t_alignment;
	lta->uint32_t_alignment = msg->uint32_t_alignment;
	lta->uint64_t_alignment = msg->uint64_t_alignment;
	lta->long_alignment = msg->long_alignment;
	lta->byte_order = msg->byte_order;

	lta->v_major = msg->major;
	lta->v_minor = msg->minor;
	/* NOTE(review): lttng_ht_new() results are not checked for NULL here. */
	lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	/* The notify socket is assigned later, when that connection is made. */
	lta->notify_sock = -1;

	/* Copy name and make sure it's NULL terminated. */
	strncpy(lta->name, msg->name, sizeof(lta->name));
	lta->name[UST_APP_PROCNAME_LEN] = '\0';

	/*
	 * Before this can be called, when receiving the registration information,
	 * the application compatibility is checked. So, at this point, the
	 * application can work with this session daemon.
	 */
	lta->compatible = 1;

	lta->pid = msg->pid;
	lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
	lta->sock = sock;
	lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);

	CDS_INIT_LIST_HEAD(&lta->teardown_head);

error:
	return lta;
}
2769
/*
 * For a given application object, add it to every hash table.
 *
 * The caller must have set a valid notify socket on the object first
 * (asserted below).
 */
void ust_app_add(struct ust_app *app)
{
	assert(app);
	assert(app->notify_sock >= 0);

	rcu_read_lock();

	/*
	 * On a re-registration, we want to kick out the previous registration of
	 * that pid
	 */
	lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);

	/*
	 * The socket _should_ be unique until _we_ call close. So, a add_unique
	 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
	 * already in the table.
	 */
	lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);

	/* Add application to the notify socket hash table. */
	lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
	lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);

	DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
			"notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
			app->gid, app->sock, app->name, app->notify_sock, app->v_major,
			app->v_minor);

	rcu_read_unlock();
}
2804
2805 /*
2806 * Set the application version into the object.
2807 *
2808 * Return 0 on success else a negative value either an errno code or a
2809 * LTTng-UST error code.
2810 */
2811 int ust_app_version(struct ust_app *app)
2812 {
2813 int ret;
2814
2815 assert(app);
2816
2817 ret = ustctl_tracer_version(app->sock, &app->version);
2818 if (ret < 0) {
2819 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
2820 ERR("UST app %d verson failed with ret %d", app->sock, ret);
2821 } else {
2822 DBG3("UST app %d verion failed. Application is dead", app->sock);
2823 }
2824 }
2825
2826 return ret;
2827 }
2828
/*
 * Unregister app by removing it from the global traceable app list and freeing
 * the data struct.
 *
 * The socket is already closed at this point so no close to sock.
 */
void ust_app_unregister(int sock)
{
	struct ust_app *lta;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess;
	int ret;

	rcu_read_lock();

	/* Get the node reference for a call_rcu */
	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
	node = lttng_ht_iter_get_node_ulong(&iter);
	assert(node);

	lta = caa_container_of(node, struct ust_app, sock_n);
	DBG("PID %d unregistering with sock %d", lta->pid, sock);

	/* Remove application from socket hash table */
	ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
	assert(!ret);

	/*
	 * Remove application from notify hash table. The thread handling the
	 * notify socket could have deleted the node so ignore on error because
	 * either way it's valid. The close of that socket is handled by the other
	 * thread.
	 */
	iter.iter.node = &lta->notify_sock_n.node;
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

	/*
	 * Ignore return value since the node might have been removed before by an
	 * add replace during app registration because the PID can be reassigned by
	 * the OS.
	 */
	iter.iter.node = &lta->pid_n.node;
	ret = lttng_ht_del(ust_app_ht, &iter);
	if (ret) {
		DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
				lta->pid);
	}

	/* Remove sessions so they are not visible during deletion.*/
	cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
			node.node) {
		struct ust_registry_session *registry;

		ret = lttng_ht_del(lta->sessions, &iter);
		if (ret) {
			/* The session was already removed so scheduled for teardown. */
			continue;
		}

		/*
		 * Add session to list for teardown. This is safe since at this point we
		 * are the only one using this list.
		 */
		pthread_mutex_lock(&ua_sess->lock);

		/*
		 * Normally, this is done in the delete session process which is
		 * executed in the call rcu below. However, upon registration we can't
		 * afford to wait for the grace period before pushing data or else the
		 * data pending feature can race between the unregistration and stop
		 * command where the data pending command is sent *before* the grace
		 * period ended.
		 *
		 * The close metadata below nullifies the metadata pointer in the
		 * session so the delete session will NOT push/close a second time.
		 */
		registry = get_session_registry(ua_sess);
		if (registry && !registry->metadata_closed) {
			/* Push metadata for application before freeing the application. */
			(void) push_metadata(registry, ua_sess->consumer);

			/*
			 * Don't ask to close metadata for global per UID buffers. Close
			 * metadata only on destroy trace session in this case. Also, the
			 * previous push metadata could have flag the metadata registry to
			 * close so don't send a close command if closed.
			 */
			if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID &&
					!registry->metadata_closed) {
				/* And ask to close it for this session registry. */
				(void) close_metadata(registry, ua_sess->consumer);
			}
		}

		cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
		pthread_mutex_unlock(&ua_sess->lock);
	}

	/* Free memory after the RCU grace period. */
	call_rcu(&lta->pid_n.head, delete_ust_app_rcu);

	rcu_read_unlock();
	return;
}
2934
2935 /*
2936 * Return traceable_app_count
2937 */
2938 unsigned long ust_app_list_count(void)
2939 {
2940 unsigned long count;
2941
2942 rcu_read_lock();
2943 count = lttng_ht_get_count(ust_app_ht);
2944 rcu_read_unlock();
2945
2946 return count;
2947 }
2948
2949 /*
2950 * Fill events array with all events name of all registered apps.
2951 */
2952 int ust_app_list_events(struct lttng_event **events)
2953 {
2954 int ret, handle;
2955 size_t nbmem, count = 0;
2956 struct lttng_ht_iter iter;
2957 struct ust_app *app;
2958 struct lttng_event *tmp_event;
2959
2960 nbmem = UST_APP_EVENT_LIST_SIZE;
2961 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
2962 if (tmp_event == NULL) {
2963 PERROR("zmalloc ust app events");
2964 ret = -ENOMEM;
2965 goto error;
2966 }
2967
2968 rcu_read_lock();
2969
2970 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
2971 struct lttng_ust_tracepoint_iter uiter;
2972
2973 health_code_update();
2974
2975 if (!app->compatible) {
2976 /*
2977 * TODO: In time, we should notice the caller of this error by
2978 * telling him that this is a version error.
2979 */
2980 continue;
2981 }
2982 handle = ustctl_tracepoint_list(app->sock);
2983 if (handle < 0) {
2984 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
2985 ERR("UST app list events getting handle failed for app pid %d",
2986 app->pid);
2987 }
2988 continue;
2989 }
2990
2991 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
2992 &uiter)) != -LTTNG_UST_ERR_NOENT) {
2993 /* Handle ustctl error. */
2994 if (ret < 0) {
2995 free(tmp_event);
2996 if (ret != -LTTNG_UST_ERR_EXITING || ret != -EPIPE) {
2997 ERR("UST app tp list get failed for app %d with ret %d",
2998 app->sock, ret);
2999 } else {
3000 DBG3("UST app tp list get failed. Application is dead");
3001 }
3002 goto rcu_error;
3003 }
3004
3005 health_code_update();
3006 if (count >= nbmem) {
3007 /* In case the realloc fails, we free the memory */
3008 void *ptr;
3009
3010 DBG2("Reallocating event list from %zu to %zu entries", nbmem,
3011 2 * nbmem);
3012 nbmem *= 2;
3013 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event));
3014 if (ptr == NULL) {
3015 PERROR("realloc ust app events");
3016 free(tmp_event);
3017 ret = -ENOMEM;
3018 goto rcu_error;
3019 }
3020 tmp_event = ptr;
3021 }
3022 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
3023 tmp_event[count].loglevel = uiter.loglevel;
3024 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
3025 tmp_event[count].pid = app->pid;
3026 tmp_event[count].enabled = -1;
3027 count++;
3028 }
3029 }
3030
3031 ret = count;
3032 *events = tmp_event;
3033
3034 DBG2("UST app list events done (%zu events)", count);
3035
3036 rcu_error:
3037 rcu_read_unlock();
3038 error:
3039 health_code_update();
3040 return ret;
3041 }
3042
3043 /*
3044 * Fill events array with all events name of all registered apps.
3045 */
3046 int ust_app_list_event_fields(struct lttng_event_field **fields)
3047 {
3048 int ret, handle;
3049 size_t nbmem, count = 0;
3050 struct lttng_ht_iter iter;
3051 struct ust_app *app;
3052 struct lttng_event_field *tmp_event;
3053
3054 nbmem = UST_APP_EVENT_LIST_SIZE;
3055 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3056 if (tmp_event == NULL) {
3057 PERROR("zmalloc ust app event fields");
3058 ret = -ENOMEM;
3059 goto error;
3060 }
3061
3062 rcu_read_lock();
3063
3064 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3065 struct lttng_ust_field_iter uiter;
3066
3067 health_code_update();
3068
3069 if (!app->compatible) {
3070 /*
3071 * TODO: In time, we should notice the caller of this error by
3072 * telling him that this is a version error.
3073 */
3074 continue;
3075 }
3076 handle = ustctl_tracepoint_field_list(app->sock);
3077 if (handle < 0) {
3078 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3079 ERR("UST app list field getting handle failed for app pid %d",
3080 app->pid);
3081 }
3082 continue;
3083 }
3084
3085 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
3086 &uiter)) != -LTTNG_UST_ERR_NOENT) {
3087 /* Handle ustctl error. */
3088 if (ret < 0) {
3089 free(tmp_event);
3090 if (ret != -LTTNG_UST_ERR_EXITING || ret != -EPIPE) {
3091 ERR("UST app tp list field failed for app %d with ret %d",
3092 app->sock, ret);
3093 } else {
3094 DBG3("UST app tp list field failed. Application is dead");
3095 }
3096 goto rcu_error;
3097 }
3098
3099 health_code_update();
3100 if (count >= nbmem) {
3101 /* In case the realloc fails, we free the memory */
3102 void *ptr;
3103
3104 DBG2("Reallocating event field list from %zu to %zu entries", nbmem,
3105 2 * nbmem);
3106 nbmem *= 2;
3107 ptr = realloc(tmp_event, nbmem * sizeof(struct lttng_event_field));
3108 if (ptr == NULL) {
3109 PERROR("realloc ust app event fields");
3110 free(tmp_event);
3111 ret = -ENOMEM;
3112 goto rcu_error;
3113 }
3114 tmp_event = ptr;
3115 }
3116
3117 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
3118 tmp_event[count].type = uiter.type;
3119 tmp_event[count].nowrite = uiter.nowrite;
3120
3121 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
3122 tmp_event[count].event.loglevel = uiter.loglevel;
3123 tmp_event[count].event.type = LTTNG_UST_TRACEPOINT;
3124 tmp_event[count].event.pid = app->pid;
3125 tmp_event[count].event.enabled = -1;
3126 count++;
3127 }
3128 }
3129
3130 ret = count;
3131 *fields = tmp_event;
3132
3133 DBG2("UST app list event fields done (%zu events)", count);
3134
3135 rcu_error:
3136 rcu_read_unlock();
3137 error:
3138 health_code_update();
3139 return ret;
3140 }
3141
/*
 * Free and clean all traceable apps of the global list.
 *
 * Should _NOT_ be called with RCU read-side lock held since the actual hash
 * table destruction (ht_cleanup_push) must happen outside of it.
 */
void ust_app_clean_list(void)
{
	int ret;
	struct ust_app *app;
	struct lttng_ht_iter iter;

	DBG2("UST app cleaning registered apps hash table");

	rcu_read_lock();

	/* Remove every app and defer its deletion past the grace period. */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ret = lttng_ht_del(ust_app_ht, &iter);
		assert(!ret);
		call_rcu(&app->pid_n.head, delete_ust_app_rcu);
	}

	/* Cleanup socket hash table */
	cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
			sock_n.node) {
		ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
		assert(!ret);
	}

	/* Cleanup notify socket hash table */
	cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
			notify_sock_n.node) {
		ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
		assert(!ret);
	}
	rcu_read_unlock();

	/* Destroy is done only when the ht is empty */
	ht_cleanup_push(ust_app_ht);
	ht_cleanup_push(ust_app_ht_by_sock);
	ht_cleanup_push(ust_app_ht_by_notify_sock);
}
3183
/*
 * Init UST app hash tables (by PID, by command socket and by notify socket).
 */
void ust_app_ht_alloc(void)
{
	/* NOTE(review): lttng_ht_new() failures are not checked here. */
	ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
}
3193
/*
 * For a specific UST session, disable the channel for all registered apps.
 *
 * Applications without a session for usess are skipped; per-app failures are
 * also skipped so the remaining apps are still processed.
 */
int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	if (usess == NULL || uchan == NULL) {
		ERR("Disabling UST global channel with NULL values");
		ret = -1;
		goto error;
	}

	DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
			uchan->name, usess->id);

	rcu_read_lock();

	/* For every registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ht_iter uiter;
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		/* Get channel */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the session is found for the app, the channel must be there */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		/* The channel must not be already disabled */
		assert(ua_chan->enabled == 1);

		/* Disable channel onto application */
		ret = disable_ust_app_channel(ua_sess, ua_chan, app);
		if (ret < 0) {
			/* XXX: We might want to report this error at some point... */
			continue;
		}
	}

	rcu_read_unlock();

error:
	return ret;
}
3256
3257 /*
3258 * For a specific UST session, enable the channel for all registered apps.
3259 */
3260 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
3261 struct ltt_ust_channel *uchan)
3262 {
3263 int ret = 0;
3264 struct lttng_ht_iter iter;
3265 struct ust_app *app;
3266 struct ust_app_session *ua_sess;
3267
3268 if (usess == NULL || uchan == NULL) {
3269 ERR("Adding UST global channel to NULL values");
3270 ret = -1;
3271 goto error;
3272 }
3273
3274 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
3275 uchan->name, usess->id);
3276
3277 rcu_read_lock();
3278
3279 /* For every registered applications */
3280 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3281 if (!app->compatible) {
3282 /*
3283 * TODO: In time, we should notice the caller of this error by
3284 * telling him that this is a version error.
3285 */
3286 continue;
3287 }
3288 ua_sess = lookup_session_by_app(usess, app);
3289 if (ua_sess == NULL) {
3290 continue;
3291 }
3292
3293 /* Enable channel onto application */
3294 ret = enable_ust_app_channel(ua_sess, uchan, app);
3295 if (ret < 0) {
3296 /* XXX: We might want to report this error at some point... */
3297 continue;
3298 }
3299 }
3300
3301 rcu_read_unlock();
3302
3303 error:
3304 return ret;
3305 }
3306
/*
 * Disable an event in a channel and for a specific session, for all
 * registered apps.
 *
 * Apps missing the session, the channel or the event are skipped with a
 * debug message; per-app disable failures are also skipped.
 */
int ust_app_disable_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app disabling event %s for all apps in channel "
			"%s for session id %" PRIu64,
			uevent->attr.name, uchan->name, usess->id);

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			/* Next app */
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node == NULL) {
			DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
					"Skipping", uchan->name, usess->id, app->pid);
			continue;
		}
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		/* Lookup the event inside the app channel. */
		lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &uiter);
		ua_event_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_event_node == NULL) {
			DBG2("Event %s not found in channel %s for app pid %d."
					"Skipping", uevent->attr.name, uchan->name, app->pid);
			continue;
		}
		ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);

		ret = disable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			/* XXX: Report error someday... */
			continue;
		}
	}

	rcu_read_unlock();

	return ret;
}
3372
3373 /*
3374 * For a specific UST session and UST channel, the event for all
3375 * registered apps.
3376 */
3377 int ust_app_disable_all_event_glb(struct ltt_ust_session *usess,
3378 struct ltt_ust_channel *uchan)
3379 {
3380 int ret = 0;
3381 struct lttng_ht_iter iter, uiter;
3382 struct lttng_ht_node_str *ua_chan_node;
3383 struct ust_app *app;
3384 struct ust_app_session *ua_sess;
3385 struct ust_app_channel *ua_chan;
3386 struct ust_app_event *ua_event;
3387
3388 DBG("UST app disabling all event for all apps in channel "
3389 "%s for session id %" PRIu64, uchan->name, usess->id);
3390
3391 rcu_read_lock();
3392
3393 /* For all registered applications */
3394 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3395 if (!app->compatible) {
3396 /*
3397 * TODO: In time, we should notice the caller of this error by
3398 * telling him that this is a version error.
3399 */
3400 continue;
3401 }
3402 ua_sess = lookup_session_by_app(usess, app);
3403 if (!ua_sess) {
3404 /* The application has problem or is probably dead. */
3405 continue;
3406 }
3407
3408 /* Lookup channel in the ust app session */
3409 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3410 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3411 /* If the channel is not found, there is a code flow error */
3412 assert(ua_chan_node);
3413
3414 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3415
3416 /* Disable each events of channel */
3417 cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
3418 node.node) {
3419 ret = disable_ust_app_event(ua_sess, ua_event, app);
3420 if (ret < 0) {
3421 /* XXX: Report error someday... */
3422 continue;
3423 }
3424 }
3425 }
3426
3427 rcu_read_unlock();
3428
3429 return ret;
3430 }
3431
3432 /*
3433 * For a specific UST session, create the channel for all registered apps.
3434 */
int ust_app_create_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan)
{
	int ret = 0, created;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_session *ua_sess = NULL;

	/* Very wrong code flow */
	assert(usess);
	assert(uchan);

	DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
			uchan->name, usess->id);

	rcu_read_lock();

	/* For every registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		/*
		 * Create session on the tracer side and add it to app session HT. Note
		 * that if session exist, it will simply return a pointer to the ust
		 * app session.
		 */
		ret = create_ust_app_session(usess, app, &ua_sess, &created);
		if (ret < 0) {
			switch (ret) {
			case -ENOTCONN:
				/*
				 * The application's socket is not valid. Either a bad socket
				 * or a timeout on it. We can't inform the caller that for a
				 * specific app, the session failed so lets continue here.
				 */
				continue;
			case -ENOMEM:
			default:
				/* Fatal for the whole operation; stop iterating. */
				goto error_rcu_unlock;
			}
		}
		assert(ua_sess);

		/* Serialize channel creation on this app session. */
		pthread_mutex_lock(&ua_sess->lock);
		if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
					sizeof(uchan->name))) {
			/* The metadata channel is created through a dedicated path. */
			struct ustctl_consumer_channel_attr attr;
			copy_channel_attr_to_ustctl(&attr, &uchan->attr);
			ret = create_ust_app_metadata(ua_sess, app, usess->consumer,
					&attr);
		} else {
			/* Create channel onto application. We don't need the chan ref. */
			ret = create_ust_app_channel(ua_sess, uchan, app,
					LTTNG_UST_CHAN_PER_CPU, usess, NULL);
		}
		pthread_mutex_unlock(&ua_sess->lock);
		if (ret < 0) {
			if (ret == -ENOMEM) {
				/* No more memory is a fatal error. Stop right now. */
				goto error_rcu_unlock;
			}
			/* Cleanup the created session if it's the case. */
			if (created) {
				destroy_app_session(app, ua_sess);
			}
		}
	}

error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
3512
3513 /*
3514 * Enable event for a specific session and channel on the tracer.
3515 */
int ust_app_enable_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app enabling event %s for all apps for session id %" PRIu64,
			uevent->attr.name, usess->id);

	/*
	 * NOTE: At this point, this function is called only if the session and
	 * channel passed are already created for all apps. and enabled on the
	 * tracer also.
	 */

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has problem or is probably dead. */
			continue;
		}

		/* Per-app session lock held until next_app; released on each path. */
		pthread_mutex_lock(&ua_sess->lock);

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the channel is not found, there is a code flow error */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		/* Get event node */
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel);
		if (ua_event == NULL) {
			DBG3("UST app enable event %s not found for app PID %d."
					"Skipping app", uevent->attr.name, app->pid);
			goto next_app;
		}

		ret = enable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			/* Unlock before bailing out of the whole iteration. */
			pthread_mutex_unlock(&ua_sess->lock);
			goto error;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

error:
	rcu_read_unlock();
	return ret;
}
3585
3586 /*
3587 * For a specific existing UST session and UST channel, creates the event for
3588 * all registered apps.
3589 */
int ust_app_create_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	DBG("UST app creating event %s for all apps for session id %" PRIu64,
			uevent->attr.name, usess->id);

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has problem or is probably dead. */
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);
		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the channel is not found, there is a code flow error */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
		pthread_mutex_unlock(&ua_sess->lock);
		if (ret < 0) {
			/* An already-existing event is benign; anything else stops us. */
			if (ret != -LTTNG_UST_ERR_EXIST) {
				/* Possible value at this point: -ENOMEM. If so, we stop! */
				break;
			}
			DBG2("UST app event %s already exist on app PID %d",
					uevent->attr.name, app->pid);
			continue;
		}
	}

	rcu_read_unlock();

	return ret;
}
3646
3647 /*
3648 * Start tracing for a specific UST session and app.
3649 */
static
int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;

	DBG("Starting tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	/* Silently ignore applications speaking an incompatible ABI. */
	if (!app->compatible) {
		goto end;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		/* The session is in teardown process. Ignore and continue. */
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	/* Upon restart, we skip the setup, already done */
	if (ua_sess->started) {
		goto skip_setup;
	}

	/* Create directories if consumer is LOCAL and has a path defined. */
	if (usess->consumer->type == CONSUMER_DST_LOCAL &&
			strlen(usess->consumer->dst.trace_path) > 0) {
		ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
				S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
		if (ret < 0) {
			/* An already-existing directory is acceptable. */
			if (ret != -EEXIST) {
				ERR("Trace directory creation error");
				goto error_unlock;
			}
		}
	}

	/*
	 * Create the metadata for the application. This returns gracefully if a
	 * metadata was already set for the session.
	 */
	ret = create_ust_app_metadata(ua_sess, app, usess->consumer, NULL);
	if (ret < 0) {
		goto error_unlock;
	}

	health_code_update();

skip_setup:
	/* This start the UST tracing */
	ret = ustctl_start_session(app->sock, ua_sess->handle);
	if (ret < 0) {
		/* EPIPE/EXITING simply mean the application died; not an error. */
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error starting tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app start session failed. Application is dead.");
		}
		goto error_unlock;
	}

	/* Indicate that the session has been started once */
	ua_sess->started = 1;

	pthread_mutex_unlock(&ua_sess->lock);

	health_code_update();

	/* Quiescent wait after starting trace */
	ret = ustctl_wait_quiescent(app->sock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

end:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
3739
3740 /*
3741 * Stop tracing for a specific UST session and app.
3742 */
static
int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	DBG("Stopping tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	/* Silently ignore applications speaking an incompatible ABI. */
	if (!app->compatible) {
		goto end_no_session;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		goto end_no_session;
	}

	pthread_mutex_lock(&ua_sess->lock);

	/*
	 * If started = 0, it means that stop trace has been called for a session
	 * that was never started. It's possible since we can have a fail start
	 * from either the application manager thread or the command thread. Simply
	 * indicate that this is a stop error.
	 */
	if (!ua_sess->started) {
		goto error_rcu_unlock;
	}

	health_code_update();

	/* This inhibits UST tracing */
	ret = ustctl_stop_session(app->sock, ua_sess->handle);
	if (ret < 0) {
		/* EPIPE/EXITING simply mean the application died; not an error. */
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error stopping tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app stop session failed. Application is dead.");
		}
		goto error_rcu_unlock;
	}

	health_code_update();

	/* Quiescent wait after stopping trace */
	ret = ustctl_wait_quiescent(app->sock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

	health_code_update();

	registry = get_session_registry(ua_sess);
	assert(registry);

	if (!registry->metadata_closed) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);
	}

	pthread_mutex_unlock(&ua_sess->lock);
end_no_session:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_rcu_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
3820
3821 /*
3822 * Flush buffers for a specific UST session and app.
3823 */
static
int ust_app_flush_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	DBG("Flushing buffers for ust app pid %d", app->pid);

	rcu_read_lock();

	/* Silently ignore applications speaking an incompatible ABI. */
	if (!app->compatible) {
		goto end_no_session;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		goto end_no_session;
	}

	pthread_mutex_lock(&ua_sess->lock);

	health_code_update();

	/* Flushing buffers */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		health_code_update();
		/* Channels must have been sent to the app before flushing. */
		assert(ua_chan->is_sent);
		ret = ustctl_sock_flush_buffer(app->sock, ua_chan->obj);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app PID %d channel %s flush failed with ret %d",
						app->pid, ua_chan->name, ret);
			} else {
				DBG3("UST app failed to flush %s. Application is dead.",
						ua_chan->name);
				/* No need to continue. */
				break;
			}
			/* Continuing flushing all buffers */
			continue;
		}
	}

	health_code_update();

	pthread_mutex_unlock(&ua_sess->lock);
end_no_session:
	rcu_read_unlock();
	health_code_update();
	return 0;
}
3878
3879 /*
3880 * Destroy a specific UST session in apps.
3881 */
3882 static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
3883 {
3884 int ret;
3885 struct ust_app_session *ua_sess;
3886 struct lttng_ht_iter iter;
3887 struct lttng_ht_node_u64 *node;
3888
3889 DBG("Destroy tracing for ust app pid %d", app->pid);
3890
3891 rcu_read_lock();
3892
3893 if (!app->compatible) {
3894 goto end;
3895 }
3896
3897 __lookup_session_by_app(usess, app, &iter);
3898 node = lttng_ht_iter_get_node_u64(&iter);
3899 if (node == NULL) {
3900 /* Session is being or is deleted. */
3901 goto end;
3902 }
3903 ua_sess = caa_container_of(node, struct ust_app_session, node);
3904
3905 health_code_update();
3906 destroy_app_session(app, ua_sess);
3907
3908 health_code_update();
3909
3910 /* Quiescent wait after stopping trace */
3911 ret = ustctl_wait_quiescent(app->sock);
3912 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
3913 ERR("UST app wait quiescent failed for app pid %d ret %d",
3914 app->pid, ret);
3915 }
3916 end:
3917 rcu_read_unlock();
3918 health_code_update();
3919 return 0;
3920 }
3921
3922 /*
3923 * Start tracing for the UST session.
3924 */
3925 int ust_app_start_trace_all(struct ltt_ust_session *usess)
3926 {
3927 int ret = 0;
3928 struct lttng_ht_iter iter;
3929 struct ust_app *app;
3930
3931 DBG("Starting all UST traces");
3932
3933 rcu_read_lock();
3934
3935 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3936 ret = ust_app_start_trace(usess, app);
3937 if (ret < 0) {
3938 /* Continue to next apps even on error */
3939 continue;
3940 }
3941 }
3942
3943 rcu_read_unlock();
3944
3945 return 0;
3946 }
3947
3948 /*
3949 * Start tracing for the UST session.
3950 */
int ust_app_stop_trace_all(struct ltt_ust_session *usess)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;

	DBG("Stopping all UST traces");

	rcu_read_lock();

	/* Stop the session on every registered application. */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		ret = ust_app_stop_trace(usess, app);
		if (ret < 0) {
			/* Continue to next apps even on error */
			continue;
		}
	}

	/* Flush buffers and push metadata (for UID buffers). */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		/* Flush all per UID buffers associated to that session. */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct ust_registry_session *ust_session_reg;
			struct buffer_reg_channel *reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				/* Ignore request if no consumer is found for the session. */
				continue;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {
				/*
				 * The following call will print error values so the return
				 * code is of little importance because whatever happens, we
				 * have to try them all.
				 */
				(void) consumer_flush_channel(socket, reg_chan->consumer_key);
			}

			ust_session_reg = reg->registry->reg.ust;
			if (!ust_session_reg->metadata_closed) {
				/* Push metadata. */
				(void) push_metadata(ust_session_reg, usess->consumer);
			}
		}

		break;
	}
	case LTTNG_BUFFER_PER_PID:
		/* Per-PID buffers are flushed through each application itself. */
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			ret = ust_app_flush_trace(usess, app);
			if (ret < 0) {
				/* Continue to next apps even on error */
				continue;
			}
		}
		break;
	default:
		/* Unknown buffer type is a code flow error. */
		assert(0);
		break;
	}

	rcu_read_unlock();

	return 0;
}
4026
4027 /*
4028 * Destroy app UST session.
4029 */
4030 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
4031 {
4032 int ret = 0;
4033 struct lttng_ht_iter iter;
4034 struct ust_app *app;
4035
4036 DBG("Destroy all UST traces");
4037
4038 rcu_read_lock();
4039
4040 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4041 ret = destroy_trace(usess, app);
4042 if (ret < 0) {
4043 /* Continue to next apps even on error */
4044 continue;
4045 }
4046 }
4047
4048 rcu_read_unlock();
4049
4050 return 0;
4051 }
4052
4053 /*
4054 * Add channels/events from UST global domain to registered apps at sock.
4055 */
void ust_app_global_update(struct ltt_ust_session *usess, int sock)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct ust_app *app;
	struct ust_app_session *ua_sess = NULL;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;

	assert(usess);
	assert(sock >= 0);

	DBG2("UST app global update for app sock %d for session id %" PRIu64, sock,
			usess->id);

	rcu_read_lock();

	app = find_app_by_sock(sock);
	if (app == NULL) {
		/*
		 * Application can be unregistered before so this is possible hence
		 * simply stopping the update.
		 */
		DBG3("UST app update failed to find app sock %d", sock);
		goto error;
	}

	/* Incompatible ABI version; nothing to push to this app. */
	if (!app->compatible) {
		goto error;
	}

	/* Returns the existing app session if one was already created. */
	ret = create_ust_app_session(usess, app, &ua_sess, NULL);
	if (ret < 0) {
		/* Tracer is probably gone or ENOMEM. */
		goto error;
	}
	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	/*
	 * We can iterate safely here over all UST app session since the create ust
	 * app session above made a shadow copy of the UST global domain from the
	 * ltt ust session.
	 */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		/*
		 * For a metadata channel, handle it differently.
		 */
		if (!strncmp(ua_chan->name, DEFAULT_METADATA_NAME,
					sizeof(ua_chan->name))) {
			ret = create_ust_app_metadata(ua_sess, app, usess->consumer,
					&ua_chan->attr);
			if (ret < 0) {
				goto error_unlock;
			}
			/* Remove it from the hash table and continue!. */
			ret = lttng_ht_del(ua_sess->channels, &iter);
			assert(!ret);
			delete_ust_app_channel(-1, ua_chan, app);
			continue;
		} else {
			ret = do_create_channel(app, usess, ua_sess, ua_chan);
			if (ret < 0) {
				/*
				 * Stop everything. On error, the application failed, no more
				 * file descriptor are available or ENOMEM so stopping here is
				 * the only thing we can do for now.
				 */
				goto error_unlock;
			}
		}

		/*
		 * Add context using the list so they are enabled in the same order the
		 * user added them.
		 */
		cds_list_for_each_entry(ua_ctx, &ua_chan->ctx_list, list) {
			ret = create_ust_channel_context(ua_chan, ua_ctx, app);
			if (ret < 0) {
				goto error_unlock;
			}
		}


		/* For each events */
		cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
				node.node) {
			ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
			if (ret < 0) {
				goto error_unlock;
			}
		}
	}

	pthread_mutex_unlock(&ua_sess->lock);

	/* If the session is already active, start tracing on this app too. */
	if (usess->start_trace) {
		ret = ust_app_start_trace(usess, app);
		if (ret < 0) {
			goto error;
		}

		DBG2("UST trace started for app pid %d", app->pid);
	}

	/* Everything went well at this point. */
	rcu_read_unlock();
	return;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
error:
	/* On any failure, tear down whatever app session was created. */
	if (ua_sess) {
		destroy_app_session(app, ua_sess);
	}
	rcu_read_unlock();
	return;
}
4177
4178 /*
4179 * Add context to a specific channel for global UST domain.
4180 */
int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
{
	int ret = 0;
	struct lttng_ht_node_str *ua_chan_node;
	struct lttng_ht_iter iter, uiter;
	struct ust_app_channel *ua_chan = NULL;
	struct ust_app_session *ua_sess;
	struct ust_app *app;

	rcu_read_lock();

	/* Add the context to the channel of every registered application. */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		/* Per-app session lock; released at next_app on every path. */
		pthread_mutex_lock(&ua_sess->lock);
		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node == NULL) {
			/* Channel not on this app; skip it. */
			goto next_app;
		}
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
				node);
		ret = create_ust_app_channel_context(ua_sess, ua_chan, &uctx->ctx, app);
		if (ret < 0) {
			goto next_app;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

	rcu_read_unlock();
	return ret;
}
4226
4227 /*
4228 * Enable event for a channel from a UST session for a specific PID.
4229 */
int ust_app_enable_event_pid(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app enabling event %s for PID %d", uevent->attr.name, pid);

	rcu_read_lock();

	app = ust_app_find_by_pid(pid);
	if (app == NULL) {
		ERR("UST app enable event per PID %d not found", pid);
		ret = -1;
		goto end;
	}

	/* Incompatible ABI version is silently skipped, not an error. */
	if (!app->compatible) {
		ret = 0;
		goto end;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (!ua_sess) {
		/* The application has problem or is probably dead. */
		ret = 0;
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);
	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	/* If the channel is not found, there is a code flow error */
	assert(ua_chan_node);

	ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

	/* Create the event if missing, otherwise just enable it. */
	ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
			uevent->filter, uevent->attr.loglevel);
	if (ua_event == NULL) {
		ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
		if (ret < 0) {
			goto end_unlock;
		}
	} else {
		ret = enable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			goto end_unlock;
		}
	}

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
end:
	rcu_read_unlock();
	return ret;
}
4293
4294 /*
4295 * Disable event for a channel from a UST session for a specific PID.
4296 */
4297 int ust_app_disable_event_pid(struct ltt_ust_session *usess,
4298 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
4299 {
4300 int ret = 0;
4301 struct lttng_ht_iter iter;
4302 struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
4303 struct ust_app *app;
4304 struct ust_app_session *ua_sess;
4305 struct ust_app_channel *ua_chan;
4306 struct ust_app_event *ua_event;
4307
4308 DBG("UST app disabling event %s for PID %d", uevent->attr.name, pid);
4309
4310 rcu_read_lock();
4311
4312 app = ust_app_find_by_pid(pid);
4313 if (app == NULL) {
4314 ERR("UST app disable event per PID %d not found", pid);
4315 ret = -1;
4316 goto error;
4317 }
4318
4319 if (!app->compatible) {
4320 ret = 0;
4321 goto error;
4322 }
4323
4324 ua_sess = lookup_session_by_app(usess, app);
4325 if (!ua_sess) {
4326 /* The application has problem or is probably dead. */
4327 goto error;
4328 }
4329
4330 /* Lookup channel in the ust app session */
4331 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
4332 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
4333 if (ua_chan_node == NULL) {
4334 /* Channel does not exist, skip disabling */
4335 goto error;
4336 }
4337 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4338
4339 lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &iter);
4340 ua_event_node = lttng_ht_iter_get_node_str(&iter);
4341 if (ua_event_node == NULL) {
4342 /* Event does not exist, skip disabling */
4343 goto error;
4344 }
4345 ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);
4346
4347 ret = disable_ust_app_event(ua_sess, ua_event, app);
4348 if (ret < 0) {
4349 goto error;
4350 }
4351
4352 error:
4353 rcu_read_unlock();
4354 return ret;
4355 }
4356
4357 /*
4358 * Calibrate registered applications.
4359 */
4360 int ust_app_calibrate_glb(struct lttng_ust_calibrate *calibrate)
4361 {
4362 int ret = 0;
4363 struct lttng_ht_iter iter;
4364 struct ust_app *app;
4365
4366 rcu_read_lock();
4367
4368 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4369 if (!app->compatible) {
4370 /*
4371 * TODO: In time, we should notice the caller of this error by
4372 * telling him that this is a version error.
4373 */
4374 continue;
4375 }
4376
4377 health_code_update();
4378
4379 ret = ustctl_calibrate(app->sock, calibrate);
4380 if (ret < 0) {
4381 switch (ret) {
4382 case -ENOSYS:
4383 /* Means that it's not implemented on the tracer side. */
4384 ret = 0;
4385 break;
4386 default:
4387 DBG2("Calibrate app PID %d returned with error %d",
4388 app->pid, ret);
4389 break;
4390 }
4391 }
4392 }
4393
4394 DBG("UST app global domain calibration finished");
4395
4396 rcu_read_unlock();
4397
4398 health_code_update();
4399
4400 return ret;
4401 }
4402
4403 /*
4404 * Receive registration and populate the given msg structure.
4405 *
4406 * On success return 0 else a negative value returned by the ustctl call.
4407 */
int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
{
	int ret;
	uint32_t pid, ppid, uid, gid;

	assert(msg);

	/* Receive the full registration message from the application socket. */
	ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
			&pid, &ppid, &uid, &gid,
			&msg->bits_per_long,
			&msg->uint8_t_alignment,
			&msg->uint16_t_alignment,
			&msg->uint32_t_alignment,
			&msg->uint64_t_alignment,
			&msg->long_alignment,
			&msg->byte_order,
			msg->name);
	if (ret < 0) {
		switch (-ret) {
		case EPIPE:
		case ECONNRESET:
		case LTTNG_UST_ERR_EXITING:
			/* Application died during registration; not fatal for us. */
			DBG3("UST app recv reg message failed. Application died");
			break;
		case LTTNG_UST_ERR_UNSUP_MAJOR:
			ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
					msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
					LTTNG_UST_ABI_MINOR_VERSION);
			break;
		default:
			ERR("UST app recv reg message failed with ret %d", ret);
			break;
		}
		goto error;
	}
	/* Convert the raw uint32_t credentials into their POSIX types. */
	msg->pid = (pid_t) pid;
	msg->ppid = (pid_t) ppid;
	msg->uid = (uid_t) uid;
	msg->gid = (gid_t) gid;

error:
	return ret;
}
4451
4452 /*
4453 * Return a ust app channel object using the application object and the channel
4454 * object descriptor has a key. If not found, NULL is returned. A RCU read side
4455 * lock MUST be acquired before calling this function.
4456 */
4457 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
4458 int objd)
4459 {
4460 struct lttng_ht_node_ulong *node;
4461 struct lttng_ht_iter iter;
4462 struct ust_app_channel *ua_chan = NULL;
4463
4464 assert(app);
4465
4466 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
4467 node = lttng_ht_iter_get_node_ulong(&iter);
4468 if (node == NULL) {
4469 DBG2("UST app channel find by objd %d not found", objd);
4470 goto error;
4471 }
4472
4473 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
4474
4475 error:
4476 return ua_chan;
4477 }
4478
4479 /*
4480 * Reply to a register channel notification from an application on the notify
4481 * socket. The channel metadata is also created.
4482 *
4483 * The session UST registry lock is acquired in this function.
4484 *
4485 * On success 0 is returned else a negative value.
4486 */
4487 static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
4488 size_t nr_fields, struct ustctl_field *fields)
4489 {
4490 int ret, ret_code = 0;
4491 uint32_t chan_id, reg_count;
4492 uint64_t chan_reg_key;
4493 enum ustctl_channel_header type;
4494 struct ust_app *app;
4495 struct ust_app_channel *ua_chan;
4496 struct ust_app_session *ua_sess;
4497 struct ust_registry_session *registry;
4498 struct ust_registry_channel *chan_reg;
4499
4500 rcu_read_lock();
4501
4502 /* Lookup application. If not found, there is a code flow error. */
4503 app = find_app_by_notify_sock(sock);
4504 if (!app) {
4505 DBG("Application socket %d is being teardown. Abort event notify",
4506 sock);
4507 ret = 0;
4508 free(fields);
4509 goto error_rcu_unlock;
4510 }
4511
4512 /* Lookup channel by UST object descriptor. */
4513 ua_chan = find_channel_by_objd(app, cobjd);
4514 if (!ua_chan) {
4515 DBG("Application channel is being teardown. Abort event notify");
4516 ret = 0;
4517 free(fields);
4518 goto error_rcu_unlock;
4519 }
4520
4521 assert(ua_chan->session);
4522 ua_sess = ua_chan->session;
4523
4524 /* Get right session registry depending on the session buffer type. */
4525 registry = get_session_registry(ua_sess);
4526 assert(registry);
4527
4528 /* Depending on the buffer type, a different channel key is used. */
4529 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
4530 chan_reg_key = ua_chan->tracing_channel_id;
4531 } else {
4532 chan_reg_key = ua_chan->key;
4533 }
4534
4535 pthread_mutex_lock(&registry->lock);
4536
4537 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
4538 assert(chan_reg);
4539
4540 if (!chan_reg->register_done) {
4541 reg_count = ust_registry_get_event_count(chan_reg);
4542 if (reg_count < 31) {
4543 type = USTCTL_CHANNEL_HEADER_COMPACT;
4544 } else {
4545 type = USTCTL_CHANNEL_HEADER_LARGE;
4546 }
4547
4548 chan_reg->nr_ctx_fields = nr_fields;
4549 chan_reg->ctx_fields = fields;
4550 chan_reg->header_type = type;
4551 } else {
4552 /* Get current already assigned values. */
4553 type = chan_reg->header_type;
4554 free(fields);
4555 /* Set to NULL so the error path does not do a double free. */
4556 fields = NULL;
4557 }
4558 /* Channel id is set during the object creation. */
4559 chan_id = chan_reg->chan_id;
4560
4561 /* Append to metadata */
4562 if (!chan_reg->metadata_dumped) {
4563 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
4564 if (ret_code) {
4565 ERR("Error appending channel metadata (errno = %d)", ret_code);
4566 goto reply;
4567 }
4568 }
4569
4570 reply:
4571 DBG3("UST app replying to register channel key %" PRIu64
4572 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
4573 ret_code);
4574
4575 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
4576 if (ret < 0) {
4577 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4578 ERR("UST app reply channel failed with ret %d", ret);
4579 } else {
4580 DBG3("UST app reply channel failed. Application died");
4581 }
4582 goto error;
4583 }
4584
4585 /* This channel registry registration is completed. */
4586 chan_reg->register_done = 1;
4587
4588 error:
4589 pthread_mutex_unlock(&registry->lock);
4590 error_rcu_unlock:
4591 rcu_read_unlock();
4592 if (ret) {
4593 free(fields);
4594 }
4595 return ret;
4596 }
4597
/*
 * Add event to the UST channel registry. When the event is added to the
 * registry, the metadata is also created. Once done, this replies to the
 * application with the appropriate error code.
 *
 * This function takes ownership of sig, fields and model_emf_uri: they are
 * either freed on the early-abort paths below or handed off to
 * ust_registry_create_event().
 *
 * The session UST registry lock is acquired in the function.
 *
 * On success 0 is returned else a negative value.
 */
static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
	char *sig, size_t nr_fields, struct ustctl_field *fields, int loglevel,
	char *model_emf_uri)
{
	int ret, ret_code;
	uint32_t event_id = 0;
	uint64_t chan_reg_key;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		DBG("Application socket %d is being teardown. Abort event notify",
				sock);
		ret = 0;
		/* Owned buffers must be released on this abort path. */
		free(sig);
		free(fields);
		free(model_emf_uri);
		goto error_rcu_unlock;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		DBG("Application channel is being teardown. Abort event notify");
		ret = 0;
		/* Owned buffers must be released on this abort path. */
		free(sig);
		free(fields);
		free(model_emf_uri);
		goto error_rcu_unlock;
	}

	assert(ua_chan->session);
	ua_sess = ua_chan->session;

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Per-UID buffers share one channel key; per-PID use the app's key. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, this call acquires the ownership of the sig, fields
	 * and model_emf_uri meaning any free are done inside it if needed. These
	 * three variables MUST NOT be read/write after this.
	 */
	ret_code = ust_registry_create_event(registry, chan_reg_key,
			sobjd, cobjd, name, sig, nr_fields, fields, loglevel,
			model_emf_uri, ua_sess->buffer_type, &event_id);

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_event(sock, event_id, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply event failed with ret %d", ret);
		} else {
			DBG3("UST app reply event failed. Application died");
		}
		/*
		 * No need to wipe the create event since the application socket will
		 * get close on error hence cleaning up everything by itself.
		 */
		goto error;
	}

	DBG3("UST registry event %s with id %" PRId32 " added successfully",
			name, event_id);

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
4695
/*
 * Handle application notification through the given notify socket.
 *
 * Receives one notification command from the socket and dispatches it:
 * either an event registration or a channel registration. In both cases the
 * dynamically allocated payload buffers are handed off to the callee, which
 * frees them as needed.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_recv_notify(int sock)
{
	int ret;
	enum ustctl_notify_cmd cmd;

	DBG3("UST app receiving notify from sock %d", sock);

	ret = ustctl_recv_notify(sock, &cmd);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app recv notify failed with ret %d", ret);
		} else {
			DBG3("UST app recv notify failed. Application died");
		}
		goto error;
	}

	switch (cmd) {
	case USTCTL_NOTIFY_CMD_EVENT:
	{
		int sobjd, cobjd, loglevel;
		char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register event received");

		/* sig, fields and model_emf_uri are allocated by this call. */
		ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name, &loglevel,
				&sig, &nr_fields, &fields, &model_emf_uri);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv event failed with ret %d", ret);
			} else {
				DBG3("UST app recv event failed. Application died");
			}
			goto error;
		}

		/*
		 * Add event to the UST registry coming from the notify socket. This
		 * call will free if needed the sig, fields and model_emf_uri. This
		 * code path loses the ownsership of these variables and transfer them
		 * to the this function.
		 */
		ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
				fields, loglevel, model_emf_uri);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case USTCTL_NOTIFY_CMD_CHANNEL:
	{
		int sobjd, cobjd;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register channel received");

		/* fields is allocated by this call. */
		ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
				&fields);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv channel failed with ret %d", ret);
			} else {
				DBG3("UST app recv channel failed. Application died");
			}
			goto error;
		}

		/*
		 * The fields ownership are transfered to this function call meaning
		 * that if needed it will be freed. After this, it's invalid to access
		 * fields or clean it up.
		 */
		ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,
				fields);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	default:
		/* Should NEVER happen. */
		assert(0);
	}

error:
	return ret;
}
4793
/*
 * Once the notify socket hangs up, this is called. First, it tries to find the
 * corresponding application. On failure, the call_rcu to close the socket is
 * executed. If an application is found, it tries to delete it from the notify
 * socket hash table. Whathever the result, it proceeds to the call_rcu.
 *
 * Note that an object needs to be allocated here so on ENOMEM failure, the
 * call RCU is not done but the rest of the cleanup is.
 */
void ust_app_notify_sock_unregister(int sock)
{
	int err_enomem = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	/* Carries the fd into the deferred close callback after a grace period. */
	struct ust_app_notify_sock_obj *obj;

	assert(sock >= 0);

	rcu_read_lock();

	obj = zmalloc(sizeof(*obj));
	if (!obj) {
		/*
		 * An ENOMEM is kind of uncool. If this strikes we continue the
		 * procedure but the call_rcu will not be called. In this case, we
		 * accept the fd leak rather than possibly creating an unsynchronized
		 * state between threads.
		 *
		 * TODO: The notify object should be created once the notify socket is
		 * registered and stored independantely from the ust app object. The
		 * tricky part is to synchronize the teardown of the application and
		 * this notify object. Let's keep that in mind so we can avoid this
		 * kind of shenanigans with ENOMEM in the teardown path.
		 */
		err_enomem = 1;
	} else {
		obj->fd = sock;
	}

	DBG("UST app notify socket unregister %d", sock);

	/*
	 * Lookup application by notify socket. If this fails, this means that the
	 * hash table delete has already been done by the application
	 * unregistration process so we can safely close the notify socket in a
	 * call RCU.
	 */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		goto close_socket;
	}

	iter.iter.node = &app->notify_sock_n.node;

	/*
	 * Whatever happens here either we fail or succeed, in both cases we have
	 * to close the socket after a grace period to continue to the call RCU
	 * here. If the deletion is successful, the application is not visible
	 * anymore by other threads and is it fails it means that it was already
	 * deleted from the hash table so either way we just have to close the
	 * socket.
	 */
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

close_socket:
	rcu_read_unlock();

	/*
	 * Close socket after a grace period to avoid for the socket to be reused
	 * before the application object is freed creating potential race between
	 * threads trying to add unique in the global hash table.
	 */
	if (!err_enomem) {
		call_rcu(&obj->head, close_notify_sock_rcu);
	}
}
4870
4871 /*
4872 * Destroy a ust app data structure and free its memory.
4873 */
4874 void ust_app_destroy(struct ust_app *app)
4875 {
4876 if (!app) {
4877 return;
4878 }
4879
4880 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
4881 }
This page took 0.128782 seconds and 4 git commands to generate.