Fix: sessiond should not error on channel creation vs app exit
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2 only,
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18 #define _GNU_SOURCE
19 #include <errno.h>
20 #include <inttypes.h>
21 #include <pthread.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <sys/stat.h>
26 #include <sys/types.h>
27 #include <unistd.h>
28 #include <urcu/compiler.h>
29 #include <lttng/ust-error.h>
30 #include <signal.h>
31
32 #include <common/common.h>
33 #include <common/sessiond-comm/sessiond-comm.h>
34
35 #include "buffer-registry.h"
36 #include "fd-limit.h"
37 #include "health-sessiond.h"
38 #include "ust-app.h"
39 #include "ust-consumer.h"
40 #include "ust-ctl.h"
41 #include "utils.h"
42
/* Forward declaration: flush all buffers of one app's tracing session. */
static
int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);

/* Next available channel key. Access under next_channel_key_lock. */
static uint64_t _next_channel_key;
static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;

/* Next available session ID. Access under next_session_id_lock. */
static uint64_t _next_session_id;
static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
53
54 /*
55 * Return the incremented value of next_channel_key.
56 */
57 static uint64_t get_next_channel_key(void)
58 {
59 uint64_t ret;
60
61 pthread_mutex_lock(&next_channel_key_lock);
62 ret = ++_next_channel_key;
63 pthread_mutex_unlock(&next_channel_key_lock);
64 return ret;
65 }
66
67 /*
68 * Return the atomically incremented value of next_session_id.
69 */
70 static uint64_t get_next_session_id(void)
71 {
72 uint64_t ret;
73
74 pthread_mutex_lock(&next_session_id_lock);
75 ret = ++_next_session_id;
76 pthread_mutex_unlock(&next_session_id_lock);
77 return ret;
78 }
79
80 static void copy_channel_attr_to_ustctl(
81 struct ustctl_consumer_channel_attr *attr,
82 struct lttng_ust_channel_attr *uattr)
83 {
84 /* Copy event attributes since the layout is different. */
85 attr->subbuf_size = uattr->subbuf_size;
86 attr->num_subbuf = uattr->num_subbuf;
87 attr->overwrite = uattr->overwrite;
88 attr->switch_timer_interval = uattr->switch_timer_interval;
89 attr->read_timer_interval = uattr->read_timer_interval;
90 attr->output = uattr->output;
91 }
92
93 /*
94 * Match function for the hash table lookup.
95 *
96 * It matches an ust app event based on three attributes which are the event
97 * name, the filter bytecode and the loglevel.
98 */
99 static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
100 {
101 struct ust_app_event *event;
102 const struct ust_app_ht_key *key;
103
104 assert(node);
105 assert(_key);
106
107 event = caa_container_of(node, struct ust_app_event, node.node);
108 key = _key;
109
110 /* Match the 4 elements of the key: name, filter, loglevel, exclusions */
111
112 /* Event name */
113 if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
114 goto no_match;
115 }
116
117 /* Event loglevel. */
118 if (event->attr.loglevel != key->loglevel) {
119 if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
120 && key->loglevel == 0 && event->attr.loglevel == -1) {
121 /*
122 * Match is accepted. This is because on event creation, the
123 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
124 * -1 are accepted for this loglevel type since 0 is the one set by
125 * the API when receiving an enable event.
126 */
127 } else {
128 goto no_match;
129 }
130 }
131
132 /* One of the filters is NULL, fail. */
133 if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
134 goto no_match;
135 }
136
137 if (key->filter && event->filter) {
138 /* Both filters exists, check length followed by the bytecode. */
139 if (event->filter->len != key->filter->len ||
140 memcmp(event->filter->data, key->filter->data,
141 event->filter->len) != 0) {
142 goto no_match;
143 }
144 }
145
146 /* One of the exclusions is NULL, fail. */
147 if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
148 goto no_match;
149 }
150
151 if (key->exclusion && event->exclusion) {
152 /* Both exclusions exists, check count followed by the names. */
153 if (event->exclusion->count != key->exclusion->count ||
154 memcmp(event->exclusion->names, key->exclusion->names,
155 event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
156 goto no_match;
157 }
158 }
159
160
161 /* Match. */
162 return 1;
163
164 no_match:
165 return 0;
166 }
167
168 /*
169 * Unique add of an ust app event in the given ht. This uses the custom
170 * ht_match_ust_app_event match function and the event name as hash.
171 */
172 static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
173 struct ust_app_event *event)
174 {
175 struct cds_lfht_node *node_ptr;
176 struct ust_app_ht_key key;
177 struct lttng_ht *ht;
178
179 assert(ua_chan);
180 assert(ua_chan->events);
181 assert(event);
182
183 ht = ua_chan->events;
184 key.name = event->attr.name;
185 key.filter = event->filter;
186 key.loglevel = event->attr.loglevel;
187 key.exclusion = event->exclusion;
188
189 node_ptr = cds_lfht_add_unique(ht->ht,
190 ht->hash_fct(event->node.key, lttng_ht_seed),
191 ht_match_ust_app_event, &key, &event->node.node);
192 assert(node_ptr == &event->node.node);
193 }
194
195 /*
196 * Close the notify socket from the given RCU head object. This MUST be called
197 * through a call_rcu().
198 */
199 static void close_notify_sock_rcu(struct rcu_head *head)
200 {
201 int ret;
202 struct ust_app_notify_sock_obj *obj =
203 caa_container_of(head, struct ust_app_notify_sock_obj, head);
204
205 /* Must have a valid fd here. */
206 assert(obj->fd >= 0);
207
208 ret = close(obj->fd);
209 if (ret) {
210 ERR("close notify sock %d RCU", obj->fd);
211 }
212 lttng_fd_put(LTTNG_FD_APPS, 1);
213
214 free(obj);
215 }
216
217 /*
218 * Return the session registry according to the buffer type of the given
219 * session.
220 *
221 * A registry per UID object MUST exists before calling this function or else
222 * it assert() if not found. RCU read side lock must be acquired.
223 */
224 static struct ust_registry_session *get_session_registry(
225 struct ust_app_session *ua_sess)
226 {
227 struct ust_registry_session *registry = NULL;
228
229 assert(ua_sess);
230
231 switch (ua_sess->buffer_type) {
232 case LTTNG_BUFFER_PER_PID:
233 {
234 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
235 if (!reg_pid) {
236 goto error;
237 }
238 registry = reg_pid->registry->reg.ust;
239 break;
240 }
241 case LTTNG_BUFFER_PER_UID:
242 {
243 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
244 ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
245 if (!reg_uid) {
246 goto error;
247 }
248 registry = reg_uid->registry->reg.ust;
249 break;
250 }
251 default:
252 assert(0);
253 };
254
255 error:
256 return registry;
257 }
258
259 /*
260 * Delete ust context safely. RCU read lock must be held before calling
261 * this function.
262 */
263 static
264 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx)
265 {
266 int ret;
267
268 assert(ua_ctx);
269
270 if (ua_ctx->obj) {
271 ret = ustctl_release_object(sock, ua_ctx->obj);
272 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
273 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
274 sock, ua_ctx->obj->handle, ret);
275 }
276 free(ua_ctx->obj);
277 }
278 free(ua_ctx);
279 }
280
281 /*
282 * Delete ust app event safely. RCU read lock must be held before calling
283 * this function.
284 */
285 static
286 void delete_ust_app_event(int sock, struct ust_app_event *ua_event)
287 {
288 int ret;
289
290 assert(ua_event);
291
292 free(ua_event->filter);
293 if (ua_event->exclusion != NULL)
294 free(ua_event->exclusion);
295 if (ua_event->obj != NULL) {
296 ret = ustctl_release_object(sock, ua_event->obj);
297 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
298 ERR("UST app sock %d release event obj failed with ret %d",
299 sock, ret);
300 }
301 free(ua_event->obj);
302 }
303 free(ua_event);
304 }
305
306 /*
307 * Release ust data object of the given stream.
308 *
309 * Return 0 on success or else a negative value.
310 */
311 static int release_ust_app_stream(int sock, struct ust_app_stream *stream)
312 {
313 int ret = 0;
314
315 assert(stream);
316
317 if (stream->obj) {
318 ret = ustctl_release_object(sock, stream->obj);
319 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
320 ERR("UST app sock %d release stream obj failed with ret %d",
321 sock, ret);
322 }
323 lttng_fd_put(LTTNG_FD_APPS, 2);
324 free(stream->obj);
325 }
326
327 return ret;
328 }
329
/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream)
{
	assert(stream);

	/* Best-effort release; the stream memory is freed regardless. */
	(void) release_ust_app_stream(sock, stream);
	free(stream);
}
342
/*
 * We need to execute ht_destroy outside of RCU read-side critical
 * section and outside of call_rcu thread, so we postpone its execution
 * using ht_cleanup_push. It is simpler than to change the semantic of
 * the many callers of delete_ust_app_channel().
 */
static
void delete_ust_app_channel_rcu(struct rcu_head *head)
{
	struct ust_app_channel *ua_chan =
		caa_container_of(head, struct ust_app_channel, rcu_head);

	/* Hand the hash tables to the dedicated cleanup thread. */
	ht_cleanup_push(ua_chan->ctx);
	ht_cleanup_push(ua_chan->events);
	free(ua_chan);
}
359
/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * Tears down, in order: the channel's streams, contexts and events, then
 * the per-PID registry entry, and finally the tracer-side channel object.
 * The channel structure itself is freed through call_rcu().
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_objd, &iter);
		assert(!ret);
		ret = ustctl_release_object(sock, ua_chan->obj);
		/* -EPIPE / EXITING only mean the application has exited. */
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	/* Defer the free so concurrent RCU readers stay safe. */
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}
424
/*
 * Push metadata to consumer socket.
 *
 * RCU read-side lock must be held to guarantee existance of socket.
 * Must be called with the ust app session lock held.
 * Must be called with the registry lock held.
 *
 * On success, return the len of metadata pushed or else a negative value.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset;
	ssize_t ret_val;

	assert(registry);
	assert(socket);

	/*
	 * Means that no metadata was assigned to the session. This can
	 * happens if no start has been done previously.
	 */
	if (!registry->metadata_key) {
		return 0;
	}

	/*
	 * On a push metadata error either the consumer is dead or the
	 * metadata channel has been destroyed because its endpoint
	 * might have died (e.g: relayd), or because the application has
	 * exited. If so, the metadata closed flag is set to 1 so we
	 * deny pushing metadata again which is not valid anymore on the
	 * consumer side.
	 */
	if (registry->metadata_closed) {
		return -EPIPE;
	}

	/* Only the part not yet sent is pushed. */
	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			/* Caller explicitly asked for a zero-length push. */
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't send out. */
	memcpy(metadata_str, registry->metadata + offset, len);
	/* Account the bytes as sent now; rolled back below on failure. */
	registry->metadata_len_sent += len;

push_data:
	ret = consumer_push_metadata(socket, registry->metadata_key,
			metadata_str, len, offset);
	if (ret < 0) {
		/*
		 * There is an acceptable race here between the registry
		 * metadata key assignment and the creation on the
		 * consumer. The session daemon can concurrently push
		 * metadata for this registry while being created on the
		 * consumer since the metadata key of the registry is
		 * assigned *before* it is setup to avoid the consumer
		 * to ask for metadata that could possibly be not found
		 * in the session daemon.
		 *
		 * The metadata will get pushed either by the session
		 * being stopped or the consumer requesting metadata if
		 * that race is triggered.
		 */
		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
			ret = 0;
		}

		/*
		 * Update back the actual metadata len sent since it
		 * failed here.
		 */
		registry->metadata_len_sent -= len;
		ret_val = ret;
		goto error_push;
	}

	free(metadata_str);
	return len;

end:
error:
	if (ret_val) {
		/*
		 * On error, flag the registry that the metadata is
		 * closed. We were unable to push anything and this
		 * means that either the consumer is not responding or
		 * the metadata cache has been destroyed on the
		 * consumer.
		 */
		registry->metadata_closed = 1;
	}
error_push:
	free(metadata_str);
	return ret_val;
}
542
543 /*
544 * For a given application and session, push metadata to consumer.
545 * Either sock or consumer is required : if sock is NULL, the default
546 * socket to send the metadata is retrieved from consumer, if sock
547 * is not NULL we use it to send the metadata.
548 * RCU read-side lock must be held while calling this function,
549 * therefore ensuring existance of registry. It also ensures existance
550 * of socket throughout this function.
551 *
552 * Return 0 on success else a negative error.
553 * Returning a -EPIPE return value means we could not send the metadata,
554 * but it can be caused by recoverable errors (e.g. the application has
555 * terminated concurrently).
556 */
557 static int push_metadata(struct ust_registry_session *registry,
558 struct consumer_output *consumer)
559 {
560 int ret_val;
561 ssize_t ret;
562 struct consumer_socket *socket;
563
564 assert(registry);
565 assert(consumer);
566
567 pthread_mutex_lock(&registry->lock);
568 if (registry->metadata_closed) {
569 ret_val = -EPIPE;
570 goto error;
571 }
572
573 /* Get consumer socket to use to push the metadata.*/
574 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
575 consumer);
576 if (!socket) {
577 ret_val = -1;
578 goto error;
579 }
580
581 ret = ust_app_push_metadata(registry, socket, 0);
582 if (ret < 0) {
583 ret_val = ret;
584 goto error;
585 }
586 pthread_mutex_unlock(&registry->lock);
587 return 0;
588
589 error:
590 pthread_mutex_unlock(&registry->lock);
591 return ret_val;
592 }
593
/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be held unless the application is
 * in the destroy path.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	pthread_mutex_lock(&registry->lock);

	/* Nothing to close if never created or already closed. */
	if (!registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto error;
	}

	ret = consumer_close_metadata(socket, registry->metadata_key);
	if (ret < 0) {
		goto error;
	}

	/* NOTE: the success path intentionally falls through to error. */
error:
	/*
	 * Metadata closed. Even on error this means that the consumer is not
	 * responding or not found so either way a second close should NOT be emit
	 * for this registry.
	 */
	registry->metadata_closed = 1;
end:
	pthread_mutex_unlock(&registry->lock);
	rcu_read_unlock();
	return ret;
}
645
646 /*
647 * We need to execute ht_destroy outside of RCU read-side critical
648 * section and outside of call_rcu thread, so we postpone its execution
649 * using ht_cleanup_push. It is simpler than to change the semantic of
650 * the many callers of delete_ust_app_session().
651 */
652 static
653 void delete_ust_app_session_rcu(struct rcu_head *head)
654 {
655 struct ust_app_session *ua_sess =
656 caa_container_of(head, struct ust_app_session, rcu_head);
657
658 ht_cleanup_push(ua_sess->channels);
659 free(ua_sess);
660 }
661
/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * Pushes pending metadata, closes the per-PID metadata channel, tears down
 * every channel of the session, removes the per-PID buffer registry and
 * releases the tracer-side session handle. The structure itself is freed
 * through call_rcu().
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	/* A session must only ever be deleted once. */
	assert(!ua_sess->deleted);
	ua_sess->deleted = true;

	registry = get_session_registry(ua_sess);
	if (registry) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flag the metadata registry to
		 * close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	/* Tear down every channel of this session. */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	/* Release the tracer-side handle; a dead app is not an error. */
	if (ua_sess->handle != -1) {
		ret = ustctl_release_handle(sock, ua_sess->handle);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
	}
	pthread_mutex_unlock(&ua_sess->lock);

	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
726
/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;

	/* Delete ust app sessions info */
	sock = app->sock;
	/* Invalidate the socket so no further command can use it. */
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/* Hash table destruction goes through the cleanup thread. */
	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_objd);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
}
777
778 /*
779 * URCU intermediate call to delete an UST app.
780 */
781 static
782 void delete_ust_app_rcu(struct rcu_head *head)
783 {
784 struct lttng_ht_node_ulong *node =
785 caa_container_of(head, struct lttng_ht_node_ulong, head);
786 struct ust_app *app =
787 caa_container_of(node, struct ust_app, pid_n);
788
789 DBG3("Call RCU deleting app PID %d", app->pid);
790 delete_ust_app(app);
791 }
792
793 /*
794 * Delete the session from the application ht and delete the data structure by
795 * freeing every object inside and releasing them.
796 */
797 static void destroy_app_session(struct ust_app *app,
798 struct ust_app_session *ua_sess)
799 {
800 int ret;
801 struct lttng_ht_iter iter;
802
803 assert(app);
804 assert(ua_sess);
805
806 iter.iter.node = &ua_sess->node.node;
807 ret = lttng_ht_del(app->sessions, &iter);
808 if (ret) {
809 /* Already scheduled for teardown. */
810 goto end;
811 }
812
813 /* Once deleted, free the data structure. */
814 delete_ust_app_session(app->sock, ua_sess, app);
815
816 end:
817 return;
818 }
819
820 /*
821 * Alloc new UST app session.
822 */
823 static
824 struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
825 {
826 struct ust_app_session *ua_sess;
827
828 /* Init most of the default value by allocating and zeroing */
829 ua_sess = zmalloc(sizeof(struct ust_app_session));
830 if (ua_sess == NULL) {
831 PERROR("malloc");
832 goto error_free;
833 }
834
835 ua_sess->handle = -1;
836 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
837 ua_sess->metadata_attr.type = LTTNG_UST_CHAN_METADATA;
838 pthread_mutex_init(&ua_sess->lock, NULL);
839
840 return ua_sess;
841
842 error_free:
843 return NULL;
844 }
845
846 /*
847 * Alloc new UST app channel.
848 */
849 static
850 struct ust_app_channel *alloc_ust_app_channel(char *name,
851 struct ust_app_session *ua_sess,
852 struct lttng_ust_channel_attr *attr)
853 {
854 struct ust_app_channel *ua_chan;
855
856 /* Init most of the default value by allocating and zeroing */
857 ua_chan = zmalloc(sizeof(struct ust_app_channel));
858 if (ua_chan == NULL) {
859 PERROR("malloc");
860 goto error;
861 }
862
863 /* Setup channel name */
864 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
865 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
866
867 ua_chan->enabled = 1;
868 ua_chan->handle = -1;
869 ua_chan->session = ua_sess;
870 ua_chan->key = get_next_channel_key();
871 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
872 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
873 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
874
875 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
876 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
877
878 /* Copy attributes */
879 if (attr) {
880 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
881 ua_chan->attr.subbuf_size = attr->subbuf_size;
882 ua_chan->attr.num_subbuf = attr->num_subbuf;
883 ua_chan->attr.overwrite = attr->overwrite;
884 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
885 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
886 ua_chan->attr.output = attr->output;
887 }
888 /* By default, the channel is a per cpu channel. */
889 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
890
891 DBG3("UST app channel %s allocated", ua_chan->name);
892
893 return ua_chan;
894
895 error:
896 return NULL;
897 }
898
899 /*
900 * Allocate and initialize a UST app stream.
901 *
902 * Return newly allocated stream pointer or NULL on error.
903 */
904 struct ust_app_stream *ust_app_alloc_stream(void)
905 {
906 struct ust_app_stream *stream = NULL;
907
908 stream = zmalloc(sizeof(*stream));
909 if (stream == NULL) {
910 PERROR("zmalloc ust app stream");
911 goto error;
912 }
913
914 /* Zero could be a valid value for a handle so flag it to -1. */
915 stream->handle = -1;
916
917 error:
918 return stream;
919 }
920
921 /*
922 * Alloc new UST app event.
923 */
924 static
925 struct ust_app_event *alloc_ust_app_event(char *name,
926 struct lttng_ust_event *attr)
927 {
928 struct ust_app_event *ua_event;
929
930 /* Init most of the default value by allocating and zeroing */
931 ua_event = zmalloc(sizeof(struct ust_app_event));
932 if (ua_event == NULL) {
933 PERROR("malloc");
934 goto error;
935 }
936
937 ua_event->enabled = 1;
938 strncpy(ua_event->name, name, sizeof(ua_event->name));
939 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
940 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
941
942 /* Copy attributes */
943 if (attr) {
944 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
945 }
946
947 DBG3("UST app event %s allocated", ua_event->name);
948
949 return ua_event;
950
951 error:
952 return NULL;
953 }
954
955 /*
956 * Alloc new UST app context.
957 */
958 static
959 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context *uctx)
960 {
961 struct ust_app_ctx *ua_ctx;
962
963 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
964 if (ua_ctx == NULL) {
965 goto error;
966 }
967
968 CDS_INIT_LIST_HEAD(&ua_ctx->list);
969
970 if (uctx) {
971 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
972 }
973
974 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
975
976 error:
977 return ua_ctx;
978 }
979
980 /*
981 * Allocate a filter and copy the given original filter.
982 *
983 * Return allocated filter or NULL on error.
984 */
985 static struct lttng_ust_filter_bytecode *alloc_copy_ust_app_filter(
986 struct lttng_ust_filter_bytecode *orig_f)
987 {
988 struct lttng_ust_filter_bytecode *filter = NULL;
989
990 /* Copy filter bytecode */
991 filter = zmalloc(sizeof(*filter) + orig_f->len);
992 if (!filter) {
993 PERROR("zmalloc alloc ust app filter");
994 goto error;
995 }
996
997 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
998
999 error:
1000 return filter;
1001 }
1002
1003 /*
1004 * Find an ust_app using the sock and return it. RCU read side lock must be
1005 * held before calling this helper function.
1006 */
1007 struct ust_app *ust_app_find_by_sock(int sock)
1008 {
1009 struct lttng_ht_node_ulong *node;
1010 struct lttng_ht_iter iter;
1011
1012 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
1013 node = lttng_ht_iter_get_node_ulong(&iter);
1014 if (node == NULL) {
1015 DBG2("UST app find by sock %d not found", sock);
1016 goto error;
1017 }
1018
1019 return caa_container_of(node, struct ust_app, sock_n);
1020
1021 error:
1022 return NULL;
1023 }
1024
1025 /*
1026 * Find an ust_app using the notify sock and return it. RCU read side lock must
1027 * be held before calling this helper function.
1028 */
1029 static struct ust_app *find_app_by_notify_sock(int sock)
1030 {
1031 struct lttng_ht_node_ulong *node;
1032 struct lttng_ht_iter iter;
1033
1034 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1035 &iter);
1036 node = lttng_ht_iter_get_node_ulong(&iter);
1037 if (node == NULL) {
1038 DBG2("UST app find by notify sock %d not found", sock);
1039 goto error;
1040 }
1041
1042 return caa_container_of(node, struct ust_app, notify_sock_n);
1043
1044 error:
1045 return NULL;
1046 }
1047
1048 /*
1049 * Lookup for an ust app event based on event name, filter bytecode and the
1050 * event loglevel.
1051 *
1052 * Return an ust_app_event object or NULL on error.
1053 */
1054 static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
1055 char *name, struct lttng_ust_filter_bytecode *filter, int loglevel,
1056 const struct lttng_event_exclusion *exclusion)
1057 {
1058 struct lttng_ht_iter iter;
1059 struct lttng_ht_node_str *node;
1060 struct ust_app_event *event = NULL;
1061 struct ust_app_ht_key key;
1062
1063 assert(name);
1064 assert(ht);
1065
1066 /* Setup key for event lookup. */
1067 key.name = name;
1068 key.filter = filter;
1069 key.loglevel = loglevel;
1070 /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
1071 key.exclusion = (struct lttng_ust_event_exclusion *)exclusion;
1072
1073 /* Lookup using the event name as hash and a custom match fct. */
1074 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1075 ht_match_ust_app_event, &key, &iter.iter);
1076 node = lttng_ht_iter_get_node_str(&iter);
1077 if (node == NULL) {
1078 goto end;
1079 }
1080
1081 event = caa_container_of(node, struct ust_app_event, node);
1082
1083 end:
1084 return event;
1085 }
1086
1087 /*
1088 * Create the channel context on the tracer.
1089 *
1090 * Called with UST app session lock held.
1091 */
1092 static
1093 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1094 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1095 {
1096 int ret;
1097
1098 health_code_update();
1099
1100 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1101 ua_chan->obj, &ua_ctx->obj);
1102 if (ret < 0) {
1103 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1104 ERR("UST app create channel context failed for app (pid: %d) "
1105 "with ret %d", app->pid, ret);
1106 } else {
1107 /*
1108 * This is normal behavior, an application can die during the
1109 * creation process. Don't report an error so the execution can
1110 * continue normally.
1111 */
1112 ret = 0;
1113 DBG3("UST app disable event failed. Application is dead.");
1114 }
1115 goto error;
1116 }
1117
1118 ua_ctx->handle = ua_ctx->obj->handle;
1119
1120 DBG2("UST app context handle %d created successfully for channel %s",
1121 ua_ctx->handle, ua_chan->name);
1122
1123 error:
1124 health_code_update();
1125 return ret;
1126 }
1127
1128 /*
1129 * Set the filter on the tracer.
1130 */
1131 static
1132 int set_ust_event_filter(struct ust_app_event *ua_event,
1133 struct ust_app *app)
1134 {
1135 int ret;
1136
1137 health_code_update();
1138
1139 if (!ua_event->filter) {
1140 ret = 0;
1141 goto error;
1142 }
1143
1144 ret = ustctl_set_filter(app->sock, ua_event->filter,
1145 ua_event->obj);
1146 if (ret < 0) {
1147 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1148 ERR("UST app event %s filter failed for app (pid: %d) "
1149 "with ret %d", ua_event->attr.name, app->pid, ret);
1150 } else {
1151 /*
1152 * This is normal behavior, an application can die during the
1153 * creation process. Don't report an error so the execution can
1154 * continue normally.
1155 */
1156 ret = 0;
1157 DBG3("UST app filter event failed. Application is dead.");
1158 }
1159 goto error;
1160 }
1161
1162 DBG2("UST filter set successfully for event %s", ua_event->name);
1163
1164 error:
1165 health_code_update();
1166 return ret;
1167 }
1168
1169 /*
1170 * Set event exclusions on the tracer.
1171 */
1172 static
1173 int set_ust_event_exclusion(struct ust_app_event *ua_event,
1174 struct ust_app *app)
1175 {
1176 int ret;
1177
1178 health_code_update();
1179
1180 if (!ua_event->exclusion || !ua_event->exclusion->count) {
1181 ret = 0;
1182 goto error;
1183 }
1184
1185 ret = ustctl_set_exclusion(app->sock, ua_event->exclusion,
1186 ua_event->obj);
1187 if (ret < 0) {
1188 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1189 ERR("UST app event %s exclusions failed for app (pid: %d) "
1190 "with ret %d", ua_event->attr.name, app->pid, ret);
1191 } else {
1192 /*
1193 * This is normal behavior, an application can die during the
1194 * creation process. Don't report an error so the execution can
1195 * continue normally.
1196 */
1197 ret = 0;
1198 DBG3("UST app event exclusion failed. Application is dead.");
1199 }
1200 goto error;
1201 }
1202
1203 DBG2("UST exclusion set successfully for event %s", ua_event->name);
1204
1205 error:
1206 health_code_update();
1207 return ret;
1208 }
1209
1210 /*
1211 * Disable the specified event on to UST tracer for the UST session.
1212 */
1213 static int disable_ust_event(struct ust_app *app,
1214 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1215 {
1216 int ret;
1217
1218 health_code_update();
1219
1220 ret = ustctl_disable(app->sock, ua_event->obj);
1221 if (ret < 0) {
1222 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1223 ERR("UST app event %s disable failed for app (pid: %d) "
1224 "and session handle %d with ret %d",
1225 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1226 } else {
1227 /*
1228 * This is normal behavior, an application can die during the
1229 * creation process. Don't report an error so the execution can
1230 * continue normally.
1231 */
1232 ret = 0;
1233 DBG3("UST app disable event failed. Application is dead.");
1234 }
1235 goto error;
1236 }
1237
1238 DBG2("UST app event %s disabled successfully for app (pid: %d)",
1239 ua_event->attr.name, app->pid);
1240
1241 error:
1242 health_code_update();
1243 return ret;
1244 }
1245
1246 /*
1247 * Disable the specified channel on to UST tracer for the UST session.
1248 */
1249 static int disable_ust_channel(struct ust_app *app,
1250 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1251 {
1252 int ret;
1253
1254 health_code_update();
1255
1256 ret = ustctl_disable(app->sock, ua_chan->obj);
1257 if (ret < 0) {
1258 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1259 ERR("UST app channel %s disable failed for app (pid: %d) "
1260 "and session handle %d with ret %d",
1261 ua_chan->name, app->pid, ua_sess->handle, ret);
1262 } else {
1263 /*
1264 * This is normal behavior, an application can die during the
1265 * creation process. Don't report an error so the execution can
1266 * continue normally.
1267 */
1268 ret = 0;
1269 DBG3("UST app disable channel failed. Application is dead.");
1270 }
1271 goto error;
1272 }
1273
1274 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1275 ua_chan->name, app->pid);
1276
1277 error:
1278 health_code_update();
1279 return ret;
1280 }
1281
1282 /*
1283 * Enable the specified channel on to UST tracer for the UST session.
1284 */
1285 static int enable_ust_channel(struct ust_app *app,
1286 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1287 {
1288 int ret;
1289
1290 health_code_update();
1291
1292 ret = ustctl_enable(app->sock, ua_chan->obj);
1293 if (ret < 0) {
1294 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1295 ERR("UST app channel %s enable failed for app (pid: %d) "
1296 "and session handle %d with ret %d",
1297 ua_chan->name, app->pid, ua_sess->handle, ret);
1298 } else {
1299 /*
1300 * This is normal behavior, an application can die during the
1301 * creation process. Don't report an error so the execution can
1302 * continue normally.
1303 */
1304 ret = 0;
1305 DBG3("UST app enable channel failed. Application is dead.");
1306 }
1307 goto error;
1308 }
1309
1310 ua_chan->enabled = 1;
1311
1312 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1313 ua_chan->name, app->pid);
1314
1315 error:
1316 health_code_update();
1317 return ret;
1318 }
1319
1320 /*
1321 * Enable the specified event on to UST tracer for the UST session.
1322 */
1323 static int enable_ust_event(struct ust_app *app,
1324 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1325 {
1326 int ret;
1327
1328 health_code_update();
1329
1330 ret = ustctl_enable(app->sock, ua_event->obj);
1331 if (ret < 0) {
1332 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1333 ERR("UST app event %s enable failed for app (pid: %d) "
1334 "and session handle %d with ret %d",
1335 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1336 } else {
1337 /*
1338 * This is normal behavior, an application can die during the
1339 * creation process. Don't report an error so the execution can
1340 * continue normally.
1341 */
1342 ret = 0;
1343 DBG3("UST app enable event failed. Application is dead.");
1344 }
1345 goto error;
1346 }
1347
1348 DBG2("UST app event %s enabled successfully for app (pid: %d)",
1349 ua_event->attr.name, app->pid);
1350
1351 error:
1352 health_code_update();
1353 return ret;
1354 }
1355
/*
 * Send channel and stream buffer to application.
 *
 * Streams are removed from the channel's list and freed as soon as they are
 * successfully sent, so on error the remaining (unsent) streams stay on the
 * list for the caller to clean up.
 *
 * Return 0 on success. On error, a negative value is returned; -ENOTCONN
 * specifically means the application exited during the transfer.
 */
static int send_channel_pid_to_ust(struct ust_app *app,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_app_stream *stream, *stmp;

	assert(app);
	assert(ua_sess);
	assert(ua_chan);

	health_code_update();

	DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
			app->sock);

	/* Send channel to the application. */
	ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
	if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
		ret = -ENOTCONN;	/* Caused by app exiting. */
		goto error;
	} else if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
		if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
			ret = -ENOTCONN;	/* Caused by app exiting. */
			goto error;
		} else if (ret < 0) {
			goto error;
		}
		/* We don't need the stream anymore once sent to the tracer. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream);
	}
	/* Flag the channel that it is sent to the application. */
	ua_chan->is_sent = 1;

error:
	health_code_update();
	return ret;
}
1407
/*
 * Create the specified event onto the UST tracer for a UST session.
 *
 * Should be called with session mutex held.
 *
 * After creation, the filter and exclusions (if any) are pushed, and the
 * event is explicitly enabled or disabled to match ua_event->enabled since
 * the tracer creates events disabled. Returns 0 on success (including the
 * app dying mid-operation) or a negative error code.
 */
static
int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
{
	int ret = 0;

	health_code_update();

	/* Create UST event on tracer */
	ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
			&ua_event->obj);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error ustctl create event %s for app pid: %d with ret %d",
					ua_event->attr.name, app->pid, ret);
		} else {
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			ret = 0;
			DBG3("UST app create event failed. Application is dead.");
		}
		goto error;
	}

	ua_event->handle = ua_event->obj->handle;

	DBG2("UST app event %s created successfully for pid:%d",
			ua_event->attr.name, app->pid);

	health_code_update();

	/* Set filter if one is present. */
	if (ua_event->filter) {
		ret = set_ust_event_filter(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/* Set exclusions for the event */
	if (ua_event->exclusion) {
		ret = set_ust_event_exclusion(ua_event, app);
		if (ret < 0) {
			goto error;
		}
	}

	/* If event not enabled, disable it on the tracer */
	if (ua_event->enabled) {
		/*
		 * We now need to explicitly enable the event, since it
		 * is now disabled at creation.
		 */
		ret = enable_ust_event(app, ua_sess, ua_event);
		if (ret < 0) {
			/*
			 * If we hit an EPERM, something is wrong with our enable call. If
			 * we get an EEXIST, there is a problem on the tracer side since we
			 * just created it.
			 */
			switch (ret) {
			case -LTTNG_UST_ERR_PERM:
				/* Code flow problem */
				assert(0);
				/* Fall through when NDEBUG disables assert(). */
			case -LTTNG_UST_ERR_EXIST:
				/* It's OK for our use case. */
				ret = 0;
				break;
			default:
				break;
			}
			goto error;
		}
	} else {
		ret = disable_ust_event(app, ua_sess, ua_event);
		if (ret < 0) {
			/*
			 * If we hit an EPERM, something is wrong with our disable call. If
			 * we get an EEXIST, there is a problem on the tracer side since we
			 * just created it.
			 */
			switch (ret) {
			case -LTTNG_UST_ERR_PERM:
				/* Code flow problem */
				assert(0);
				/* Fall through when NDEBUG disables assert(). */
			case -LTTNG_UST_ERR_EXIST:
				/* It's OK for our use case. */
				ret = 0;
				break;
			default:
				break;
			}
			goto error;
		}
	}

error:
	health_code_update();
	return ret;
}
1516
/*
 * Copy data between an UST app event and a LTT event.
 *
 * Name, enabled state and attributes are copied by value; the filter
 * bytecode and exclusion list are duplicated into fresh allocations owned
 * by ua_event. Allocation failures are tolerated: the corresponding field
 * is simply left NULL.
 */
static void shadow_copy_event(struct ust_app_event *ua_event,
		struct ltt_ust_event *uevent)
{
	size_t exclusion_alloc_size;

	/* Bounded copy; explicit NUL termination below covers truncation. */
	strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
	ua_event->name[sizeof(ua_event->name) - 1] = '\0';

	ua_event->enabled = uevent->enabled;

	/* Copy event attributes */
	memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));

	/* Copy filter bytecode */
	if (uevent->filter) {
		ua_event->filter = alloc_copy_ust_app_filter(uevent->filter);
		/* Filter might be NULL here in case of ENOMEM. */
	}

	/* Copy exclusion data */
	if (uevent->exclusion) {
		/* Header plus one fixed-size name slot per excluded symbol. */
		exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
				LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
		ua_event->exclusion = zmalloc(exclusion_alloc_size);
		if (ua_event->exclusion == NULL) {
			PERROR("malloc");
		} else {
			memcpy(ua_event->exclusion, uevent->exclusion,
					exclusion_alloc_size);
		}
	}
}
1552
/*
 * Copy data between an UST app channel and a LTT channel.
 *
 * Copies the name, tracefile settings, buffer attributes and enabled state,
 * then duplicates every context and every event of the LTT channel into the
 * app channel. Context/event allocation failures are skipped silently, so
 * the copy is best-effort.
 */
static void shadow_copy_channel(struct ust_app_channel *ua_chan,
		struct ltt_ust_channel *uchan)
{
	struct lttng_ht_iter iter;
	struct ltt_ust_event *uevent;
	struct ltt_ust_context *uctx;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;

	DBG2("UST app shadow copy of channel %s started", ua_chan->name);

	/* Bounded copy with explicit NUL termination. */
	strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->tracefile_size = uchan->tracefile_size;
	ua_chan->tracefile_count = uchan->tracefile_count;

	/* Copy event attributes since the layout is different. */
	ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
	ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
	ua_chan->attr.overwrite = uchan->attr.overwrite;
	ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
	ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
	ua_chan->attr.output = uchan->attr.output;
	/*
	 * Note that the attribute channel type is not set since the channel on the
	 * tracing registry side does not have this information.
	 */

	ua_chan->enabled = uchan->enabled;
	ua_chan->tracing_channel_id = uchan->id;

	/* Duplicate every context attached to the LTT channel. */
	cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
		ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
		if (ua_ctx == NULL) {
			/* Best-effort: skip this context on allocation failure. */
			continue;
		}
		lttng_ht_node_init_ulong(&ua_ctx->node,
				(unsigned long) ua_ctx->ctx.ctx);
		lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
		cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
	}

	/* Copy all events from ltt ust channel to ust app channel */
	cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG2("UST event %s not found on shadow copy channel",
					uevent->attr.name);
			ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
			if (ua_event == NULL) {
				/* Best-effort: skip this event on allocation failure. */
				continue;
			}
			shadow_copy_event(ua_event, uevent);
			add_unique_ust_app_event(ua_chan, ua_event);
		}
	}

	DBG3("UST app shadow copy of channel %s done", ua_chan->name);
}
1617
1618 /*
1619 * Copy data between a UST app session and a regular LTT session.
1620 */
1621 static void shadow_copy_session(struct ust_app_session *ua_sess,
1622 struct ltt_ust_session *usess, struct ust_app *app)
1623 {
1624 struct lttng_ht_node_str *ua_chan_node;
1625 struct lttng_ht_iter iter;
1626 struct ltt_ust_channel *uchan;
1627 struct ust_app_channel *ua_chan;
1628 time_t rawtime;
1629 struct tm *timeinfo;
1630 char datetime[16];
1631 int ret;
1632
1633 /* Get date and time for unique app path */
1634 time(&rawtime);
1635 timeinfo = localtime(&rawtime);
1636 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
1637
1638 DBG2("Shadow copy of session handle %d", ua_sess->handle);
1639
1640 ua_sess->tracing_id = usess->id;
1641 ua_sess->id = get_next_session_id();
1642 ua_sess->uid = app->uid;
1643 ua_sess->gid = app->gid;
1644 ua_sess->euid = usess->uid;
1645 ua_sess->egid = usess->gid;
1646 ua_sess->buffer_type = usess->buffer_type;
1647 ua_sess->bits_per_long = app->bits_per_long;
1648 /* There is only one consumer object per session possible. */
1649 ua_sess->consumer = usess->consumer;
1650 ua_sess->output_traces = usess->output_traces;
1651 ua_sess->live_timer_interval = usess->live_timer_interval;
1652 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
1653 &usess->metadata_attr);
1654
1655 switch (ua_sess->buffer_type) {
1656 case LTTNG_BUFFER_PER_PID:
1657 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1658 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
1659 datetime);
1660 break;
1661 case LTTNG_BUFFER_PER_UID:
1662 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1663 DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
1664 break;
1665 default:
1666 assert(0);
1667 goto error;
1668 }
1669 if (ret < 0) {
1670 PERROR("asprintf UST shadow copy session");
1671 assert(0);
1672 goto error;
1673 }
1674
1675 /* Iterate over all channels in global domain. */
1676 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
1677 uchan, node.node) {
1678 struct lttng_ht_iter uiter;
1679
1680 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
1681 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
1682 if (ua_chan_node != NULL) {
1683 /* Session exist. Contiuing. */
1684 continue;
1685 }
1686
1687 DBG2("Channel %s not found on shadow session copy, creating it",
1688 uchan->name);
1689 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
1690 if (ua_chan == NULL) {
1691 /* malloc failed FIXME: Might want to do handle ENOMEM .. */
1692 continue;
1693 }
1694 shadow_copy_channel(ua_chan, uchan);
1695 /*
1696 * The concept of metadata channel does not exist on the tracing
1697 * registry side of the session daemon so this can only be a per CPU
1698 * channel and not metadata.
1699 */
1700 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
1701
1702 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
1703 }
1704
1705 error:
1706 return;
1707 }
1708
/*
 * Lookup session wrapper.
 *
 * Positions *iter at the app's session entry keyed by the tracing session
 * id (usess->id) in app->sessions; check the iterator node for a match.
 */
static
void __lookup_session_by_app(struct ltt_ust_session *usess,
		struct ust_app *app, struct lttng_ht_iter *iter)
{
	/* Get right UST app session from app */
	lttng_ht_lookup(app->sessions, &usess->id, iter);
}
1719
1720 /*
1721 * Return ust app session from the app session hashtable using the UST session
1722 * id.
1723 */
1724 static struct ust_app_session *lookup_session_by_app(
1725 struct ltt_ust_session *usess, struct ust_app *app)
1726 {
1727 struct lttng_ht_iter iter;
1728 struct lttng_ht_node_u64 *node;
1729
1730 __lookup_session_by_app(usess, app, &iter);
1731 node = lttng_ht_iter_get_node_u64(&iter);
1732 if (node == NULL) {
1733 goto error;
1734 }
1735
1736 return caa_container_of(node, struct ust_app_session, node);
1737
1738 error:
1739 return NULL;
1740 }
1741
/*
 * Setup buffer registry per PID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Note: *regp is only assigned on the success paths (existing or newly
 * initialized registry); it is left untouched on error.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
		struct ust_app *app, struct buffer_reg_pid **regp)
{
	int ret = 0;
	struct buffer_reg_pid *reg_pid;

	assert(ua_sess);
	assert(app);

	rcu_read_lock();

	reg_pid = buffer_reg_pid_find(ua_sess->id);
	if (!reg_pid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_pid_create(ua_sess->id, &reg_pid);
		if (ret < 0) {
			goto error;
		}
		buffer_reg_pid_add(reg_pid);
	} else {
		/* Registry already exists; skip initialization. */
		goto end;
	}

	/* Initialize registry. */
	ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor);
	if (ret < 0) {
		goto error;
	}

	DBG3("UST app buffer registry per PID created successfully");

end:
	if (regp) {
		*regp = reg_pid;
	}
error:
	rcu_read_unlock();
	return ret;
}
1796
/*
 * Setup buffer registry per UID for the given session and application. If none
 * is found, a new one is created, added to the global registry and
 * initialized. If regp is valid, it's set with the newly created object.
 *
 * Note: *regp is only assigned on the success paths (existing or newly
 * initialized registry); it is left untouched on error.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
		struct ust_app *app, struct buffer_reg_uid **regp)
{
	int ret = 0;
	struct buffer_reg_uid *reg_uid;

	assert(usess);
	assert(app);

	rcu_read_lock();

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	if (!reg_uid) {
		/*
		 * This is the create channel path meaning that if there is NO
		 * registry available, we have to create one for this session.
		 */
		ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
				LTTNG_DOMAIN_UST, &reg_uid);
		if (ret < 0) {
			goto error;
		}
		buffer_reg_uid_add(reg_uid);
	} else {
		/* Registry already exists; skip initialization. */
		goto end;
	}

	/*
	 * Initialize registry. The app pointer is NULL since a per-UID registry
	 * is shared across applications and not tied to a single one.
	 */
	ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
			app->bits_per_long, app->uint8_t_alignment,
			app->uint16_t_alignment, app->uint32_t_alignment,
			app->uint64_t_alignment, app->long_alignment,
			app->byte_order, app->version.major,
			app->version.minor);
	if (ret < 0) {
		goto error;
	}
	/* Add node to teardown list of the session. */
	cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);

	DBG3("UST app buffer registry per UID created successfully");

end:
	if (regp) {
		*regp = reg_uid;
	}
error:
	rcu_read_unlock();
	return ret;
}
1854
/*
 * Create a session on the tracer side for the given app.
 *
 * On success, ua_sess_ptr is populated with the session pointer or else left
 * untouched. If the session was created, is_created is set to 1. On error,
 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
 * be NULL.
 *
 * Returns 0 on success or else a negative code which is either -ENOMEM or
 * -ENOTCONN which is the default code if the ustctl_create_session fails.
 * An application dying during creation is NOT reported as an error; it is
 * mapped to -ENOTCONN so callers can ignore it gracefully.
 */
static int create_ust_app_session(struct ltt_ust_session *usess,
		struct ust_app *app, struct ust_app_session **ua_sess_ptr,
		int *is_created)
{
	int ret, created = 0;
	struct ust_app_session *ua_sess;

	assert(usess);
	assert(app);
	assert(ua_sess_ptr);

	health_code_update();

	/* Reuse an existing app session when one matches the tracing session. */
	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
				app->pid, usess->id);
		ua_sess = alloc_ust_app_session(app);
		if (ua_sess == NULL) {
			/* Only malloc can failed so something is really wrong */
			ret = -ENOMEM;
			goto error;
		}
		shadow_copy_session(ua_sess, usess, app);
		created = 1;
	}

	/* Make sure the proper buffer registry (PID or UID) exists. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		/* Init local registry. */
		ret = setup_buffer_reg_pid(ua_sess, app, NULL);
		if (ret < 0) {
			goto error;
		}
		break;
	case LTTNG_BUFFER_PER_UID:
		/* Look for a global registry. If none exists, create one. */
		ret = setup_buffer_reg_uid(usess, app, NULL);
		if (ret < 0) {
			goto error;
		}
		break;
	default:
		assert(0);
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Handle of -1 means the tracer-side session was never created. */
	if (ua_sess->handle == -1) {
		ret = ustctl_create_session(app->sock);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("Creating session for app pid %d with ret %d",
						app->pid, ret);
			} else {
				DBG("UST app creating session failed. Application is dead");
				/*
				 * This is normal behavior, an application can die during the
				 * creation process. Don't report an error so the execution can
				 * continue normally. This will get flagged ENOTCONN and the
				 * caller will handle it.
				 */
				ret = 0;
			}
			delete_ust_app_session(-1, ua_sess, app);
			if (ret != -ENOMEM) {
				/*
				 * Tracer is probably gone or got an internal error so let's
				 * behave like it will soon unregister or not usable.
				 */
				ret = -ENOTCONN;
			}
			goto error;
		}

		/* On success, ustctl_create_session returns the session handle. */
		ua_sess->handle = ret;

		/* Add ust app session to app's HT */
		lttng_ht_node_init_u64(&ua_sess->node,
				ua_sess->tracing_id);
		lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);

		DBG2("UST app session created successfully with handle %d", ret);
	}

	*ua_sess_ptr = ua_sess;
	if (is_created) {
		*is_created = created;
	}

	/* Everything went well. */
	ret = 0;

error:
	health_code_update();
	return ret;
}
1965
1966 /*
1967 * Match function for a hash table lookup of ust_app_ctx.
1968 *
1969 * It matches an ust app context based on the context type and, in the case
1970 * of perf counters, their name.
1971 */
1972 static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
1973 {
1974 struct ust_app_ctx *ctx;
1975 const struct lttng_ust_context *key;
1976
1977 assert(node);
1978 assert(_key);
1979
1980 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
1981 key = _key;
1982
1983 /* Context type */
1984 if (ctx->ctx.ctx != key->ctx) {
1985 goto no_match;
1986 }
1987
1988 /* Check the name in the case of perf thread counters. */
1989 if (key->ctx == LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER) {
1990 if (strncmp(key->u.perf_counter.name,
1991 ctx->ctx.u.perf_counter.name,
1992 sizeof(key->u.perf_counter.name))) {
1993 goto no_match;
1994 }
1995 }
1996
1997 /* Match. */
1998 return 1;
1999
2000 no_match:
2001 return 0;
2002 }
2003
2004 /*
2005 * Lookup for an ust app context from an lttng_ust_context.
2006 *
2007 * Must be called while holding RCU read side lock.
2008 * Return an ust_app_ctx object or NULL on error.
2009 */
2010 static
2011 struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
2012 struct lttng_ust_context *uctx)
2013 {
2014 struct lttng_ht_iter iter;
2015 struct lttng_ht_node_ulong *node;
2016 struct ust_app_ctx *app_ctx = NULL;
2017
2018 assert(uctx);
2019 assert(ht);
2020
2021 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2022 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2023 ht_match_ust_app_ctx, uctx, &iter.iter);
2024 node = lttng_ht_iter_get_node_ulong(&iter);
2025 if (!node) {
2026 goto end;
2027 }
2028
2029 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2030
2031 end:
2032 return app_ctx;
2033 }
2034
2035 /*
2036 * Create a context for the channel on the tracer.
2037 *
2038 * Called with UST app session lock held and a RCU read side lock.
2039 */
2040 static
2041 int create_ust_app_channel_context(struct ust_app_session *ua_sess,
2042 struct ust_app_channel *ua_chan, struct lttng_ust_context *uctx,
2043 struct ust_app *app)
2044 {
2045 int ret = 0;
2046 struct ust_app_ctx *ua_ctx;
2047
2048 DBG2("UST app adding context to channel %s", ua_chan->name);
2049
2050 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2051 if (ua_ctx) {
2052 ret = -EEXIST;
2053 goto error;
2054 }
2055
2056 ua_ctx = alloc_ust_app_ctx(uctx);
2057 if (ua_ctx == NULL) {
2058 /* malloc failed */
2059 ret = -1;
2060 goto error;
2061 }
2062
2063 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
2064 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
2065 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
2066
2067 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2068 if (ret < 0) {
2069 goto error;
2070 }
2071
2072 error:
2073 return ret;
2074 }
2075
2076 /*
2077 * Enable on the tracer side a ust app event for the session and channel.
2078 *
2079 * Called with UST app session lock held.
2080 */
2081 static
2082 int enable_ust_app_event(struct ust_app_session *ua_sess,
2083 struct ust_app_event *ua_event, struct ust_app *app)
2084 {
2085 int ret;
2086
2087 ret = enable_ust_event(app, ua_sess, ua_event);
2088 if (ret < 0) {
2089 goto error;
2090 }
2091
2092 ua_event->enabled = 1;
2093
2094 error:
2095 return ret;
2096 }
2097
2098 /*
2099 * Disable on the tracer side a ust app event for the session and channel.
2100 */
2101 static int disable_ust_app_event(struct ust_app_session *ua_sess,
2102 struct ust_app_event *ua_event, struct ust_app *app)
2103 {
2104 int ret;
2105
2106 ret = disable_ust_event(app, ua_sess, ua_event);
2107 if (ret < 0) {
2108 goto error;
2109 }
2110
2111 ua_event->enabled = 0;
2112
2113 error:
2114 return ret;
2115 }
2116
2117 /*
2118 * Lookup ust app channel for session and disable it on the tracer side.
2119 */
2120 static
2121 int disable_ust_app_channel(struct ust_app_session *ua_sess,
2122 struct ust_app_channel *ua_chan, struct ust_app *app)
2123 {
2124 int ret;
2125
2126 ret = disable_ust_channel(app, ua_sess, ua_chan);
2127 if (ret < 0) {
2128 goto error;
2129 }
2130
2131 ua_chan->enabled = 0;
2132
2133 error:
2134 return ret;
2135 }
2136
2137 /*
2138 * Lookup ust app channel for session and enable it on the tracer side. This
2139 * MUST be called with a RCU read side lock acquired.
2140 */
2141 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2142 struct ltt_ust_channel *uchan, struct ust_app *app)
2143 {
2144 int ret = 0;
2145 struct lttng_ht_iter iter;
2146 struct lttng_ht_node_str *ua_chan_node;
2147 struct ust_app_channel *ua_chan;
2148
2149 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2150 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2151 if (ua_chan_node == NULL) {
2152 DBG2("Unable to find channel %s in ust session id %" PRIu64,
2153 uchan->name, ua_sess->tracing_id);
2154 goto error;
2155 }
2156
2157 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2158
2159 ret = enable_ust_channel(app, ua_sess, ua_chan);
2160 if (ret < 0) {
2161 goto error;
2162 }
2163
2164 error:
2165 return ret;
2166 }
2167
/*
 * Ask the consumer to create a channel and get it if successful.
 *
 * FD accounting: one FD is reserved for the channel itself and
 * DEFAULT_UST_STREAM_FD_NUM per expected stream; each error label below
 * releases exactly the FDs reserved before the failure point.
 *
 * Return 0 on success or else a negative value.
 */
static int do_consumer_create_channel(struct ltt_ust_session *usess,
		struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
		int bitness, struct ust_registry_session *registry)
{
	int ret;
	unsigned int nb_fd = 0;
	struct consumer_socket *socket;

	assert(usess);
	assert(ua_sess);
	assert(ua_chan);
	assert(registry);

	rcu_read_lock();
	health_code_update();

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error;
	}

	health_code_update();

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error;
	}

	/*
	 * Ask consumer to create channel. The consumer will return the number of
	 * stream we have to expect.
	 */
	ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
			registry);
	if (ret < 0) {
		goto error_ask;
	}

	/*
	 * Compute the number of fd needed before receiving them. It must be 2 per
	 * stream (2 being the default value here).
	 */
	nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;

	/* Reserve the amount of file descriptor we need. */
	ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create channel");
		goto error_fd_get_stream;
	}

	health_code_update();

	/*
	 * Now get the channel from the consumer. This call wil populate the stream
	 * list of that channel and set the ust objects.
	 */
	if (usess->consumer->enabled) {
		ret = ust_consumer_get_channel(socket, ua_chan);
		if (ret < 0) {
			goto error_destroy;
		}
	}

	rcu_read_unlock();
	return 0;

error_destroy:
	/* Release the per-stream FDs reserved above. */
	lttng_fd_put(LTTNG_FD_APPS, nb_fd);
error_fd_get_stream:
	/*
	 * Initiate a destroy channel on the consumer since we had an error
	 * handling it on our side. The return value is of no importance since we
	 * already have a ret value set by the previous error that we need to
	 * return.
	 */
	(void) ust_consumer_destroy_channel(socket, ua_chan);
error_ask:
	/* Release the single channel FD reserved first. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
error:
	health_code_update();
	rcu_read_unlock();
	return ret;
}
2261
2262 /*
2263 * Duplicate the ust data object of the ust app stream and save it in the
2264 * buffer registry stream.
2265 *
2266 * Return 0 on success or else a negative value.
2267 */
2268 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2269 struct ust_app_stream *stream)
2270 {
2271 int ret;
2272
2273 assert(reg_stream);
2274 assert(stream);
2275
2276 /* Reserve the amount of file descriptor we need. */
2277 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2278 if (ret < 0) {
2279 ERR("Exhausted number of available FD upon duplicate stream");
2280 goto error;
2281 }
2282
2283 /* Duplicate object for stream once the original is in the registry. */
2284 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2285 reg_stream->obj.ust);
2286 if (ret < 0) {
2287 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2288 reg_stream->obj.ust, stream->obj, ret);
2289 lttng_fd_put(LTTNG_FD_APPS, 2);
2290 goto error;
2291 }
2292 stream->handle = stream->obj->handle;
2293
2294 error:
2295 return ret;
2296 }
2297
/*
 * Duplicate the ust data object of the ust app. channel and save it in the
 * buffer registry channel.
 *
 * Return 0 on success or else a negative value.
 */
static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan)
{
	int ret;

	assert(reg_chan);
	assert(ua_chan);

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate channel");
		goto error_fd_get;
	}

	/* Duplicate object for stream once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
	if (ret < 0) {
		ERR("Duplicate channel obj from %p to %p failed with ret: %d",
				reg_chan->obj.ust, ua_chan->obj, ret);
		goto error;
	}
	/* Expose the duplicated object's handle on the app channel. */
	ua_chan->handle = ua_chan->obj->handle;

	return 0;

error:
	/* Give back the file descriptor reserved above. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
error_fd_get:
	return ret;
}
2335
/*
 * For a given channel buffer registry, setup all streams of the given ust
 * application channel.
 *
 * Ownership of each stream's ust object is transferred to the registry:
 * the application's stream list is emptied and its entries deleted.
 *
 * Return 0 on success or else a negative value.
 */
static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan)
{
	int ret = 0;
	struct ust_app_stream *stream, *stmp;

	assert(reg_chan);
	assert(ua_chan);

	DBG2("UST app setup buffer registry stream");

	/* Send all streams to application. */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		struct buffer_reg_stream *reg_stream;

		ret = buffer_reg_stream_create(&reg_stream);
		if (ret < 0) {
			goto error;
		}

		/*
		 * Keep original pointer and nullify it in the stream so the delete
		 * stream call does not release the object.
		 */
		reg_stream->obj.ust = stream->obj;
		stream->obj = NULL;
		buffer_reg_stream_add(reg_stream, reg_chan);

		/* We don't need the streams anymore. */
		cds_list_del(&stream->list);
		delete_ust_app_stream(-1, stream);
	}

error:
	return ret;
}
2378
/*
 * Create a buffer registry channel for the given session registry and
 * application channel object. If regp pointer is valid, it's set with the
 * created object. On success, the new registry channel IS added to the
 * session registry (both the channel registry and the buffer registry
 * channel list).
 *
 * Return 0 on success else a negative value.
 */
static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
{
	int ret;
	struct buffer_reg_channel *reg_chan = NULL;

	assert(reg_sess);
	assert(ua_chan);

	DBG2("UST app creating buffer registry channel for %s", ua_chan->name);

	/* Create buffer registry channel. */
	ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
	if (ret < 0) {
		goto error_create;
	}
	assert(reg_chan);
	/* Mirror the application channel's consumer key and buffer geometry. */
	reg_chan->consumer_key = ua_chan->key;
	reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
	reg_chan->num_subbuf = ua_chan->attr.num_subbuf;

	/* Create and add a channel registry to session. */
	ret = ust_registry_channel_add(reg_sess->reg.ust,
			ua_chan->tracing_channel_id);
	if (ret < 0) {
		goto error;
	}
	buffer_reg_channel_add(reg_sess, reg_chan);

	if (regp) {
		*regp = reg_chan;
	}

	return 0;

error:
	/* Safe because the registry channel object was not added to any HT. */
	buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
error_create:
	return ret;
}
2428
/*
 * Setup buffer registry channel for the given session registry and
 * application channel object: transfer the streams and the channel ust
 * object from the application channel into the registry channel.
 *
 * On failure, reg_chan is removed from the session registry and destroyed.
 *
 * Return 0 on success else a negative value.
 */
static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
		struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan)
{
	int ret;

	assert(reg_sess);
	assert(reg_chan);
	assert(ua_chan);
	assert(ua_chan->obj);

	DBG2("UST app setup buffer registry channel for %s", ua_chan->name);

	/* Setup all streams for the registry. */
	ret = setup_buffer_reg_streams(reg_chan, ua_chan);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Take ownership of the channel ust object; nullify it in the app
	 * channel so a later delete does not release it twice.
	 */
	reg_chan->obj.ust = ua_chan->obj;
	ua_chan->obj = NULL;

	return 0;

error:
	buffer_reg_channel_remove(reg_sess, reg_chan);
	buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
	return ret;
}
2463
2464 /*
2465 * Send buffer registry channel to the application.
2466 *
2467 * Return 0 on success else a negative value.
2468 */
2469 static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
2470 struct ust_app *app, struct ust_app_session *ua_sess,
2471 struct ust_app_channel *ua_chan)
2472 {
2473 int ret;
2474 struct buffer_reg_stream *reg_stream;
2475
2476 assert(reg_chan);
2477 assert(app);
2478 assert(ua_sess);
2479 assert(ua_chan);
2480
2481 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
2482
2483 ret = duplicate_channel_object(reg_chan, ua_chan);
2484 if (ret < 0) {
2485 goto error;
2486 }
2487
2488 /* Send channel to the application. */
2489 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
2490 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2491 ret = -ENOTCONN; /* Caused by app exiting. */
2492 goto error;
2493 } else if (ret < 0) {
2494 goto error;
2495 }
2496
2497 health_code_update();
2498
2499 /* Send all streams to application. */
2500 pthread_mutex_lock(&reg_chan->stream_list_lock);
2501 cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
2502 struct ust_app_stream stream;
2503
2504 ret = duplicate_stream_object(reg_stream, &stream);
2505 if (ret < 0) {
2506 goto error_stream_unlock;
2507 }
2508
2509 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
2510 if (ret < 0) {
2511 (void) release_ust_app_stream(-1, &stream);
2512 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2513 ret = -ENOTCONN; /* Caused by app exiting. */
2514 goto error_stream_unlock;
2515 } else if (ret < 0) {
2516 goto error_stream_unlock;
2517 }
2518 goto error_stream_unlock;
2519 }
2520
2521 /*
2522 * The return value is not important here. This function will output an
2523 * error if needed.
2524 */
2525 (void) release_ust_app_stream(-1, &stream);
2526 }
2527 ua_chan->is_sent = 1;
2528
2529 error_stream_unlock:
2530 pthread_mutex_unlock(&reg_chan->stream_list_lock);
2531 error:
2532 return ret;
2533 }
2534
/*
 * Create and send to the application the created buffers with per UID buffers.
 *
 * The first application registering for a given UID/bitness triggers the
 * creation of the registry channel and the consumer-side buffers; later
 * applications only get the already-created buffers sent to them.
 *
 * Return 0 on success else a negative value. Returns -ENOTCONN if the
 * application exited concurrently.
 */
static int create_channel_per_uid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *reg_chan;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be find, there is a code flow problem or a
	 * teardown race.
	 */
	assert(reg_uid);

	reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
			reg_uid);
	if (!reg_chan) {
		/* Create the buffer registry channel object. */
		ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
		if (ret < 0) {
			ERR("Error creating the UST channel \"%s\" registry instance",
				ua_chan->name);
			goto error;
		}
		assert(reg_chan);

		/*
		 * Create the buffers on the consumer side. This call populates the
		 * ust app channel object with all streams and data object.
		 */
		ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
				app->bits_per_long, reg_uid->registry->reg.ust);
		if (ret < 0) {
			ERR("Error creating UST channel \"%s\" on the consumer daemon",
				ua_chan->name);

			/*
			 * Let's remove the previously created buffer registry channel so
			 * it's not visible anymore in the session registry.
			 */
			ust_registry_channel_del_free(reg_uid->registry->reg.ust,
					ua_chan->tracing_channel_id);
			buffer_reg_channel_remove(reg_uid->registry, reg_chan);
			buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
			goto error;
		}

		/*
		 * Setup the streams and add it to the session registry.
		 */
		ret = setup_buffer_reg_channel(reg_uid->registry, ua_chan, reg_chan);
		if (ret < 0) {
			ERR("Error setting up UST channel \"%s\"",
				ua_chan->name);
			goto error;
		}

	}

	/* Send buffers to the application. */
	ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
	if (ret < 0) {
		/* -ENOTCONN means the app exited concurrently; not an error. */
		if (ret != -ENOTCONN) {
			ERR("Error sending channel to application");
		}
		goto error;
	}

error:
	return ret;
}
2620
/*
 * Create and send to the application the created buffers with per PID buffers.
 *
 * Return 0 on success else a negative value. Returns -ENOTCONN if the
 * application exited concurrently.
 */
static int create_channel_per_pid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct ust_registry_session *registry;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per PID buffers", ua_chan->name);

	rcu_read_lock();

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Create and add a new channel registry to session. */
	ret = ust_registry_channel_add(registry, ua_chan->key);
	if (ret < 0) {
		ERR("Error creating the UST channel \"%s\" registry instance",
			ua_chan->name);
		goto error;
	}

	/* Create and get channel on the consumer side. */
	ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
			app->bits_per_long, registry);
	if (ret < 0) {
		ERR("Error creating UST channel \"%s\" on the consumer daemon",
			ua_chan->name);
		goto error;
	}

	ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
	if (ret < 0) {
		/* -ENOTCONN means the app exited concurrently; not an error. */
		if (ret != -ENOTCONN) {
			ERR("Error sending channel to application");
		}
		goto error;
	}

error:
	rcu_read_unlock();
	return ret;
}
2674
2675 /*
2676 * From an already allocated ust app channel, create the channel buffers if
2677 * need and send it to the application. This MUST be called with a RCU read
2678 * side lock acquired.
2679 *
2680 * Return 0 on success or else a negative value. Returns -ENOTCONN if
2681 * the application exited concurrently.
2682 */
2683 static int do_create_channel(struct ust_app *app,
2684 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2685 struct ust_app_channel *ua_chan)
2686 {
2687 int ret;
2688
2689 assert(app);
2690 assert(usess);
2691 assert(ua_sess);
2692 assert(ua_chan);
2693
2694 /* Handle buffer type before sending the channel to the application. */
2695 switch (usess->buffer_type) {
2696 case LTTNG_BUFFER_PER_UID:
2697 {
2698 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
2699 if (ret < 0) {
2700 goto error;
2701 }
2702 break;
2703 }
2704 case LTTNG_BUFFER_PER_PID:
2705 {
2706 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
2707 if (ret < 0) {
2708 goto error;
2709 }
2710 break;
2711 }
2712 default:
2713 assert(0);
2714 ret = -EINVAL;
2715 goto error;
2716 }
2717
2718 /* Initialize ust objd object using the received handle and add it. */
2719 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
2720 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
2721
2722 /* If channel is not enabled, disable it on the tracer */
2723 if (!ua_chan->enabled) {
2724 ret = disable_ust_channel(app, ua_sess, ua_chan);
2725 if (ret < 0) {
2726 goto error;
2727 }
2728 }
2729
2730 error:
2731 return ret;
2732 }
2733
/*
 * Create UST app channel and create it on the tracer. Set ua_chanp of the
 * newly created channel if not NULL.
 *
 * If a channel of the same name already exists in the app session, it is
 * returned as-is and nothing is created.
 *
 * Called with UST app session lock and RCU read-side lock held.
 *
 * Return 0 on success or else a negative value. Returns -ENOTCONN if
 * the application exited concurrently.
 */
static int create_ust_app_channel(struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app,
		enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
		struct ust_app_channel **ua_chanp)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node != NULL) {
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		goto end;
	}

	ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
	if (ua_chan == NULL) {
		/* Only malloc can fail here */
		ret = -ENOMEM;
		goto error_alloc;
	}
	shadow_copy_channel(ua_chan, uchan);

	/* Set channel type. */
	ua_chan->attr.type = type;

	ret = do_create_channel(app, usess, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
			app->pid);

	/* Only add the channel if successful on the tracer side. */
	lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);

end:
	if (ua_chanp) {
		*ua_chanp = ua_chan;
	}

	/* Everything went well. */
	return 0;

error:
	/* Only close the app socket if the channel was already sent to it. */
	delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
error_alloc:
	return ret;
}
2796
/*
 * Create UST app event and create it on the tracer side.
 *
 * Return 0 on success, -EEXIST if an identical event (name, filter,
 * loglevel, exclusion) already exists on the channel, or another negative
 * value on error.
 *
 * Called with ust app session mutex held.
 */
static
int create_ust_app_event(struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
		struct ust_app *app)
{
	int ret = 0;
	struct ust_app_event *ua_event;

	/* Get event node */
	ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
			uevent->filter, uevent->attr.loglevel, uevent->exclusion);
	if (ua_event != NULL) {
		ret = -EEXIST;
		goto end;
	}

	/* Does not exist so create one */
	ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
	if (ua_event == NULL) {
		/* Only malloc can failed so something is really wrong */
		ret = -ENOMEM;
		goto end;
	}
	shadow_copy_event(ua_event, uevent);

	/* Create it on the tracer side */
	ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
	if (ret < 0) {
		/* Not found previously means that it does not exist on the tracer */
		assert(ret != -LTTNG_UST_ERR_EXIST);
		goto error;
	}

	add_unique_ust_app_event(ua_chan, ua_event);

	DBG2("UST app create event %s for PID %d completed", ua_event->name,
			app->pid);

end:
	return ret;

error:
	/* Valid. Calling here is already in a read side lock */
	delete_ust_app_event(-1, ua_event);
	return ret;
}
2848
/*
 * Create UST metadata and open it on the tracer side.
 *
 * The metadata channel is created and set up on the consumer, which keeps
 * the objects; the local metadata channel object is always deleted before
 * returning (success included), keeping only the key in the registry.
 *
 * Called with UST app session lock held and RCU read side lock.
 */
static int create_ust_app_metadata(struct ust_app_session *ua_sess,
		struct ust_app *app, struct consumer_output *consumer)
{
	int ret = 0;
	struct ust_app_channel *metadata;
	struct consumer_socket *socket;
	struct ust_registry_session *registry;

	assert(ua_sess);
	assert(app);
	assert(consumer);

	registry = get_session_registry(ua_sess);
	assert(registry);

	pthread_mutex_lock(&registry->lock);

	/* Metadata already exists for this registry or it was closed previously */
	if (registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto error;
	}

	/* Allocate UST metadata */
	metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
	if (!metadata) {
		/* malloc() failed */
		ret = -ENOMEM;
		goto error;
	}

	memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create metadata");
		goto error;
	}

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error_consumer;
	}

	/*
	 * Keep metadata key so we can identify it on the consumer side. Assign it
	 * to the registry *before* we ask the consumer so we avoid the race of the
	 * consumer requesting the metadata and the ask_channel call on our side
	 * did not returned yet.
	 */
	registry->metadata_key = metadata->key;

	/*
	 * Ask the metadata channel creation to the consumer. The metadata object
	 * will be created by the consumer and kept their. However, the stream is
	 * never added or monitored until we do a first push metadata to the
	 * consumer.
	 */
	ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
			registry);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	/*
	 * The setup command will make the metadata stream be sent to the relayd,
	 * if applicable, and the thread managing the metadatas. This is important
	 * because after this point, if an error occurs, the only way the stream
	 * can be deleted is to be monitored in the consumer.
	 */
	ret = consumer_setup_metadata(socket, metadata->key);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
			metadata->key, app->pid);

error_consumer:
	/*
	 * Deliberate fall-through on success: the consumer owns the metadata
	 * objects now, so the local channel object and its reserved fd are
	 * released in every case.
	 */
	lttng_fd_put(LTTNG_FD_APPS, 1);
	delete_ust_app_channel(-1, metadata, app);
error:
	pthread_mutex_unlock(&registry->lock);
	return ret;
}
2946
2947 /*
2948 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
2949 * acquired before calling this function.
2950 */
2951 struct ust_app *ust_app_find_by_pid(pid_t pid)
2952 {
2953 struct ust_app *app = NULL;
2954 struct lttng_ht_node_ulong *node;
2955 struct lttng_ht_iter iter;
2956
2957 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
2958 node = lttng_ht_iter_get_node_ulong(&iter);
2959 if (node == NULL) {
2960 DBG2("UST app no found with pid %d", pid);
2961 goto error;
2962 }
2963
2964 DBG2("Found UST app by pid %d", pid);
2965
2966 app = caa_container_of(node, struct ust_app, pid_n);
2967
2968 error:
2969 return app;
2970 }
2971
2972 /*
2973 * Allocate and init an UST app object using the registration information and
2974 * the command socket. This is called when the command socket connects to the
2975 * session daemon.
2976 *
2977 * The object is returned on success or else NULL.
2978 */
2979 struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
2980 {
2981 struct ust_app *lta = NULL;
2982
2983 assert(msg);
2984 assert(sock >= 0);
2985
2986 DBG3("UST app creating application for socket %d", sock);
2987
2988 if ((msg->bits_per_long == 64 &&
2989 (uatomic_read(&ust_consumerd64_fd) == -EINVAL))
2990 || (msg->bits_per_long == 32 &&
2991 (uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
2992 ERR("Registration failed: application \"%s\" (pid: %d) has "
2993 "%d-bit long, but no consumerd for this size is available.\n",
2994 msg->name, msg->pid, msg->bits_per_long);
2995 goto error;
2996 }
2997
2998 lta = zmalloc(sizeof(struct ust_app));
2999 if (lta == NULL) {
3000 PERROR("malloc");
3001 goto error;
3002 }
3003
3004 lta->ppid = msg->ppid;
3005 lta->uid = msg->uid;
3006 lta->gid = msg->gid;
3007
3008 lta->bits_per_long = msg->bits_per_long;
3009 lta->uint8_t_alignment = msg->uint8_t_alignment;
3010 lta->uint16_t_alignment = msg->uint16_t_alignment;
3011 lta->uint32_t_alignment = msg->uint32_t_alignment;
3012 lta->uint64_t_alignment = msg->uint64_t_alignment;
3013 lta->long_alignment = msg->long_alignment;
3014 lta->byte_order = msg->byte_order;
3015
3016 lta->v_major = msg->major;
3017 lta->v_minor = msg->minor;
3018 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3019 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3020 lta->notify_sock = -1;
3021
3022 /* Copy name and make sure it's NULL terminated. */
3023 strncpy(lta->name, msg->name, sizeof(lta->name));
3024 lta->name[UST_APP_PROCNAME_LEN] = '\0';
3025
3026 /*
3027 * Before this can be called, when receiving the registration information,
3028 * the application compatibility is checked. So, at this point, the
3029 * application can work with this session daemon.
3030 */
3031 lta->compatible = 1;
3032
3033 lta->pid = msg->pid;
3034 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
3035 lta->sock = sock;
3036 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
3037
3038 CDS_INIT_LIST_HEAD(&lta->teardown_head);
3039
3040 error:
3041 return lta;
3042 }
3043
/*
 * For a given application object, add it to every hash table.
 *
 * The app's notify socket must have been set (notify_sock >= 0) before
 * this is called. The registration order below matters: PID (replace),
 * then command socket (unique), then notify socket (unique).
 */
void ust_app_add(struct ust_app *app)
{
	assert(app);
	assert(app->notify_sock >= 0);

	rcu_read_lock();

	/*
	 * On a re-registration, we want to kick out the previous registration of
	 * that pid
	 */
	lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);

	/*
	 * The socket _should_ be unique until _we_ call close. So, a add_unique
	 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
	 * already in the table.
	 */
	lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);

	/* Add application to the notify socket hash table. */
	lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
	lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);

	DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
			"notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
			app->gid, app->sock, app->name, app->notify_sock, app->v_major,
			app->v_minor);

	rcu_read_unlock();
}
3078
3079 /*
3080 * Set the application version into the object.
3081 *
3082 * Return 0 on success else a negative value either an errno code or a
3083 * LTTng-UST error code.
3084 */
3085 int ust_app_version(struct ust_app *app)
3086 {
3087 int ret;
3088
3089 assert(app);
3090
3091 ret = ustctl_tracer_version(app->sock, &app->version);
3092 if (ret < 0) {
3093 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3094 ERR("UST app %d version failed with ret %d", app->sock, ret);
3095 } else {
3096 DBG3("UST app %d version failed. Application is dead", app->sock);
3097 }
3098 }
3099
3100 return ret;
3101 }
3102
/*
 * Unregister app by removing it from the global traceable app list and freeing
 * the data struct.
 *
 * The socket is already closed at this point so no close to sock.
 */
void ust_app_unregister(int sock)
{
	struct ust_app *lta;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter ust_app_sock_iter;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess;
	int ret;

	rcu_read_lock();

	/* Get the node reference for a call_rcu */
	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
	node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
	assert(node);

	lta = caa_container_of(node, struct ust_app, sock_n);
	DBG("PID %d unregistering with sock %d", lta->pid, sock);

	/*
	 * For per-PID buffers, perform "push metadata" and flush all
	 * application streams before removing app from hash tables,
	 * ensuring proper behavior of data_pending check.
	 * Remove sessions so they are not visible during deletion.
	 */
	cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
			node.node) {
		struct ust_registry_session *registry;

		ret = lttng_ht_del(lta->sessions, &iter);
		if (ret) {
			/* The session was already removed so scheduled for teardown. */
			continue;
		}

		if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
			(void) ust_app_flush_app_session(lta, ua_sess);
		}

		/*
		 * Add session to list for teardown. This is safe since at this point we
		 * are the only one using this list.
		 */
		pthread_mutex_lock(&ua_sess->lock);

		/* Skip sessions already torn down concurrently. */
		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/*
		 * Normally, this is done in the delete session process which is
		 * executed in the call rcu below. However, upon registration we can't
		 * afford to wait for the grace period before pushing data or else the
		 * data pending feature can race between the unregistration and stop
		 * command where the data pending command is sent *before* the grace
		 * period ended.
		 *
		 * The close metadata below nullifies the metadata pointer in the
		 * session so the delete session will NOT push/close a second time.
		 */
		registry = get_session_registry(ua_sess);
		if (registry) {
			/* Push metadata for application before freeing the application. */
			(void) push_metadata(registry, ua_sess->consumer);

			/*
			 * Don't ask to close metadata for global per UID buffers. Close
			 * metadata only on destroy trace session in this case. Also, the
			 * previous push metadata could have flag the metadata registry to
			 * close so don't send a close command if closed.
			 */
			if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
				/* And ask to close it for this session registry. */
				(void) close_metadata(registry, ua_sess->consumer);
			}
		}
		cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);

		pthread_mutex_unlock(&ua_sess->lock);
	}

	/* Remove application from PID hash table */
	ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
	assert(!ret);

	/*
	 * Remove application from notify hash table. The thread handling the
	 * notify socket could have deleted the node so ignore on error because
	 * either way it's valid. The close of that socket is handled by the other
	 * thread.
	 */
	iter.iter.node = &lta->notify_sock_n.node;
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

	/*
	 * Ignore return value since the node might have been removed before by an
	 * add replace during app registration because the PID can be reassigned by
	 * the OS.
	 */
	iter.iter.node = &lta->pid_n.node;
	ret = lttng_ht_del(ust_app_ht, &iter);
	if (ret) {
		DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
				lta->pid);
	}

	/* Free memory */
	call_rcu(&lta->pid_n.head, delete_ust_app_rcu);

	rcu_read_unlock();
	return;
}
3222
/*
 * Fill events array with all events name of all registered apps.
 *
 * On success, *events points to a heap-allocated array owned by the caller
 * and the return value is the number of entries. On error, a negative value
 * is returned and no array is allocated.
 */
int ust_app_list_events(struct lttng_event **events)
{
	int ret, handle;
	size_t nbmem, count = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct lttng_event *tmp_event;

	nbmem = UST_APP_EVENT_LIST_SIZE;
	tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
	if (tmp_event == NULL) {
		PERROR("zmalloc ust app events");
		ret = -ENOMEM;
		goto error;
	}

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ust_tracepoint_iter uiter;

		health_code_update();

		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		handle = ustctl_tracepoint_list(app->sock);
		if (handle < 0) {
			if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app list events getting handle failed for app pid %d",
						app->pid);
			}
			continue;
		}

		/*
		 * NOTE(review): the tracepoint list handle obtained above does not
		 * appear to be released on any path out of this loop — confirm
		 * whether a ustctl handle release is required here to avoid a leak
		 * on the app side.
		 */
		while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
					&uiter)) != -LTTNG_UST_ERR_NOENT) {
			/* Handle ustctl error. */
			if (ret < 0) {
				if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
					ERR("UST app tp list get failed for app %d with ret %d",
							app->sock, ret);
				} else {
					DBG3("UST app tp list get failed. Application is dead");
					/*
					 * This is normal behavior, an application can die during the
					 * creation process. Don't report an error so the execution can
					 * continue normally. Continue normal execution.
					 */
					break;
				}
				free(tmp_event);
				goto rcu_error;
			}

			health_code_update();
			if (count >= nbmem) {
				/* In case the realloc fails, we free the memory */
				struct lttng_event *new_tmp_event;
				size_t new_nbmem;

				/* Double the capacity; zero only the newly added tail. */
				new_nbmem = nbmem << 1;
				DBG2("Reallocating event list from %zu to %zu entries",
						nbmem, new_nbmem);
				new_tmp_event = realloc(tmp_event,
					new_nbmem * sizeof(struct lttng_event));
				if (new_tmp_event == NULL) {
					PERROR("realloc ust app events");
					free(tmp_event);
					ret = -ENOMEM;
					goto rcu_error;
				}
				/* Zero the new memory */
				memset(new_tmp_event + nbmem, 0,
					(new_nbmem - nbmem) * sizeof(struct lttng_event));
				nbmem = new_nbmem;
				tmp_event = new_tmp_event;
			}
			memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
			tmp_event[count].loglevel = uiter.loglevel;
			tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
			tmp_event[count].pid = app->pid;
			tmp_event[count].enabled = -1;
			count++;
		}
	}

	ret = count;
	*events = tmp_event;

	DBG2("UST app list events done (%zu events)", count);

rcu_error:
	rcu_read_unlock();
error:
	health_code_update();
	return ret;
}
3328
3329 /*
3330 * Fill events array with all events name of all registered apps.
3331 */
int ust_app_list_event_fields(struct lttng_event_field **fields)
{
	int ret, handle;
	size_t nbmem, count = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct lttng_event_field *tmp_event;

	/*
	 * On success, returns the number of fields and hands ownership of the
	 * allocated array to the caller through *fields (caller must free).
	 * On failure, returns a negative errno value and *fields is untouched.
	 */
	nbmem = UST_APP_EVENT_LIST_SIZE;
	tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
	if (tmp_event == NULL) {
		PERROR("zmalloc ust app event fields");
		ret = -ENOMEM;
		goto error;
	}

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ust_field_iter uiter;

		health_code_update();

		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		/* Open a field listing handle on the app's command socket. */
		handle = ustctl_tracepoint_field_list(app->sock);
		if (handle < 0) {
			if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app list field getting handle failed for app pid %d",
						app->pid);
			}
			/* EPIPE/EXITING: app died; not an error, move to next app. */
			continue;
		}

		while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
					&uiter)) != -LTTNG_UST_ERR_NOENT) {
			/* Handle ustctl error. */
			if (ret < 0) {
				if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
					ERR("UST app tp list field failed for app %d with ret %d",
							app->sock, ret);
				} else {
					DBG3("UST app tp list field failed. Application is dead");
					/*
					 * This is normal behavior, an application can die during the
					 * creation process. Don't report an error so the execution can
					 * continue normally. Reset list and count for next app.
					 */
					break;
				}
				free(tmp_event);
				goto rcu_error;
			}

			health_code_update();
			if (count >= nbmem) {
				/* In case the realloc fails, we free the memory */
				struct lttng_event_field *new_tmp_event;
				size_t new_nbmem;

				/* Geometric growth: double the capacity each time. */
				new_nbmem = nbmem << 1;
				DBG2("Reallocating event field list from %zu to %zu entries",
						nbmem, new_nbmem);
				new_tmp_event = realloc(tmp_event,
					new_nbmem * sizeof(struct lttng_event_field));
				if (new_tmp_event == NULL) {
					PERROR("realloc ust app event fields");
					free(tmp_event);
					ret = -ENOMEM;
					goto rcu_error;
				}
				/* Zero the new memory */
				memset(new_tmp_event + nbmem, 0,
					(new_nbmem - nbmem) * sizeof(struct lttng_event_field));
				nbmem = new_nbmem;
				tmp_event = new_tmp_event;
			}

			memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
			/* Mapping between these enums matches 1 to 1. */
			tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
			tmp_event[count].nowrite = uiter.nowrite;

			memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
			tmp_event[count].event.loglevel = uiter.loglevel;
			tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
			tmp_event[count].event.pid = app->pid;
			/* -1: enabled state is not meaningful for a listing. */
			tmp_event[count].event.enabled = -1;
			count++;
		}
	}

	ret = count;
	*fields = tmp_event;

	DBG2("UST app list event fields done (%zu events)", count);

rcu_error:
	rcu_read_unlock();
error:
	health_code_update();
	return ret;
}
3440
3441 /*
3442 * Free and clean all traceable apps of the global list.
3443 *
3444 * Should _NOT_ be called with RCU read-side lock held.
3445 */
3446 void ust_app_clean_list(void)
3447 {
3448 int ret;
3449 struct ust_app *app;
3450 struct lttng_ht_iter iter;
3451
3452 DBG2("UST app cleaning registered apps hash table");
3453
3454 rcu_read_lock();
3455
3456 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3457 ret = lttng_ht_del(ust_app_ht, &iter);
3458 assert(!ret);
3459 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
3460 }
3461
3462 /* Cleanup socket hash table */
3463 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
3464 sock_n.node) {
3465 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
3466 assert(!ret);
3467 }
3468
3469 /* Cleanup notify socket hash table */
3470 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
3471 notify_sock_n.node) {
3472 ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
3473 assert(!ret);
3474 }
3475 rcu_read_unlock();
3476
3477 /* Destroy is done only when the ht is empty */
3478 ht_cleanup_push(ust_app_ht);
3479 ht_cleanup_push(ust_app_ht_by_sock);
3480 ht_cleanup_push(ust_app_ht_by_notify_sock);
3481 }
3482
3483 /*
3484 * Init UST app hash table.
3485 */
3486 void ust_app_ht_alloc(void)
3487 {
3488 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3489 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3490 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3491 }
3492
3493 /*
3494 * For a specific UST session, disable the channel for all registered apps.
3495 */
int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;

	/* Returns 0 on success (including "nothing to do"); -1 on NULL input. */
	if (usess == NULL || uchan == NULL) {
		ERR("Disabling UST global channel with NULL values");
		ret = -1;
		goto error;
	}

	DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
			uchan->name, usess->id);

	rcu_read_lock();

	/* For every registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ht_iter uiter;
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			continue;
		}

		/* Get channel */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/* If the session if found for the app, the channel must be there */
		/*
		 * NOTE(review): this assumes the channel cannot disappear while the
		 * app session exists; confirm this holds against a concurrent
		 * application exit (a sibling function handles the missing-channel
		 * case gracefully instead of asserting).
		 */
		assert(ua_chan_node);

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		/* The channel must not be already disabled */
		assert(ua_chan->enabled == 1);

		/* Disable channel onto application */
		ret = disable_ust_app_channel(ua_sess, ua_chan, app);
		if (ret < 0) {
			/* XXX: We might want to report this error at some point... */
			continue;
		}
	}

	rcu_read_unlock();

error:
	return ret;
}
3555
3556 /*
3557 * For a specific UST session, enable the channel for all registered apps.
3558 */
3559 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
3560 struct ltt_ust_channel *uchan)
3561 {
3562 int ret = 0;
3563 struct lttng_ht_iter iter;
3564 struct ust_app *app;
3565 struct ust_app_session *ua_sess;
3566
3567 if (usess == NULL || uchan == NULL) {
3568 ERR("Adding UST global channel to NULL values");
3569 ret = -1;
3570 goto error;
3571 }
3572
3573 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
3574 uchan->name, usess->id);
3575
3576 rcu_read_lock();
3577
3578 /* For every registered applications */
3579 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3580 if (!app->compatible) {
3581 /*
3582 * TODO: In time, we should notice the caller of this error by
3583 * telling him that this is a version error.
3584 */
3585 continue;
3586 }
3587 ua_sess = lookup_session_by_app(usess, app);
3588 if (ua_sess == NULL) {
3589 continue;
3590 }
3591
3592 /* Enable channel onto application */
3593 ret = enable_ust_app_channel(ua_sess, uchan, app);
3594 if (ret < 0) {
3595 /* XXX: We might want to report this error at some point... */
3596 continue;
3597 }
3598 }
3599
3600 rcu_read_unlock();
3601
3602 error:
3603 return ret;
3604 }
3605
3606 /*
3607 * Disable an event in a channel and for a specific session.
3608 */
int ust_app_disable_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app disabling event %s for all apps in channel "
			"%s for session id %" PRIu64,
			uevent->attr.name, uchan->name, usess->id);

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			/* Next app */
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node == NULL) {
			/* A missing channel is tolerated here; the app is skipped. */
			DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
					"Skipping", uchan->name, usess->id, app->pid);
			continue;
		}
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		/* Lookup the event by name within the channel's event table. */
		lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &uiter);
		ua_event_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_event_node == NULL) {
			DBG2("Event %s not found in channel %s for app pid %d."
					"Skipping", uevent->attr.name, uchan->name, app->pid);
			continue;
		}
		ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);

		ret = disable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			/* XXX: Report error someday... */
			continue;
		}
	}

	rcu_read_unlock();

	return ret;
}
3671
3672 /*
3673 * For a specific UST session, create the channel for all registered apps.
3674 */
int ust_app_create_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan)
{
	int ret = 0, created;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_session *ua_sess = NULL;

	/* Very wrong code flow */
	assert(usess);
	assert(uchan);

	DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
			uchan->name, usess->id);

	rcu_read_lock();

	/* For every registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		/*
		 * Create session on the tracer side and add it to app session HT. Note
		 * that if session exist, it will simply return a pointer to the ust
		 * app session.
		 */
		ret = create_ust_app_session(usess, app, &ua_sess, &created);
		if (ret < 0) {
			switch (ret) {
			case -ENOTCONN:
				/*
				 * The application's socket is not valid. Either a bad socket
				 * or a timeout on it. We can't inform the caller that for a
				 * specific app, the session failed so lets continue here.
				 */
				ret = 0;	/* Not an error. */
				continue;
			case -ENOMEM:
			default:
				goto error_rcu_unlock;
			}
		}
		assert(ua_sess);

		pthread_mutex_lock(&ua_sess->lock);

		/* Session may be torn down concurrently; skip deleted sessions. */
		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
				sizeof(uchan->name))) {
			/* Metadata channel: only copy attributes, created lazily later. */
			copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, &uchan->attr);
			ret = 0;
		} else {
			/* Create channel onto application. We don't need the chan ref. */
			ret = create_ust_app_channel(ua_sess, uchan, app,
					LTTNG_UST_CHAN_PER_CPU, usess, NULL);
		}
		pthread_mutex_unlock(&ua_sess->lock);
		if (ret < 0) {
			/* Cleanup the created session if it's the case. */
			/* Only tear down sessions this call itself created. */
			if (created) {
				destroy_app_session(app, ua_sess);
			}
			switch (ret) {
			case -ENOTCONN:
				/*
				 * The application's socket is not valid. Either a bad socket
				 * or a timeout on it. We can't inform the caller that for a
				 * specific app, the session failed so lets continue here.
				 */
				ret = 0;	/* Not an error. */
				continue;
			case -ENOMEM:
			default:
				goto error_rcu_unlock;
			}
		}
	}

error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
3766
3767 /*
3768 * Enable event for a specific session and channel on the tracer.
3769 */
int ust_app_enable_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app enabling event %s for all apps for session id %" PRIu64,
			uevent->attr.name, usess->id);

	/*
	 * NOTE: At this point, this function is called only if the session and
	 * channel passed are already created for all apps. and enabled on the
	 * tracer also.
	 */

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has problem or is probably dead. */
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/*
		 * It is possible that the channel cannot be found is
		 * the channel/event creation occurs concurrently with
		 * an application exit.
		 */
		if (!ua_chan_node) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		/* Get event node */
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG3("UST app enable event %s not found for app PID %d."
					"Skipping app", uevent->attr.name, app->pid);
			/* Jump to unlock; the next app may still have the event. */
			goto next_app;
		}

		ret = enable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			/* Hard failure: unlock and abort the whole iteration. */
			pthread_mutex_unlock(&ua_sess->lock);
			goto error;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

error:
	rcu_read_unlock();
	return ret;
}
3851
3852 /*
3853 * For a specific existing UST session and UST channel, creates the event for
3854 * all registered apps.
3855 */
3856 int ust_app_create_event_glb(struct ltt_ust_session *usess,
3857 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3858 {
3859 int ret = 0;
3860 struct lttng_ht_iter iter, uiter;
3861 struct lttng_ht_node_str *ua_chan_node;
3862 struct ust_app *app;
3863 struct ust_app_session *ua_sess;
3864 struct ust_app_channel *ua_chan;
3865
3866 DBG("UST app creating event %s for all apps for session id %" PRIu64,
3867 uevent->attr.name, usess->id);
3868
3869 rcu_read_lock();
3870
3871 /* For all registered applications */
3872 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3873 if (!app->compatible) {
3874 /*
3875 * TODO: In time, we should notice the caller of this error by
3876 * telling him that this is a version error.
3877 */
3878 continue;
3879 }
3880 ua_sess = lookup_session_by_app(usess, app);
3881 if (!ua_sess) {
3882 /* The application has problem or is probably dead. */
3883 continue;
3884 }
3885
3886 pthread_mutex_lock(&ua_sess->lock);
3887
3888 if (ua_sess->deleted) {
3889 pthread_mutex_unlock(&ua_sess->lock);
3890 continue;
3891 }
3892
3893 /* Lookup channel in the ust app session */
3894 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3895 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3896 /* If the channel is not found, there is a code flow error */
3897 assert(ua_chan_node);
3898
3899 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3900
3901 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
3902 pthread_mutex_unlock(&ua_sess->lock);
3903 if (ret < 0) {
3904 if (ret != -LTTNG_UST_ERR_EXIST) {
3905 /* Possible value at this point: -ENOMEM. If so, we stop! */
3906 break;
3907 }
3908 DBG2("UST app event %s already exist on app PID %d",
3909 uevent->attr.name, app->pid);
3910 continue;
3911 }
3912 }
3913
3914 rcu_read_unlock();
3915
3916 return ret;
3917 }
3918
3919 /*
3920 * Start tracing for a specific UST session and app.
3921 */
static
int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;

	DBG("Starting tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		/* The session is in teardown process. Ignore and continue. */
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	/* Upon restart, we skip the setup, already done */
	if (ua_sess->started) {
		goto skip_setup;
	}

	/* Create directories if consumer is LOCAL and has a path defined. */
	if (usess->consumer->type == CONSUMER_DST_LOCAL &&
			strlen(usess->consumer->dst.trace_path) > 0) {
		ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
				S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
		if (ret < 0) {
			/* An existing directory is fine; anything else is fatal. */
			if (ret != -EEXIST) {
				ERR("Trace directory creation error");
				goto error_unlock;
			}
		}
	}

	/*
	 * Create the metadata for the application. This returns gracefully if a
	 * metadata was already set for the session.
	 */
	ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
	if (ret < 0) {
		goto error_unlock;
	}

	health_code_update();

skip_setup:
	/* This start the UST tracing */
	ret = ustctl_start_session(app->sock, ua_sess->handle);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error starting tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app start session failed. Application is dead.");
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			pthread_mutex_unlock(&ua_sess->lock);
			goto end;
		}
		goto error_unlock;
	}

	/* Indicate that the session has been started once */
	ua_sess->started = 1;

	pthread_mutex_unlock(&ua_sess->lock);

	health_code_update();

	/* Quiescent wait after starting trace */
	ret = ustctl_wait_quiescent(app->sock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

end:
	/* All "skip this app" paths converge here and report success. */
	rcu_read_unlock();
	health_code_update();
	return 0;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
4023
4024 /*
4025 * Stop tracing for a specific UST session and app.
4026 */
static
int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	DBG("Stopping tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end_no_session;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		goto end_no_session;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end_no_session;
	}

	/*
	 * If started = 0, it means that stop trace has been called for a session
	 * that was never started. It's possible since we can have a fail start
	 * from either the application manager thread or the command thread. Simply
	 * indicate that this is a stop error.
	 */
	if (!ua_sess->started) {
		goto error_rcu_unlock;
	}

	health_code_update();

	/* This inhibits UST tracing */
	ret = ustctl_stop_session(app->sock, ua_sess->handle);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error stopping tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app stop session failed. Application is dead.");
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			goto end_unlock;
		}
		goto error_rcu_unlock;
	}

	health_code_update();

	/* Quiescent wait after stopping trace */
	ret = ustctl_wait_quiescent(app->sock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

	health_code_update();

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Push metadata for application before freeing the application. */
	(void) push_metadata(registry, ua_sess->consumer);

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
end_no_session:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_rcu_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
4114
static
int ust_app_flush_app_session(struct ust_app *app,
		struct ust_app_session *ua_sess)
{
	int ret, retval = 0;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct consumer_socket *socket;

	DBG("Flushing app session buffers for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end_not_compatible;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		goto end_deleted;
	}

	health_code_update();

	/* Flushing buffers */
	socket = consumer_find_socket_by_bitness(app->bits_per_long,
			ua_sess->consumer);

	/* Flush buffers and push metadata. */
	switch (ua_sess->buffer_type) {
	case LTTNG_BUFFER_PER_PID:
		cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
				node.node) {
			health_code_update();
			assert(ua_chan->is_sent);
			/* A per-channel failure is recorded but does not stop the loop. */
			ret = consumer_flush_channel(socket, ua_chan->key);
			if (ret) {
				ERR("Error flushing consumer channel");
				retval = -1;
				continue;
			}
		}
		break;
	case LTTNG_BUFFER_PER_UID:
	default:
		/* Per-UID sessions are flushed by ust_app_flush_session(), not here. */
		assert(0);
		break;
	}

	health_code_update();

end_deleted:
	pthread_mutex_unlock(&ua_sess->lock);

end_not_compatible:
	rcu_read_unlock();
	health_code_update();
	return retval;
}
4175
4176 /*
4177 * Flush buffers for all applications for a specific UST session.
4178 * Called with UST session lock held.
4179 */
static
int ust_app_flush_session(struct ltt_ust_session *usess)

{
	int ret = 0;

	DBG("Flushing session buffers for all ust apps");

	rcu_read_lock();

	/* Flush buffers and push metadata. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;
		struct lttng_ht_iter iter;

		/* Flush all per UID buffers associated to that session. */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct ust_registry_session *ust_session_reg;
			struct buffer_reg_channel *reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				/* Ignore request if no consumer is found for the session. */
				continue;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {
				/*
				 * The following call will print error values so the return
				 * code is of little importance because whatever happens, we
				 * have to try them all.
				 */
				(void) consumer_flush_channel(socket, reg_chan->consumer_key);
			}

			ust_session_reg = reg->registry->reg.ust;
			/* Push metadata. */
			(void) push_metadata(ust_session_reg, usess->consumer);
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app_session *ua_sess;
		struct lttng_ht_iter iter;
		struct ust_app *app;

		/* Per-PID: flush each registered app's session individually. */
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			ua_sess = lookup_session_by_app(usess, app);
			if (ua_sess == NULL) {
				continue;
			}
			(void) ust_app_flush_app_session(app, ua_sess);
		}
		break;
	}
	default:
		ret = -1;
		assert(0);
		break;
	}

	rcu_read_unlock();
	health_code_update();
	return ret;
}
4252
4253 /*
4254 * Destroy a specific UST session in apps.
4255 */
4256 static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
4257 {
4258 int ret;
4259 struct ust_app_session *ua_sess;
4260 struct lttng_ht_iter iter;
4261 struct lttng_ht_node_u64 *node;
4262
4263 DBG("Destroy tracing for ust app pid %d", app->pid);
4264
4265 rcu_read_lock();
4266
4267 if (!app->compatible) {
4268 goto end;
4269 }
4270
4271 __lookup_session_by_app(usess, app, &iter);
4272 node = lttng_ht_iter_get_node_u64(&iter);
4273 if (node == NULL) {
4274 /* Session is being or is deleted. */
4275 goto end;
4276 }
4277 ua_sess = caa_container_of(node, struct ust_app_session, node);
4278
4279 health_code_update();
4280 destroy_app_session(app, ua_sess);
4281
4282 health_code_update();
4283
4284 /* Quiescent wait after stopping trace */
4285 ret = ustctl_wait_quiescent(app->sock);
4286 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4287 ERR("UST app wait quiescent failed for app pid %d ret %d",
4288 app->pid, ret);
4289 }
4290 end:
4291 rcu_read_unlock();
4292 health_code_update();
4293 return 0;
4294 }
4295
4296 /*
4297 * Start tracing for the UST session.
4298 */
4299 int ust_app_start_trace_all(struct ltt_ust_session *usess)
4300 {
4301 int ret = 0;
4302 struct lttng_ht_iter iter;
4303 struct ust_app *app;
4304
4305 DBG("Starting all UST traces");
4306
4307 rcu_read_lock();
4308
4309 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4310 ret = ust_app_start_trace(usess, app);
4311 if (ret < 0) {
4312 /* Continue to next apps even on error */
4313 continue;
4314 }
4315 }
4316
4317 rcu_read_unlock();
4318
4319 return 0;
4320 }
4321
4322 /*
4323 * Start tracing for the UST session.
4324 * Called with UST session lock held.
4325 */
4326 int ust_app_stop_trace_all(struct ltt_ust_session *usess)
4327 {
4328 int ret = 0;
4329 struct lttng_ht_iter iter;
4330 struct ust_app *app;
4331
4332 DBG("Stopping all UST traces");
4333
4334 rcu_read_lock();
4335
4336 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4337 ret = ust_app_stop_trace(usess, app);
4338 if (ret < 0) {
4339 /* Continue to next apps even on error */
4340 continue;
4341 }
4342 }
4343
4344 (void) ust_app_flush_session(usess);
4345
4346 rcu_read_unlock();
4347
4348 return 0;
4349 }
4350
4351 /*
4352 * Destroy app UST session.
4353 */
4354 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
4355 {
4356 int ret = 0;
4357 struct lttng_ht_iter iter;
4358 struct ust_app *app;
4359
4360 DBG("Destroy all UST traces");
4361
4362 rcu_read_lock();
4363
4364 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4365 ret = destroy_trace(usess, app);
4366 if (ret < 0) {
4367 /* Continue to next apps even on error */
4368 continue;
4369 }
4370 }
4371
4372 rcu_read_unlock();
4373
4374 return 0;
4375 }
4376
4377 /*
4378 * Add channels/events from UST global domain to registered apps at sock.
4379 */
void ust_app_global_update(struct ltt_ust_session *usess, int sock)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct ust_app *app;
	struct ust_app_session *ua_sess = NULL;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;

	assert(usess);
	assert(sock >= 0);

	DBG2("UST app global update for app sock %d for session id %" PRIu64, sock,
			usess->id);

	rcu_read_lock();

	app = ust_app_find_by_sock(sock);
	if (app == NULL) {
		/*
		 * Application can be unregistered before so this is possible hence
		 * simply stopping the update.
		 */
		DBG3("UST app update failed to find app sock %d", sock);
		goto error;
	}

	if (!app->compatible) {
		goto error;
	}

	/* Create (or retrieve) the app-side session for this UST session. */
	ret = create_ust_app_session(usess, app, &ua_sess, NULL);
	if (ret < 0) {
		/* Tracer is probably gone or ENOMEM. */
		goto error;
	}
	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto error;
	}

	/*
	 * We can iterate safely here over all UST app session since the create ust
	 * app session above made a shadow copy of the UST global domain from the
	 * ltt ust session.
	 */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = do_create_channel(app, usess, ua_sess, ua_chan);
		if (ret < 0 && ret != -ENOTCONN) {
			/*
			 * Stop everything. On error, the application
			 * failed, no more file descriptor are available
			 * or ENOMEM so stopping here is the only thing
			 * we can do for now. The only exception is
			 * -ENOTCONN, which indicates that the application
			 * has exit.
			 */
			goto error_unlock;
		}

		/*
		 * Add context using the list so they are enabled in the same order the
		 * user added them.
		 */
		cds_list_for_each_entry(ua_ctx, &ua_chan->ctx_list, list) {
			ret = create_ust_channel_context(ua_chan, ua_ctx, app);
			if (ret < 0) {
				goto error_unlock;
			}
		}


		/* For each events */
		cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
				node.node) {
			ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
			if (ret < 0) {
				goto error_unlock;
			}
		}
	}

	pthread_mutex_unlock(&ua_sess->lock);

	/* If the session is already active, start tracing for this app now. */
	if (usess->active) {
		ret = ust_app_start_trace(usess, app);
		if (ret < 0) {
			goto error;
		}

		DBG2("UST trace started for app pid %d", app->pid);
	}

	/* Everything went well at this point. */
	rcu_read_unlock();
	return;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
error:
	/* On any failure, the partially-built app session is torn down. */
	if (ua_sess) {
		destroy_app_session(app, ua_sess);
	}
	rcu_read_unlock();
	return;
}
4492
4493 /*
4494 * Add context to a specific channel for global UST domain.
4495 */
4496 int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
4497 struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
4498 {
4499 int ret = 0;
4500 struct lttng_ht_node_str *ua_chan_node;
4501 struct lttng_ht_iter iter, uiter;
4502 struct ust_app_channel *ua_chan = NULL;
4503 struct ust_app_session *ua_sess;
4504 struct ust_app *app;
4505
4506 rcu_read_lock();
4507
4508 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4509 if (!app->compatible) {
4510 /*
4511 * TODO: In time, we should notice the caller of this error by
4512 * telling him that this is a version error.
4513 */
4514 continue;
4515 }
4516 ua_sess = lookup_session_by_app(usess, app);
4517 if (ua_sess == NULL) {
4518 continue;
4519 }
4520
4521 pthread_mutex_lock(&ua_sess->lock);
4522
4523 if (ua_sess->deleted) {
4524 pthread_mutex_unlock(&ua_sess->lock);
4525 continue;
4526 }
4527
4528 /* Lookup channel in the ust app session */
4529 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4530 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
4531 if (ua_chan_node == NULL) {
4532 goto next_app;
4533 }
4534 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
4535 node);
4536 ret = create_ust_app_channel_context(ua_sess, ua_chan, &uctx->ctx, app);
4537 if (ret < 0) {
4538 goto next_app;
4539 }
4540 next_app:
4541 pthread_mutex_unlock(&ua_sess->lock);
4542 }
4543
4544 rcu_read_unlock();
4545 return ret;
4546 }
4547
4548 /*
4549 * Enable event for a channel from a UST session for a specific PID.
4550 */
4551 int ust_app_enable_event_pid(struct ltt_ust_session *usess,
4552 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
4553 {
4554 int ret = 0;
4555 struct lttng_ht_iter iter;
4556 struct lttng_ht_node_str *ua_chan_node;
4557 struct ust_app *app;
4558 struct ust_app_session *ua_sess;
4559 struct ust_app_channel *ua_chan;
4560 struct ust_app_event *ua_event;
4561
4562 DBG("UST app enabling event %s for PID %d", uevent->attr.name, pid);
4563
4564 rcu_read_lock();
4565
4566 app = ust_app_find_by_pid(pid);
4567 if (app == NULL) {
4568 ERR("UST app enable event per PID %d not found", pid);
4569 ret = -1;
4570 goto end;
4571 }
4572
4573 if (!app->compatible) {
4574 ret = 0;
4575 goto end;
4576 }
4577
4578 ua_sess = lookup_session_by_app(usess, app);
4579 if (!ua_sess) {
4580 /* The application has problem or is probably dead. */
4581 ret = 0;
4582 goto end;
4583 }
4584
4585 pthread_mutex_lock(&ua_sess->lock);
4586
4587 if (ua_sess->deleted) {
4588 ret = 0;
4589 goto end_unlock;
4590 }
4591
4592 /* Lookup channel in the ust app session */
4593 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
4594 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
4595 /* If the channel is not found, there is a code flow error */
4596 assert(ua_chan_node);
4597
4598 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4599
4600 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4601 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
4602 if (ua_event == NULL) {
4603 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
4604 if (ret < 0) {
4605 goto end_unlock;
4606 }
4607 } else {
4608 ret = enable_ust_app_event(ua_sess, ua_event, app);
4609 if (ret < 0) {
4610 goto end_unlock;
4611 }
4612 }
4613
4614 end_unlock:
4615 pthread_mutex_unlock(&ua_sess->lock);
4616 end:
4617 rcu_read_unlock();
4618 return ret;
4619 }
4620
4621 /*
4622 * Calibrate registered applications.
4623 */
4624 int ust_app_calibrate_glb(struct lttng_ust_calibrate *calibrate)
4625 {
4626 int ret = 0;
4627 struct lttng_ht_iter iter;
4628 struct ust_app *app;
4629
4630 rcu_read_lock();
4631
4632 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4633 if (!app->compatible) {
4634 /*
4635 * TODO: In time, we should notice the caller of this error by
4636 * telling him that this is a version error.
4637 */
4638 continue;
4639 }
4640
4641 health_code_update();
4642
4643 ret = ustctl_calibrate(app->sock, calibrate);
4644 if (ret < 0) {
4645 switch (ret) {
4646 case -ENOSYS:
4647 /* Means that it's not implemented on the tracer side. */
4648 ret = 0;
4649 break;
4650 default:
4651 DBG2("Calibrate app PID %d returned with error %d",
4652 app->pid, ret);
4653 break;
4654 }
4655 }
4656 }
4657
4658 DBG("UST app global domain calibration finished");
4659
4660 rcu_read_unlock();
4661
4662 health_code_update();
4663
4664 return ret;
4665 }
4666
4667 /*
4668 * Receive registration and populate the given msg structure.
4669 *
4670 * On success return 0 else a negative value returned by the ustctl call.
4671 */
4672 int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
4673 {
4674 int ret;
4675 uint32_t pid, ppid, uid, gid;
4676
4677 assert(msg);
4678
4679 ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
4680 &pid, &ppid, &uid, &gid,
4681 &msg->bits_per_long,
4682 &msg->uint8_t_alignment,
4683 &msg->uint16_t_alignment,
4684 &msg->uint32_t_alignment,
4685 &msg->uint64_t_alignment,
4686 &msg->long_alignment,
4687 &msg->byte_order,
4688 msg->name);
4689 if (ret < 0) {
4690 switch (-ret) {
4691 case EPIPE:
4692 case ECONNRESET:
4693 case LTTNG_UST_ERR_EXITING:
4694 DBG3("UST app recv reg message failed. Application died");
4695 break;
4696 case LTTNG_UST_ERR_UNSUP_MAJOR:
4697 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
4698 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
4699 LTTNG_UST_ABI_MINOR_VERSION);
4700 break;
4701 default:
4702 ERR("UST app recv reg message failed with ret %d", ret);
4703 break;
4704 }
4705 goto error;
4706 }
4707 msg->pid = (pid_t) pid;
4708 msg->ppid = (pid_t) ppid;
4709 msg->uid = (uid_t) uid;
4710 msg->gid = (gid_t) gid;
4711
4712 error:
4713 return ret;
4714 }
4715
4716 /*
4717 * Return a ust app channel object using the application object and the channel
4718 * object descriptor has a key. If not found, NULL is returned. A RCU read side
4719 * lock MUST be acquired before calling this function.
4720 */
4721 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
4722 int objd)
4723 {
4724 struct lttng_ht_node_ulong *node;
4725 struct lttng_ht_iter iter;
4726 struct ust_app_channel *ua_chan = NULL;
4727
4728 assert(app);
4729
4730 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
4731 node = lttng_ht_iter_get_node_ulong(&iter);
4732 if (node == NULL) {
4733 DBG2("UST app channel find by objd %d not found", objd);
4734 goto error;
4735 }
4736
4737 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
4738
4739 error:
4740 return ua_chan;
4741 }
4742
4743 /*
4744 * Reply to a register channel notification from an application on the notify
4745 * socket. The channel metadata is also created.
4746 *
4747 * The session UST registry lock is acquired in this function.
4748 *
4749 * On success 0 is returned else a negative value.
4750 */
4751 static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
4752 size_t nr_fields, struct ustctl_field *fields)
4753 {
4754 int ret, ret_code = 0;
4755 uint32_t chan_id, reg_count;
4756 uint64_t chan_reg_key;
4757 enum ustctl_channel_header type;
4758 struct ust_app *app;
4759 struct ust_app_channel *ua_chan;
4760 struct ust_app_session *ua_sess;
4761 struct ust_registry_session *registry;
4762 struct ust_registry_channel *chan_reg;
4763
4764 rcu_read_lock();
4765
4766 /* Lookup application. If not found, there is a code flow error. */
4767 app = find_app_by_notify_sock(sock);
4768 if (!app) {
4769 DBG("Application socket %d is being teardown. Abort event notify",
4770 sock);
4771 ret = 0;
4772 free(fields);
4773 goto error_rcu_unlock;
4774 }
4775
4776 /* Lookup channel by UST object descriptor. */
4777 ua_chan = find_channel_by_objd(app, cobjd);
4778 if (!ua_chan) {
4779 DBG("Application channel is being teardown. Abort event notify");
4780 ret = 0;
4781 free(fields);
4782 goto error_rcu_unlock;
4783 }
4784
4785 assert(ua_chan->session);
4786 ua_sess = ua_chan->session;
4787
4788 /* Get right session registry depending on the session buffer type. */
4789 registry = get_session_registry(ua_sess);
4790 assert(registry);
4791
4792 /* Depending on the buffer type, a different channel key is used. */
4793 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
4794 chan_reg_key = ua_chan->tracing_channel_id;
4795 } else {
4796 chan_reg_key = ua_chan->key;
4797 }
4798
4799 pthread_mutex_lock(&registry->lock);
4800
4801 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
4802 assert(chan_reg);
4803
4804 if (!chan_reg->register_done) {
4805 reg_count = ust_registry_get_event_count(chan_reg);
4806 if (reg_count < 31) {
4807 type = USTCTL_CHANNEL_HEADER_COMPACT;
4808 } else {
4809 type = USTCTL_CHANNEL_HEADER_LARGE;
4810 }
4811
4812 chan_reg->nr_ctx_fields = nr_fields;
4813 chan_reg->ctx_fields = fields;
4814 chan_reg->header_type = type;
4815 } else {
4816 /* Get current already assigned values. */
4817 type = chan_reg->header_type;
4818 free(fields);
4819 /* Set to NULL so the error path does not do a double free. */
4820 fields = NULL;
4821 }
4822 /* Channel id is set during the object creation. */
4823 chan_id = chan_reg->chan_id;
4824
4825 /* Append to metadata */
4826 if (!chan_reg->metadata_dumped) {
4827 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
4828 if (ret_code) {
4829 ERR("Error appending channel metadata (errno = %d)", ret_code);
4830 goto reply;
4831 }
4832 }
4833
4834 reply:
4835 DBG3("UST app replying to register channel key %" PRIu64
4836 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
4837 ret_code);
4838
4839 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
4840 if (ret < 0) {
4841 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4842 ERR("UST app reply channel failed with ret %d", ret);
4843 } else {
4844 DBG3("UST app reply channel failed. Application died");
4845 }
4846 goto error;
4847 }
4848
4849 /* This channel registry registration is completed. */
4850 chan_reg->register_done = 1;
4851
4852 error:
4853 pthread_mutex_unlock(&registry->lock);
4854 error_rcu_unlock:
4855 rcu_read_unlock();
4856 if (ret) {
4857 free(fields);
4858 }
4859 return ret;
4860 }
4861
/*
 * Add event to the UST channel registry. When the event is added to the
 * registry, the metadata is also created. Once done, this replies to the
 * application with the appropriate error code.
 *
 * The session UST registry lock is acquired in the function.
 *
 * Ownership of sig, fields and model_emf_uri is taken over by this function:
 * they are freed here on the early-abort paths, otherwise handed to
 * ust_registry_create_event().
 *
 * On success 0 is returned else a negative value.
 */
static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
		char *sig, size_t nr_fields, struct ustctl_field *fields, int loglevel,
		char *model_emf_uri)
{
	int ret, ret_code;
	uint32_t event_id = 0;
	uint64_t chan_reg_key;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		DBG("Application socket %d is being teardown. Abort event notify",
				sock);
		ret = 0;
		/* Nothing was registered; release the received buffers here. */
		free(sig);
		free(fields);
		free(model_emf_uri);
		goto error_rcu_unlock;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		DBG("Application channel is being teardown. Abort event notify");
		ret = 0;
		free(sig);
		free(fields);
		free(model_emf_uri);
		goto error_rcu_unlock;
	}

	assert(ua_chan->session);
	ua_sess = ua_chan->session;

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Depending on the buffer type, a different channel key is used. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, this call acquires the ownership of the sig, fields
	 * and model_emf_uri meaning any free are done inside it if needed. These
	 * three variables MUST NOT be read/write after this.
	 */
	ret_code = ust_registry_create_event(registry, chan_reg_key,
			sobjd, cobjd, name, sig, nr_fields, fields, loglevel,
			model_emf_uri, ua_sess->buffer_type, &event_id,
			app);

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_event(sock, event_id, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply event failed with ret %d", ret);
		} else {
			DBG3("UST app reply event failed. Application died");
		}
		/*
		 * No need to wipe the create event since the application socket will
		 * get close on error hence cleaning up everything by itself.
		 */
		goto error;
	}

	DBG3("UST registry event %s with id %" PRId32 " added successfully",
			name, event_id);

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
4960
/*
 * Handle application notification through the given notify socket.
 *
 * Receives the notify command then dispatches on it: event registration is
 * forwarded to add_event_ust_registry(), channel registration to
 * reply_ust_register_channel(). Both callees take ownership of the buffers
 * allocated by the ustctl recv calls.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_recv_notify(int sock)
{
	int ret;
	enum ustctl_notify_cmd cmd;

	DBG3("UST app receiving notify from sock %d", sock);

	ret = ustctl_recv_notify(sock, &cmd);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app recv notify failed with ret %d", ret);
		} else {
			DBG3("UST app recv notify failed. Application died");
		}
		goto error;
	}

	switch (cmd) {
	case USTCTL_NOTIFY_CMD_EVENT:
	{
		int sobjd, cobjd, loglevel;
		char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register event received");

		/* sig, fields and model_emf_uri are allocated by this call. */
		ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name, &loglevel,
				&sig, &nr_fields, &fields, &model_emf_uri);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv event failed with ret %d", ret);
			} else {
				DBG3("UST app recv event failed. Application died");
			}
			goto error;
		}

		/*
		 * Add event to the UST registry coming from the notify socket. This
		 * call will free if needed the sig, fields and model_emf_uri. This
		 * code path loses the ownsership of these variables and transfer them
		 * to the this function.
		 */
		ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
				fields, loglevel, model_emf_uri);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case USTCTL_NOTIFY_CMD_CHANNEL:
	{
		int sobjd, cobjd;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register channel received");

		/* fields is allocated by this call. */
		ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
				&fields);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv channel failed with ret %d", ret);
			} else {
				DBG3("UST app recv channel failed. Application died");
			}
			goto error;
		}

		/*
		 * The fields ownership are transfered to this function call meaning
		 * that if needed it will be freed. After this, it's invalid to access
		 * fields or clean it up.
		 */
		ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,
				fields);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	default:
		/* Should NEVER happen. */
		assert(0);
	}

error:
	return ret;
}
5058
/*
 * Once the notify socket hangs up, this is called. First, it tries to find the
 * corresponding application. On failure, the call_rcu to close the socket is
 * executed. If an application is found, it tries to delete it from the notify
 * socket hash table. Whathever the result, it proceeds to the call_rcu.
 *
 * Note that an object needs to be allocated here so on ENOMEM failure, the
 * call RCU is not done but the rest of the cleanup is.
 */
void ust_app_notify_sock_unregister(int sock)
{
	int err_enomem = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_notify_sock_obj *obj;

	assert(sock >= 0);

	rcu_read_lock();

	/* Allocated up front: the call_rcu callback needs it to close the fd. */
	obj = zmalloc(sizeof(*obj));
	if (!obj) {
		/*
		 * An ENOMEM is kind of uncool. If this strikes we continue the
		 * procedure but the call_rcu will not be called. In this case, we
		 * accept the fd leak rather than possibly creating an unsynchronized
		 * state between threads.
		 *
		 * TODO: The notify object should be created once the notify socket is
		 * registered and stored independantely from the ust app object. The
		 * tricky part is to synchronize the teardown of the application and
		 * this notify object. Let's keep that in mind so we can avoid this
		 * kind of shenanigans with ENOMEM in the teardown path.
		 */
		err_enomem = 1;
	} else {
		obj->fd = sock;
	}

	DBG("UST app notify socket unregister %d", sock);

	/*
	 * Lookup application by notify socket. If this fails, this means that the
	 * hash table delete has already been done by the application
	 * unregistration process so we can safely close the notify socket in a
	 * call RCU.
	 */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		goto close_socket;
	}

	iter.iter.node = &app->notify_sock_n.node;

	/*
	 * Whatever happens here either we fail or succeed, in both cases we have
	 * to close the socket after a grace period to continue to the call RCU
	 * here. If the deletion is successful, the application is not visible
	 * anymore by other threads and is it fails it means that it was already
	 * deleted from the hash table so either way we just have to close the
	 * socket.
	 */
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

close_socket:
	rcu_read_unlock();

	/*
	 * Close socket after a grace period to avoid for the socket to be reused
	 * before the application object is freed creating potential race between
	 * threads trying to add unique in the global hash table.
	 */
	if (!err_enomem) {
		call_rcu(&obj->head, close_notify_sock_rcu);
	}
}
5135
5136 /*
5137 * Destroy a ust app data structure and free its memory.
5138 */
5139 void ust_app_destroy(struct ust_app *app)
5140 {
5141 if (!app) {
5142 return;
5143 }
5144
5145 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
5146 }
5147
/*
 * Take a snapshot for a given UST session. The snapshot is sent to the given
 * output.
 *
 * Depending on the session buffer type, either the per-UID buffer registries
 * or each registered application's per-PID session is walked; every channel
 * plus the metadata channel is snapshotted through the consumer socket.
 *
 * Return 0 on success, -ENODATA when there was nothing to snapshot, or else
 * a negative value.
 */
int ust_app_snapshot_record(struct ltt_ust_session *usess,
		struct snapshot_output *output, int wait,
		uint64_t nb_packets_per_stream)
{
	int ret = 0;
	unsigned int snapshot_done = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	char pathname[PATH_MAX];

	assert(usess);
	assert(output);

	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				ret = -EINVAL;
				goto error;
			}

			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname),
					DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
					reg->uid, reg->bits_per_long);
			if (ret < 0) {
				PERROR("snprintf snapshot path");
				goto error;
			}

			/* Add the UST default trace dir to path. */
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {
				ret = consumer_snapshot_channel(socket, reg_chan->consumer_key,
						output, 0, usess->uid, usess->gid, pathname, wait,
						nb_packets_per_stream);
				if (ret < 0) {
					goto error;
				}
			}
			/* Snapshot the metadata channel last. */
			ret = consumer_snapshot_channel(socket,
					reg->registry->reg.ust->metadata_key, output, 1,
					usess->uid, usess->gid, pathname, wait, 0);
			if (ret < 0) {
				goto error;
			}
			snapshot_done = 1;
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->bits_per_long,
					output->consumer);
			if (!socket) {
				ret = -EINVAL;
				goto error;
			}

			/* Add the UST default trace dir to path. */
			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
					ua_sess->path);
			if (ret < 0) {
				PERROR("snprintf snapshot path");
				goto error;
			}

			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				ret = consumer_snapshot_channel(socket, ua_chan->key, output,
						0, ua_sess->euid, ua_sess->egid, pathname, wait,
						nb_packets_per_stream);
				if (ret < 0) {
					goto error;
				}
			}

			/* Snapshot the per-app metadata channel last. */
			registry = get_session_registry(ua_sess);
			assert(registry);
			ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
					1, ua_sess->euid, ua_sess->egid, pathname, wait, 0);
			if (ret < 0) {
				goto error;
			}
			snapshot_done = 1;
		}
		break;
	}
	default:
		assert(0);
		break;
	}

	if (!snapshot_done) {
		/*
		 * If no snapshot was made and we are not in the error path, this means
		 * that there are no buffers thus no (prior) application to snapshot
		 * data from so we have simply NO data.
		 */
		ret = -ENODATA;
	}

error:
	rcu_read_unlock();
	return ret;
}
5286
5287 /*
5288 * Return the size taken by one more packet per stream.
5289 */
5290 uint64_t ust_app_get_size_one_more_packet_per_stream(struct ltt_ust_session *usess,
5291 uint64_t cur_nr_packets)
5292 {
5293 uint64_t tot_size = 0;
5294 struct ust_app *app;
5295 struct lttng_ht_iter iter;
5296
5297 assert(usess);
5298
5299 switch (usess->buffer_type) {
5300 case LTTNG_BUFFER_PER_UID:
5301 {
5302 struct buffer_reg_uid *reg;
5303
5304 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5305 struct buffer_reg_channel *reg_chan;
5306
5307 rcu_read_lock();
5308 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5309 reg_chan, node.node) {
5310 if (cur_nr_packets >= reg_chan->num_subbuf) {
5311 /*
5312 * Don't take channel into account if we
5313 * already grab all its packets.
5314 */
5315 continue;
5316 }
5317 tot_size += reg_chan->subbuf_size * reg_chan->stream_count;
5318 }
5319 rcu_read_unlock();
5320 }
5321 break;
5322 }
5323 case LTTNG_BUFFER_PER_PID:
5324 {
5325 rcu_read_lock();
5326 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5327 struct ust_app_channel *ua_chan;
5328 struct ust_app_session *ua_sess;
5329 struct lttng_ht_iter chan_iter;
5330
5331 ua_sess = lookup_session_by_app(usess, app);
5332 if (!ua_sess) {
5333 /* Session not associated with this app. */
5334 continue;
5335 }
5336
5337 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
5338 ua_chan, node.node) {
5339 if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
5340 /*
5341 * Don't take channel into account if we
5342 * already grab all its packets.
5343 */
5344 continue;
5345 }
5346 tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
5347 }
5348 }
5349 rcu_read_unlock();
5350 break;
5351 }
5352 default:
5353 assert(0);
5354 break;
5355 }
5356
5357 return tot_size;
5358 }
This page took 0.150721 seconds and 5 git commands to generate.