Fix: streamline ret/errno of run_as()
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2 only,
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
16 */
17
18 #define _GNU_SOURCE
19 #include <errno.h>
20 #include <inttypes.h>
21 #include <pthread.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <sys/stat.h>
26 #include <sys/types.h>
27 #include <unistd.h>
28 #include <urcu/compiler.h>
29 #include <lttng/ust-error.h>
30 #include <signal.h>
31
32 #include <common/common.h>
33 #include <common/sessiond-comm/sessiond-comm.h>
34
35 #include "buffer-registry.h"
36 #include "fd-limit.h"
37 #include "health-sessiond.h"
38 #include "ust-app.h"
39 #include "ust-consumer.h"
40 #include "ust-ctl.h"
41 #include "utils.h"
42
43 static
44 int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
45
46 /* Next available channel key. Access under next_channel_key_lock. */
47 static uint64_t _next_channel_key;
48 static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
49
50 /* Next available session ID. Access under next_session_id_lock. */
51 static uint64_t _next_session_id;
52 static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
53
54 /*
55 * Return the incremented value of next_channel_key.
56 */
57 static uint64_t get_next_channel_key(void)
58 {
59 uint64_t ret;
60
61 pthread_mutex_lock(&next_channel_key_lock);
62 ret = ++_next_channel_key;
63 pthread_mutex_unlock(&next_channel_key_lock);
64 return ret;
65 }
66
67 /*
68 * Return the atomically incremented value of next_session_id.
69 */
70 static uint64_t get_next_session_id(void)
71 {
72 uint64_t ret;
73
74 pthread_mutex_lock(&next_session_id_lock);
75 ret = ++_next_session_id;
76 pthread_mutex_unlock(&next_session_id_lock);
77 return ret;
78 }
79
80 static void copy_channel_attr_to_ustctl(
81 struct ustctl_consumer_channel_attr *attr,
82 struct lttng_ust_channel_attr *uattr)
83 {
84 /* Copy event attributes since the layout is different. */
85 attr->subbuf_size = uattr->subbuf_size;
86 attr->num_subbuf = uattr->num_subbuf;
87 attr->overwrite = uattr->overwrite;
88 attr->switch_timer_interval = uattr->switch_timer_interval;
89 attr->read_timer_interval = uattr->read_timer_interval;
90 attr->output = uattr->output;
91 }
92
/*
 * Match function for the hash table lookup.
 *
 * It matches an ust app event based on three attributes which are the event
 * name, the filter bytecode and the loglevel.
 *
 * Returns 1 on a full match of all key elements, 0 otherwise. The node is
 * the candidate stored in the hash table; _key is the ust_app_ht_key the
 * caller is looking up.
 */
static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
{
	struct ust_app_event *event;
	const struct ust_app_ht_key *key;

	assert(node);
	assert(_key);

	/* Recover the enclosing event from its embedded lfht node. */
	event = caa_container_of(node, struct ust_app_event, node.node);
	key = _key;

	/* Match the 4 elements of the key: name, filter, loglevel, exclusions */

	/* Event name */
	if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
		goto no_match;
	}

	/* Event loglevel. */
	if (event->attr.loglevel != key->loglevel) {
		if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
				&& key->loglevel == 0 && event->attr.loglevel == -1) {
			/*
			 * Match is accepted. This is because on event creation, the
			 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
			 * -1 are accepted for this loglevel type since 0 is the one set by
			 * the API when receiving an enable event.
			 */
		} else {
			goto no_match;
		}
	}

	/* One of the filters is NULL, fail. */
	if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
		goto no_match;
	}

	if (key->filter && event->filter) {
		/* Both filters exists, check length followed by the bytecode. */
		if (event->filter->len != key->filter->len ||
				memcmp(event->filter->data, key->filter->data,
					event->filter->len) != 0) {
			goto no_match;
		}
	}

	/* One of the exclusions is NULL, fail. */
	if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
		goto no_match;
	}

	if (key->exclusion && event->exclusion) {
		/* Both exclusions exists, check count followed by the names. */
		if (event->exclusion->count != key->exclusion->count ||
				memcmp(event->exclusion->names, key->exclusion->names,
					event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
			goto no_match;
		}
	}


	/* Match. */
	return 1;

no_match:
	return 0;
}
167
/*
 * Unique add of an ust app event in the given ht. This uses the custom
 * ht_match_ust_app_event match function and the event name as hash.
 *
 * A duplicate insertion is a programming error: cds_lfht_add_unique()
 * returns the pre-existing node on conflict, which trips the assert below.
 */
static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
		struct ust_app_event *event)
{
	struct cds_lfht_node *node_ptr;
	struct ust_app_ht_key key;
	struct lttng_ht *ht;

	assert(ua_chan);
	assert(ua_chan->events);
	assert(event);

	ht = ua_chan->events;
	/* Build the lookup key from the event's own attributes. */
	key.name = event->attr.name;
	key.filter = event->filter;
	key.loglevel = event->attr.loglevel;
	key.exclusion = event->exclusion;

	node_ptr = cds_lfht_add_unique(ht->ht,
			ht->hash_fct(event->node.key, lttng_ht_seed),
			ht_match_ust_app_event, &key, &event->node.node);
	assert(node_ptr == &event->node.node);
}
194
195 /*
196 * Close the notify socket from the given RCU head object. This MUST be called
197 * through a call_rcu().
198 */
199 static void close_notify_sock_rcu(struct rcu_head *head)
200 {
201 int ret;
202 struct ust_app_notify_sock_obj *obj =
203 caa_container_of(head, struct ust_app_notify_sock_obj, head);
204
205 /* Must have a valid fd here. */
206 assert(obj->fd >= 0);
207
208 ret = close(obj->fd);
209 if (ret) {
210 ERR("close notify sock %d RCU", obj->fd);
211 }
212 lttng_fd_put(LTTNG_FD_APPS, 1);
213
214 free(obj);
215 }
216
217 /*
218 * Return the session registry according to the buffer type of the given
219 * session.
220 *
221 * A registry per UID object MUST exists before calling this function or else
222 * it assert() if not found. RCU read side lock must be acquired.
223 */
224 static struct ust_registry_session *get_session_registry(
225 struct ust_app_session *ua_sess)
226 {
227 struct ust_registry_session *registry = NULL;
228
229 assert(ua_sess);
230
231 switch (ua_sess->buffer_type) {
232 case LTTNG_BUFFER_PER_PID:
233 {
234 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
235 if (!reg_pid) {
236 goto error;
237 }
238 registry = reg_pid->registry->reg.ust;
239 break;
240 }
241 case LTTNG_BUFFER_PER_UID:
242 {
243 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
244 ua_sess->tracing_id, ua_sess->bits_per_long, ua_sess->uid);
245 if (!reg_uid) {
246 goto error;
247 }
248 registry = reg_uid->registry->reg.ust;
249 break;
250 }
251 default:
252 assert(0);
253 };
254
255 error:
256 return registry;
257 }
258
259 /*
260 * Delete ust context safely. RCU read lock must be held before calling
261 * this function.
262 */
263 static
264 void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx)
265 {
266 int ret;
267
268 assert(ua_ctx);
269
270 if (ua_ctx->obj) {
271 ret = ustctl_release_object(sock, ua_ctx->obj);
272 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
273 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
274 sock, ua_ctx->obj->handle, ret);
275 }
276 free(ua_ctx->obj);
277 }
278 free(ua_ctx);
279 }
280
281 /*
282 * Delete ust app event safely. RCU read lock must be held before calling
283 * this function.
284 */
285 static
286 void delete_ust_app_event(int sock, struct ust_app_event *ua_event)
287 {
288 int ret;
289
290 assert(ua_event);
291
292 free(ua_event->filter);
293 if (ua_event->exclusion != NULL)
294 free(ua_event->exclusion);
295 if (ua_event->obj != NULL) {
296 ret = ustctl_release_object(sock, ua_event->obj);
297 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
298 ERR("UST app sock %d release event obj failed with ret %d",
299 sock, ret);
300 }
301 free(ua_event->obj);
302 }
303 free(ua_event);
304 }
305
306 /*
307 * Release ust data object of the given stream.
308 *
309 * Return 0 on success or else a negative value.
310 */
311 static int release_ust_app_stream(int sock, struct ust_app_stream *stream)
312 {
313 int ret = 0;
314
315 assert(stream);
316
317 if (stream->obj) {
318 ret = ustctl_release_object(sock, stream->obj);
319 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
320 ERR("UST app sock %d release stream obj failed with ret %d",
321 sock, ret);
322 }
323 lttng_fd_put(LTTNG_FD_APPS, 2);
324 free(stream->obj);
325 }
326
327 return ret;
328 }
329
/*
 * Delete ust app stream safely. RCU read lock must be held before calling
 * this function.
 */
static
void delete_ust_app_stream(int sock, struct ust_app_stream *stream)
{
	assert(stream);

	/* Best effort; failures are already logged by the callee. */
	(void) release_ust_app_stream(sock, stream);
	free(stream);
}
342
343 /*
344 * We need to execute ht_destroy outside of RCU read-side critical
345 * section and outside of call_rcu thread, so we postpone its execution
346 * using ht_cleanup_push. It is simpler than to change the semantic of
347 * the many callers of delete_ust_app_session().
348 */
349 static
350 void delete_ust_app_channel_rcu(struct rcu_head *head)
351 {
352 struct ust_app_channel *ua_chan =
353 caa_container_of(head, struct ust_app_channel, rcu_head);
354
355 ht_cleanup_push(ua_chan->ctx);
356 ht_cleanup_push(ua_chan->events);
357 free(ua_chan);
358 }
359
/*
 * Delete ust app channel safely. RCU read lock must be held before calling
 * this function.
 *
 * Teardown order: streams, contexts, events, the per-PID registry entry
 * for this channel, then the tracer-side UST object. The channel memory
 * itself is reclaimed via call_rcu() so concurrent RCU readers stay safe.
 */
static
void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;
	struct ust_app_stream *stream, *stmp;
	struct ust_registry_session *registry;

	assert(ua_chan);

	DBG3("UST app deleting channel %s", ua_chan->name);

	/* Wipe stream */
	cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
		cds_list_del(&stream->list);
		delete_ust_app_stream(sock, stream);
	}

	/* Wipe context */
	cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
		cds_list_del(&ua_ctx->list);
		ret = lttng_ht_del(ua_chan->ctx, &iter);
		assert(!ret);
		delete_ust_app_ctx(sock, ua_ctx);
	}

	/* Wipe events */
	cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
			node.node) {
		ret = lttng_ht_del(ua_chan->events, &iter);
		assert(!ret);
		delete_ust_app_event(sock, ua_event);
	}

	if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
		/* Wipe and free registry from session registry. */
		registry = get_session_registry(ua_chan->session);
		if (registry) {
			ust_registry_channel_del_free(registry, ua_chan->key);
		}
	}

	if (ua_chan->obj != NULL) {
		/* Remove channel from application UST object descriptor. */
		iter.iter.node = &ua_chan->ust_objd_node.node;
		ret = lttng_ht_del(app->ust_objd, &iter);
		assert(!ret);
		/* EPIPE / EXITING mean the application died; not an error. */
		ret = ustctl_release_object(sock, ua_chan->obj);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release channel obj failed with ret %d",
					sock, ret);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ua_chan->obj);
	}
	/* Defer hash table destruction and final free past the grace period. */
	call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
}
424
/*
 * Push metadata to consumer socket.
 *
 * RCU read-side lock must be held to guarantee existance of socket.
 * Must be called with the ust app session lock held.
 * Must be called with the registry lock held.
 *
 * On success, return the len of metadata pushed or else a negative value.
 * Returning a -EPIPE return value means we could not send the metadata,
 * but it can be caused by recoverable errors (e.g. the application has
 * terminated concurrently).
 */
ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
		struct consumer_socket *socket, int send_zero_data)
{
	int ret;
	char *metadata_str = NULL;
	size_t len, offset;
	ssize_t ret_val;

	assert(registry);
	assert(socket);

	/*
	 * Means that no metadata was assigned to the session. This can
	 * happens if no start has been done previously.
	 */
	if (!registry->metadata_key) {
		return 0;
	}

	/*
	 * On a push metadata error either the consumer is dead or the
	 * metadata channel has been destroyed because its endpoint
	 * might have died (e.g: relayd), or because the application has
	 * exited. If so, the metadata closed flag is set to 1 so we
	 * deny pushing metadata again which is not valid anymore on the
	 * consumer side.
	 */
	if (registry->metadata_closed) {
		return -EPIPE;
	}

	/* Push only the part of the metadata not yet sent. */
	offset = registry->metadata_len_sent;
	len = registry->metadata_len - registry->metadata_len_sent;
	if (len == 0) {
		DBG3("No metadata to push for metadata key %" PRIu64,
				registry->metadata_key);
		ret_val = len;
		if (send_zero_data) {
			DBG("No metadata to push");
			/* Zero-length push: metadata_str stays NULL. */
			goto push_data;
		}
		goto end;
	}

	/* Allocate only what we have to send. */
	metadata_str = zmalloc(len);
	if (!metadata_str) {
		PERROR("zmalloc ust app metadata string");
		ret_val = -ENOMEM;
		goto error;
	}
	/* Copy what we haven't send out. */
	memcpy(metadata_str, registry->metadata + offset, len);
	/* Optimistically account the bytes as sent; rolled back on failure. */
	registry->metadata_len_sent += len;

push_data:
	ret = consumer_push_metadata(socket, registry->metadata_key,
			metadata_str, len, offset);
	if (ret < 0) {
		/*
		 * There is an acceptable race here between the registry
		 * metadata key assignment and the creation on the
		 * consumer. The session daemon can concurrently push
		 * metadata for this registry while being created on the
		 * consumer since the metadata key of the registry is
		 * assigned *before* it is setup to avoid the consumer
		 * to ask for metadata that could possibly be not found
		 * in the session daemon.
		 *
		 * The metadata will get pushed either by the session
		 * being stopped or the consumer requesting metadata if
		 * that race is triggered.
		 */
		if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
			ret = 0;
		}

		/*
		 * Update back the actual metadata len sent since it
		 * failed here.
		 */
		registry->metadata_len_sent -= len;
		ret_val = ret;
		goto error_push;
	}

	free(metadata_str);
	return len;

end:
error:
	/* ret_val == 0 here means "nothing to do", not a failure. */
	if (ret_val) {
		/*
		 * On error, flag the registry that the metadata is
		 * closed. We were unable to push anything and this
		 * means that either the consumer is not responding or
		 * the metadata cache has been destroyed on the
		 * consumer.
		 */
		registry->metadata_closed = 1;
	}
error_push:
	/* free(NULL) is safe on the zero-length path. */
	free(metadata_str);
	return ret_val;
}
542
543 /*
544 * For a given application and session, push metadata to consumer.
545 * Either sock or consumer is required : if sock is NULL, the default
546 * socket to send the metadata is retrieved from consumer, if sock
547 * is not NULL we use it to send the metadata.
548 * RCU read-side lock must be held while calling this function,
549 * therefore ensuring existance of registry. It also ensures existance
550 * of socket throughout this function.
551 *
552 * Return 0 on success else a negative error.
553 * Returning a -EPIPE return value means we could not send the metadata,
554 * but it can be caused by recoverable errors (e.g. the application has
555 * terminated concurrently).
556 */
557 static int push_metadata(struct ust_registry_session *registry,
558 struct consumer_output *consumer)
559 {
560 int ret_val;
561 ssize_t ret;
562 struct consumer_socket *socket;
563
564 assert(registry);
565 assert(consumer);
566
567 pthread_mutex_lock(&registry->lock);
568 if (registry->metadata_closed) {
569 ret_val = -EPIPE;
570 goto error;
571 }
572
573 /* Get consumer socket to use to push the metadata.*/
574 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
575 consumer);
576 if (!socket) {
577 ret_val = -1;
578 goto error;
579 }
580
581 ret = ust_app_push_metadata(registry, socket, 0);
582 if (ret < 0) {
583 ret_val = ret;
584 goto error;
585 }
586 pthread_mutex_unlock(&registry->lock);
587 return 0;
588
589 error:
590 pthread_mutex_unlock(&registry->lock);
591 return ret_val;
592 }
593
/*
 * Send to the consumer a close metadata command for the given session. Once
 * done, the metadata channel is deleted and the session metadata pointer is
 * nullified. The session lock MUST be held unless the application is
 * in the destroy path.
 *
 * Return 0 on success else a negative value.
 */
static int close_metadata(struct ust_registry_session *registry,
		struct consumer_output *consumer)
{
	int ret;
	struct consumer_socket *socket;

	assert(registry);
	assert(consumer);

	rcu_read_lock();

	pthread_mutex_lock(&registry->lock);

	/* Nothing to do if no metadata key was assigned or already closed. */
	if (!registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto end;
	}

	/* Get consumer socket to use to push the metadata.*/
	socket = consumer_find_socket_by_bitness(registry->bits_per_long,
			consumer);
	if (!socket) {
		ret = -1;
		goto error;
	}

	ret = consumer_close_metadata(socket, registry->metadata_key);
	if (ret < 0) {
		goto error;
	}

	/* Success deliberately falls through: flag closed either way. */
error:
	/*
	 * Metadata closed. Even on error this means that the consumer is not
	 * responding or not found so either way a second close should NOT be emit
	 * for this registry.
	 */
	registry->metadata_closed = 1;
end:
	pthread_mutex_unlock(&registry->lock);
	rcu_read_unlock();
	return ret;
}
645
646 /*
647 * We need to execute ht_destroy outside of RCU read-side critical
648 * section and outside of call_rcu thread, so we postpone its execution
649 * using ht_cleanup_push. It is simpler than to change the semantic of
650 * the many callers of delete_ust_app_session().
651 */
652 static
653 void delete_ust_app_session_rcu(struct rcu_head *head)
654 {
655 struct ust_app_session *ua_sess =
656 caa_container_of(head, struct ust_app_session, rcu_head);
657
658 ht_cleanup_push(ua_sess->channels);
659 free(ua_sess);
660 }
661
/*
 * Delete ust app session safely. RCU read lock must be held before calling
 * this function.
 *
 * Pushes the remaining metadata, closes it for per-PID buffers, destroys
 * every channel, the per-PID buffer registry and the tracer-side session
 * handle, then reclaims the session memory through call_rcu().
 */
static
void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
		struct ust_app *app)
{
	int ret;
	struct lttng_ht_iter iter;
	struct ust_app_channel *ua_chan;
	struct ust_registry_session *registry;

	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	/* Double deletion is a bug: the flag makes it fatal in debug builds. */
	assert(!ua_sess->deleted);
	ua_sess->deleted = true;

	registry = get_session_registry(ua_sess);
	if (registry) {
		/* Push metadata for application before freeing the application. */
		(void) push_metadata(registry, ua_sess->consumer);

		/*
		 * Don't ask to close metadata for global per UID buffers. Close
		 * metadata only on destroy trace session in this case. Also, the
		 * previous push metadata could have flag the metadata registry to
		 * close so don't send a close command if closed.
		 */
		if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
			/* And ask to close it for this session registry. */
			(void) close_metadata(registry, ua_sess->consumer);
		}
	}

	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = lttng_ht_del(ua_sess->channels, &iter);
		assert(!ret);
		delete_ust_app_channel(sock, ua_chan, app);
	}

	/* In case of per PID, the registry is kept in the session. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
		struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
		if (reg_pid) {
			buffer_reg_pid_remove(reg_pid);
			buffer_reg_pid_destroy(reg_pid);
		}
	}

	if (ua_sess->handle != -1) {
		ret = ustctl_release_handle(sock, ua_sess->handle);
		if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app sock %d release session handle failed with ret %d",
					sock, ret);
		}
	}
	pthread_mutex_unlock(&ua_sess->lock);

	/* Drop this session's reference on the consumer output. */
	consumer_output_put(ua_sess->consumer);

	call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
}
728
/*
 * Delete a traceable application structure from the global list. Never call
 * this function outside of a call_rcu call.
 *
 * RCU read side lock should _NOT_ be held when calling this function.
 */
static
void delete_ust_app(struct ust_app *app)
{
	int ret, sock;
	struct ust_app_session *ua_sess, *tmp_ua_sess;

	/* Delete ust app sessions info */
	sock = app->sock;
	/* Invalidate the stored socket; the local copy is closed below. */
	app->sock = -1;

	/* Wipe sessions */
	cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
			teardown_node) {
		/* Free every object in the session and the session. */
		rcu_read_lock();
		delete_ust_app_session(sock, ua_sess, app);
		rcu_read_unlock();
	}

	/* Deferred hash table destruction (see ht_cleanup_push rationale). */
	ht_cleanup_push(app->sessions);
	ht_cleanup_push(app->ust_objd);

	/*
	 * Wait until we have deleted the application from the sock hash table
	 * before closing this socket, otherwise an application could re-use the
	 * socket ID and race with the teardown, using the same hash table entry.
	 *
	 * It's OK to leave the close in call_rcu. We want it to stay unique for
	 * all RCU readers that could run concurrently with unregister app,
	 * therefore we _need_ to only close that socket after a grace period. So
	 * it should stay in this RCU callback.
	 *
	 * This close() is a very important step of the synchronization model so
	 * every modification to this function must be carefully reviewed.
	 */
	ret = close(sock);
	if (ret) {
		PERROR("close");
	}
	lttng_fd_put(LTTNG_FD_APPS, 1);

	DBG2("UST app pid %d deleted", app->pid);
	free(app);
}
779
780 /*
781 * URCU intermediate call to delete an UST app.
782 */
783 static
784 void delete_ust_app_rcu(struct rcu_head *head)
785 {
786 struct lttng_ht_node_ulong *node =
787 caa_container_of(head, struct lttng_ht_node_ulong, head);
788 struct ust_app *app =
789 caa_container_of(node, struct ust_app, pid_n);
790
791 DBG3("Call RCU deleting app PID %d", app->pid);
792 delete_ust_app(app);
793 }
794
795 /*
796 * Delete the session from the application ht and delete the data structure by
797 * freeing every object inside and releasing them.
798 */
799 static void destroy_app_session(struct ust_app *app,
800 struct ust_app_session *ua_sess)
801 {
802 int ret;
803 struct lttng_ht_iter iter;
804
805 assert(app);
806 assert(ua_sess);
807
808 iter.iter.node = &ua_sess->node.node;
809 ret = lttng_ht_del(app->sessions, &iter);
810 if (ret) {
811 /* Already scheduled for teardown. */
812 goto end;
813 }
814
815 /* Once deleted, free the data structure. */
816 delete_ust_app_session(app->sock, ua_sess, app);
817
818 end:
819 return;
820 }
821
822 /*
823 * Alloc new UST app session.
824 */
825 static
826 struct ust_app_session *alloc_ust_app_session(struct ust_app *app)
827 {
828 struct ust_app_session *ua_sess;
829
830 /* Init most of the default value by allocating and zeroing */
831 ua_sess = zmalloc(sizeof(struct ust_app_session));
832 if (ua_sess == NULL) {
833 PERROR("malloc");
834 goto error_free;
835 }
836
837 ua_sess->handle = -1;
838 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
839 ua_sess->metadata_attr.type = LTTNG_UST_CHAN_METADATA;
840 pthread_mutex_init(&ua_sess->lock, NULL);
841
842 return ua_sess;
843
844 error_free:
845 return NULL;
846 }
847
/*
 * Alloc new UST app channel.
 *
 * Returns a zero-initialized channel with defaults set (enabled, per-CPU,
 * fresh key, optional attributes copied from attr), or NULL on allocation
 * failure. The channel is not yet registered with the tracer.
 */
static
struct ust_app_channel *alloc_ust_app_channel(char *name,
		struct ust_app_session *ua_sess,
		struct lttng_ust_channel_attr *attr)
{
	struct ust_app_channel *ua_chan;

	/* Init most of the default value by allocating and zeroing */
	ua_chan = zmalloc(sizeof(struct ust_app_channel));
	if (ua_chan == NULL) {
		PERROR("malloc");
		goto error;
	}

	/* Setup channel name */
	strncpy(ua_chan->name, name, sizeof(ua_chan->name));
	ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';

	ua_chan->enabled = 1;
	/* Zero could be valid; -1 marks "no handle yet". */
	ua_chan->handle = -1;
	ua_chan->session = ua_sess;
	ua_chan->key = get_next_channel_key();
	/*
	 * NOTE(review): lttng_ht_new() results are not checked here; an OOM
	 * would leave NULL hash tables that later code dereferences — confirm
	 * whether this should fail the allocation instead.
	 */
	ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
	ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
	lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);

	CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
	CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);

	/* Copy attributes */
	if (attr) {
		/* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
		ua_chan->attr.subbuf_size = attr->subbuf_size;
		ua_chan->attr.num_subbuf = attr->num_subbuf;
		ua_chan->attr.overwrite = attr->overwrite;
		ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
		ua_chan->attr.read_timer_interval = attr->read_timer_interval;
		ua_chan->attr.output = attr->output;
	}
	/* By default, the channel is a per cpu channel. */
	ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;

	DBG3("UST app channel %s allocated", ua_chan->name);

	return ua_chan;

error:
	return NULL;
}
900
901 /*
902 * Allocate and initialize a UST app stream.
903 *
904 * Return newly allocated stream pointer or NULL on error.
905 */
906 struct ust_app_stream *ust_app_alloc_stream(void)
907 {
908 struct ust_app_stream *stream = NULL;
909
910 stream = zmalloc(sizeof(*stream));
911 if (stream == NULL) {
912 PERROR("zmalloc ust app stream");
913 goto error;
914 }
915
916 /* Zero could be a valid value for a handle so flag it to -1. */
917 stream->handle = -1;
918
919 error:
920 return stream;
921 }
922
923 /*
924 * Alloc new UST app event.
925 */
926 static
927 struct ust_app_event *alloc_ust_app_event(char *name,
928 struct lttng_ust_event *attr)
929 {
930 struct ust_app_event *ua_event;
931
932 /* Init most of the default value by allocating and zeroing */
933 ua_event = zmalloc(sizeof(struct ust_app_event));
934 if (ua_event == NULL) {
935 PERROR("malloc");
936 goto error;
937 }
938
939 ua_event->enabled = 1;
940 strncpy(ua_event->name, name, sizeof(ua_event->name));
941 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
942 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
943
944 /* Copy attributes */
945 if (attr) {
946 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
947 }
948
949 DBG3("UST app event %s allocated", ua_event->name);
950
951 return ua_event;
952
953 error:
954 return NULL;
955 }
956
957 /*
958 * Alloc new UST app context.
959 */
960 static
961 struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context *uctx)
962 {
963 struct ust_app_ctx *ua_ctx;
964
965 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
966 if (ua_ctx == NULL) {
967 goto error;
968 }
969
970 CDS_INIT_LIST_HEAD(&ua_ctx->list);
971
972 if (uctx) {
973 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
974 }
975
976 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
977
978 error:
979 return ua_ctx;
980 }
981
982 /*
983 * Allocate a filter and copy the given original filter.
984 *
985 * Return allocated filter or NULL on error.
986 */
987 static struct lttng_ust_filter_bytecode *alloc_copy_ust_app_filter(
988 struct lttng_ust_filter_bytecode *orig_f)
989 {
990 struct lttng_ust_filter_bytecode *filter = NULL;
991
992 /* Copy filter bytecode */
993 filter = zmalloc(sizeof(*filter) + orig_f->len);
994 if (!filter) {
995 PERROR("zmalloc alloc ust app filter");
996 goto error;
997 }
998
999 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
1000
1001 error:
1002 return filter;
1003 }
1004
1005 /*
1006 * Find an ust_app using the sock and return it. RCU read side lock must be
1007 * held before calling this helper function.
1008 */
1009 struct ust_app *ust_app_find_by_sock(int sock)
1010 {
1011 struct lttng_ht_node_ulong *node;
1012 struct lttng_ht_iter iter;
1013
1014 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
1015 node = lttng_ht_iter_get_node_ulong(&iter);
1016 if (node == NULL) {
1017 DBG2("UST app find by sock %d not found", sock);
1018 goto error;
1019 }
1020
1021 return caa_container_of(node, struct ust_app, sock_n);
1022
1023 error:
1024 return NULL;
1025 }
1026
1027 /*
1028 * Find an ust_app using the notify sock and return it. RCU read side lock must
1029 * be held before calling this helper function.
1030 */
1031 static struct ust_app *find_app_by_notify_sock(int sock)
1032 {
1033 struct lttng_ht_node_ulong *node;
1034 struct lttng_ht_iter iter;
1035
1036 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1037 &iter);
1038 node = lttng_ht_iter_get_node_ulong(&iter);
1039 if (node == NULL) {
1040 DBG2("UST app find by notify sock %d not found", sock);
1041 goto error;
1042 }
1043
1044 return caa_container_of(node, struct ust_app, notify_sock_n);
1045
1046 error:
1047 return NULL;
1048 }
1049
1050 /*
1051 * Lookup for an ust app event based on event name, filter bytecode and the
1052 * event loglevel.
1053 *
1054 * Return an ust_app_event object or NULL on error.
1055 */
1056 static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
1057 char *name, struct lttng_ust_filter_bytecode *filter, int loglevel,
1058 const struct lttng_event_exclusion *exclusion)
1059 {
1060 struct lttng_ht_iter iter;
1061 struct lttng_ht_node_str *node;
1062 struct ust_app_event *event = NULL;
1063 struct ust_app_ht_key key;
1064
1065 assert(name);
1066 assert(ht);
1067
1068 /* Setup key for event lookup. */
1069 key.name = name;
1070 key.filter = filter;
1071 key.loglevel = loglevel;
1072 /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
1073 key.exclusion = (struct lttng_ust_event_exclusion *)exclusion;
1074
1075 /* Lookup using the event name as hash and a custom match fct. */
1076 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1077 ht_match_ust_app_event, &key, &iter.iter);
1078 node = lttng_ht_iter_get_node_str(&iter);
1079 if (node == NULL) {
1080 goto end;
1081 }
1082
1083 event = caa_container_of(node, struct ust_app_event, node);
1084
1085 end:
1086 return event;
1087 }
1088
1089 /*
1090 * Create the channel context on the tracer.
1091 *
1092 * Called with UST app session lock held.
1093 */
1094 static
1095 int create_ust_channel_context(struct ust_app_channel *ua_chan,
1096 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1097 {
1098 int ret;
1099
1100 health_code_update();
1101
1102 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
1103 ua_chan->obj, &ua_ctx->obj);
1104 if (ret < 0) {
1105 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1106 ERR("UST app create channel context failed for app (pid: %d) "
1107 "with ret %d", app->pid, ret);
1108 } else {
1109 /*
1110 * This is normal behavior, an application can die during the
1111 * creation process. Don't report an error so the execution can
1112 * continue normally.
1113 */
1114 ret = 0;
1115 DBG3("UST app disable event failed. Application is dead.");
1116 }
1117 goto error;
1118 }
1119
1120 ua_ctx->handle = ua_ctx->obj->handle;
1121
1122 DBG2("UST app context handle %d created successfully for channel %s",
1123 ua_ctx->handle, ua_chan->name);
1124
1125 error:
1126 health_code_update();
1127 return ret;
1128 }
1129
1130 /*
1131 * Set the filter on the tracer.
1132 */
1133 static
1134 int set_ust_event_filter(struct ust_app_event *ua_event,
1135 struct ust_app *app)
1136 {
1137 int ret;
1138
1139 health_code_update();
1140
1141 if (!ua_event->filter) {
1142 ret = 0;
1143 goto error;
1144 }
1145
1146 ret = ustctl_set_filter(app->sock, ua_event->filter,
1147 ua_event->obj);
1148 if (ret < 0) {
1149 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1150 ERR("UST app event %s filter failed for app (pid: %d) "
1151 "with ret %d", ua_event->attr.name, app->pid, ret);
1152 } else {
1153 /*
1154 * This is normal behavior, an application can die during the
1155 * creation process. Don't report an error so the execution can
1156 * continue normally.
1157 */
1158 ret = 0;
1159 DBG3("UST app filter event failed. Application is dead.");
1160 }
1161 goto error;
1162 }
1163
1164 DBG2("UST filter set successfully for event %s", ua_event->name);
1165
1166 error:
1167 health_code_update();
1168 return ret;
1169 }
1170
1171 /*
1172 * Set event exclusions on the tracer.
1173 */
1174 static
1175 int set_ust_event_exclusion(struct ust_app_event *ua_event,
1176 struct ust_app *app)
1177 {
1178 int ret;
1179
1180 health_code_update();
1181
1182 if (!ua_event->exclusion || !ua_event->exclusion->count) {
1183 ret = 0;
1184 goto error;
1185 }
1186
1187 ret = ustctl_set_exclusion(app->sock, ua_event->exclusion,
1188 ua_event->obj);
1189 if (ret < 0) {
1190 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1191 ERR("UST app event %s exclusions failed for app (pid: %d) "
1192 "with ret %d", ua_event->attr.name, app->pid, ret);
1193 } else {
1194 /*
1195 * This is normal behavior, an application can die during the
1196 * creation process. Don't report an error so the execution can
1197 * continue normally.
1198 */
1199 ret = 0;
1200 DBG3("UST app event exclusion failed. Application is dead.");
1201 }
1202 goto error;
1203 }
1204
1205 DBG2("UST exclusion set successfully for event %s", ua_event->name);
1206
1207 error:
1208 health_code_update();
1209 return ret;
1210 }
1211
1212 /*
1213 * Disable the specified event on to UST tracer for the UST session.
1214 */
1215 static int disable_ust_event(struct ust_app *app,
1216 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1217 {
1218 int ret;
1219
1220 health_code_update();
1221
1222 ret = ustctl_disable(app->sock, ua_event->obj);
1223 if (ret < 0) {
1224 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1225 ERR("UST app event %s disable failed for app (pid: %d) "
1226 "and session handle %d with ret %d",
1227 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1228 } else {
1229 /*
1230 * This is normal behavior, an application can die during the
1231 * creation process. Don't report an error so the execution can
1232 * continue normally.
1233 */
1234 ret = 0;
1235 DBG3("UST app disable event failed. Application is dead.");
1236 }
1237 goto error;
1238 }
1239
1240 DBG2("UST app event %s disabled successfully for app (pid: %d)",
1241 ua_event->attr.name, app->pid);
1242
1243 error:
1244 health_code_update();
1245 return ret;
1246 }
1247
1248 /*
1249 * Disable the specified channel on to UST tracer for the UST session.
1250 */
1251 static int disable_ust_channel(struct ust_app *app,
1252 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1253 {
1254 int ret;
1255
1256 health_code_update();
1257
1258 ret = ustctl_disable(app->sock, ua_chan->obj);
1259 if (ret < 0) {
1260 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1261 ERR("UST app channel %s disable failed for app (pid: %d) "
1262 "and session handle %d with ret %d",
1263 ua_chan->name, app->pid, ua_sess->handle, ret);
1264 } else {
1265 /*
1266 * This is normal behavior, an application can die during the
1267 * creation process. Don't report an error so the execution can
1268 * continue normally.
1269 */
1270 ret = 0;
1271 DBG3("UST app disable channel failed. Application is dead.");
1272 }
1273 goto error;
1274 }
1275
1276 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
1277 ua_chan->name, app->pid);
1278
1279 error:
1280 health_code_update();
1281 return ret;
1282 }
1283
1284 /*
1285 * Enable the specified channel on to UST tracer for the UST session.
1286 */
1287 static int enable_ust_channel(struct ust_app *app,
1288 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1289 {
1290 int ret;
1291
1292 health_code_update();
1293
1294 ret = ustctl_enable(app->sock, ua_chan->obj);
1295 if (ret < 0) {
1296 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1297 ERR("UST app channel %s enable failed for app (pid: %d) "
1298 "and session handle %d with ret %d",
1299 ua_chan->name, app->pid, ua_sess->handle, ret);
1300 } else {
1301 /*
1302 * This is normal behavior, an application can die during the
1303 * creation process. Don't report an error so the execution can
1304 * continue normally.
1305 */
1306 ret = 0;
1307 DBG3("UST app enable channel failed. Application is dead.");
1308 }
1309 goto error;
1310 }
1311
1312 ua_chan->enabled = 1;
1313
1314 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
1315 ua_chan->name, app->pid);
1316
1317 error:
1318 health_code_update();
1319 return ret;
1320 }
1321
1322 /*
1323 * Enable the specified event on to UST tracer for the UST session.
1324 */
1325 static int enable_ust_event(struct ust_app *app,
1326 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1327 {
1328 int ret;
1329
1330 health_code_update();
1331
1332 ret = ustctl_enable(app->sock, ua_event->obj);
1333 if (ret < 0) {
1334 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1335 ERR("UST app event %s enable failed for app (pid: %d) "
1336 "and session handle %d with ret %d",
1337 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1338 } else {
1339 /*
1340 * This is normal behavior, an application can die during the
1341 * creation process. Don't report an error so the execution can
1342 * continue normally.
1343 */
1344 ret = 0;
1345 DBG3("UST app enable event failed. Application is dead.");
1346 }
1347 goto error;
1348 }
1349
1350 DBG2("UST app event %s enabled successfully for app (pid: %d)",
1351 ua_event->attr.name, app->pid);
1352
1353 error:
1354 health_code_update();
1355 return ret;
1356 }
1357
1358 /*
1359 * Send channel and stream buffer to application.
1360 *
1361 * Return 0 on success. On error, a negative value is returned.
1362 */
1363 static int send_channel_pid_to_ust(struct ust_app *app,
1364 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1365 {
1366 int ret;
1367 struct ust_app_stream *stream, *stmp;
1368
1369 assert(app);
1370 assert(ua_sess);
1371 assert(ua_chan);
1372
1373 health_code_update();
1374
1375 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
1376 app->sock);
1377
1378 /* Send channel to the application. */
1379 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
1380 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1381 ret = -ENOTCONN; /* Caused by app exiting. */
1382 goto error;
1383 } else if (ret < 0) {
1384 goto error;
1385 }
1386
1387 health_code_update();
1388
1389 /* Send all streams to application. */
1390 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
1391 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
1392 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1393 ret = -ENOTCONN; /* Caused by app exiting. */
1394 goto error;
1395 } else if (ret < 0) {
1396 goto error;
1397 }
1398 /* We don't need the stream anymore once sent to the tracer. */
1399 cds_list_del(&stream->list);
1400 delete_ust_app_stream(-1, stream);
1401 }
1402 /* Flag the channel that it is sent to the application. */
1403 ua_chan->is_sent = 1;
1404
1405 error:
1406 health_code_update();
1407 return ret;
1408 }
1409
1410 /*
1411 * Create the specified event onto the UST tracer for a UST session.
1412 *
1413 * Should be called with session mutex held.
1414 */
1415 static
1416 int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
1417 struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
1418 {
1419 int ret = 0;
1420
1421 health_code_update();
1422
1423 /* Create UST event on tracer */
1424 ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
1425 &ua_event->obj);
1426 if (ret < 0) {
1427 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1428 ERR("Error ustctl create event %s for app pid: %d with ret %d",
1429 ua_event->attr.name, app->pid, ret);
1430 } else {
1431 /*
1432 * This is normal behavior, an application can die during the
1433 * creation process. Don't report an error so the execution can
1434 * continue normally.
1435 */
1436 ret = 0;
1437 DBG3("UST app create event failed. Application is dead.");
1438 }
1439 goto error;
1440 }
1441
1442 ua_event->handle = ua_event->obj->handle;
1443
1444 DBG2("UST app event %s created successfully for pid:%d",
1445 ua_event->attr.name, app->pid);
1446
1447 health_code_update();
1448
1449 /* Set filter if one is present. */
1450 if (ua_event->filter) {
1451 ret = set_ust_event_filter(ua_event, app);
1452 if (ret < 0) {
1453 goto error;
1454 }
1455 }
1456
1457 /* Set exclusions for the event */
1458 if (ua_event->exclusion) {
1459 ret = set_ust_event_exclusion(ua_event, app);
1460 if (ret < 0) {
1461 goto error;
1462 }
1463 }
1464
1465 /* If event not enabled, disable it on the tracer */
1466 if (ua_event->enabled) {
1467 /*
1468 * We now need to explicitly enable the event, since it
1469 * is now disabled at creation.
1470 */
1471 ret = enable_ust_event(app, ua_sess, ua_event);
1472 if (ret < 0) {
1473 /*
1474 * If we hit an EPERM, something is wrong with our enable call. If
1475 * we get an EEXIST, there is a problem on the tracer side since we
1476 * just created it.
1477 */
1478 switch (ret) {
1479 case -LTTNG_UST_ERR_PERM:
1480 /* Code flow problem */
1481 assert(0);
1482 case -LTTNG_UST_ERR_EXIST:
1483 /* It's OK for our use case. */
1484 ret = 0;
1485 break;
1486 default:
1487 break;
1488 }
1489 goto error;
1490 }
1491 } else {
1492 ret = disable_ust_event(app, ua_sess, ua_event);
1493 if (ret < 0) {
1494 /*
1495 * If we hit an EPERM, something is wrong with our disable call. If
1496 * we get an EEXIST, there is a problem on the tracer side since we
1497 * just created it.
1498 */
1499 switch (ret) {
1500 case -LTTNG_UST_ERR_PERM:
1501 /* Code flow problem */
1502 assert(0);
1503 case -LTTNG_UST_ERR_EXIST:
1504 /* It's OK for our use case. */
1505 ret = 0;
1506 break;
1507 default:
1508 break;
1509 }
1510 goto error;
1511 }
1512 }
1513
1514 error:
1515 health_code_update();
1516 return ret;
1517 }
1518
1519 /*
1520 * Copy data between an UST app event and a LTT event.
1521 */
1522 static void shadow_copy_event(struct ust_app_event *ua_event,
1523 struct ltt_ust_event *uevent)
1524 {
1525 size_t exclusion_alloc_size;
1526
1527 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
1528 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1529
1530 ua_event->enabled = uevent->enabled;
1531
1532 /* Copy event attributes */
1533 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
1534
1535 /* Copy filter bytecode */
1536 if (uevent->filter) {
1537 ua_event->filter = alloc_copy_ust_app_filter(uevent->filter);
1538 /* Filter might be NULL here in case of ENONEM. */
1539 }
1540
1541 /* Copy exclusion data */
1542 if (uevent->exclusion) {
1543 exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
1544 LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
1545 ua_event->exclusion = zmalloc(exclusion_alloc_size);
1546 if (ua_event->exclusion == NULL) {
1547 PERROR("malloc");
1548 } else {
1549 memcpy(ua_event->exclusion, uevent->exclusion,
1550 exclusion_alloc_size);
1551 }
1552 }
1553 }
1554
1555 /*
1556 * Copy data between an UST app channel and a LTT channel.
1557 */
1558 static void shadow_copy_channel(struct ust_app_channel *ua_chan,
1559 struct ltt_ust_channel *uchan)
1560 {
1561 struct lttng_ht_iter iter;
1562 struct ltt_ust_event *uevent;
1563 struct ltt_ust_context *uctx;
1564 struct ust_app_event *ua_event;
1565 struct ust_app_ctx *ua_ctx;
1566
1567 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
1568
1569 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
1570 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
1571
1572 ua_chan->tracefile_size = uchan->tracefile_size;
1573 ua_chan->tracefile_count = uchan->tracefile_count;
1574
1575 /* Copy event attributes since the layout is different. */
1576 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
1577 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
1578 ua_chan->attr.overwrite = uchan->attr.overwrite;
1579 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
1580 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
1581 ua_chan->attr.output = uchan->attr.output;
1582 /*
1583 * Note that the attribute channel type is not set since the channel on the
1584 * tracing registry side does not have this information.
1585 */
1586
1587 ua_chan->enabled = uchan->enabled;
1588 ua_chan->tracing_channel_id = uchan->id;
1589
1590 cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
1591 ua_ctx = alloc_ust_app_ctx(&uctx->ctx);
1592 if (ua_ctx == NULL) {
1593 continue;
1594 }
1595 lttng_ht_node_init_ulong(&ua_ctx->node,
1596 (unsigned long) ua_ctx->ctx.ctx);
1597 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
1598 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
1599 }
1600
1601 /* Copy all events from ltt ust channel to ust app channel */
1602 cds_lfht_for_each_entry(uchan->events->ht, &iter.iter, uevent, node.node) {
1603 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
1604 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
1605 if (ua_event == NULL) {
1606 DBG2("UST event %s not found on shadow copy channel",
1607 uevent->attr.name);
1608 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
1609 if (ua_event == NULL) {
1610 continue;
1611 }
1612 shadow_copy_event(ua_event, uevent);
1613 add_unique_ust_app_event(ua_chan, ua_event);
1614 }
1615 }
1616
1617 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
1618 }
1619
1620 /*
1621 * Copy data between a UST app session and a regular LTT session.
1622 */
1623 static void shadow_copy_session(struct ust_app_session *ua_sess,
1624 struct ltt_ust_session *usess, struct ust_app *app)
1625 {
1626 struct lttng_ht_node_str *ua_chan_node;
1627 struct lttng_ht_iter iter;
1628 struct ltt_ust_channel *uchan;
1629 struct ust_app_channel *ua_chan;
1630 time_t rawtime;
1631 struct tm *timeinfo;
1632 char datetime[16];
1633 int ret;
1634
1635 /* Get date and time for unique app path */
1636 time(&rawtime);
1637 timeinfo = localtime(&rawtime);
1638 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
1639
1640 DBG2("Shadow copy of session handle %d", ua_sess->handle);
1641
1642 ua_sess->tracing_id = usess->id;
1643 ua_sess->id = get_next_session_id();
1644 ua_sess->uid = app->uid;
1645 ua_sess->gid = app->gid;
1646 ua_sess->euid = usess->uid;
1647 ua_sess->egid = usess->gid;
1648 ua_sess->buffer_type = usess->buffer_type;
1649 ua_sess->bits_per_long = app->bits_per_long;
1650
1651 /* There is only one consumer object per session possible. */
1652 consumer_output_get(usess->consumer);
1653 ua_sess->consumer = usess->consumer;
1654
1655 ua_sess->output_traces = usess->output_traces;
1656 ua_sess->live_timer_interval = usess->live_timer_interval;
1657 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
1658 &usess->metadata_attr);
1659
1660 switch (ua_sess->buffer_type) {
1661 case LTTNG_BUFFER_PER_PID:
1662 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1663 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
1664 datetime);
1665 break;
1666 case LTTNG_BUFFER_PER_UID:
1667 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
1668 DEFAULT_UST_TRACE_UID_PATH, ua_sess->uid, app->bits_per_long);
1669 break;
1670 default:
1671 assert(0);
1672 goto error;
1673 }
1674 if (ret < 0) {
1675 PERROR("asprintf UST shadow copy session");
1676 assert(0);
1677 goto error;
1678 }
1679
1680 /* Iterate over all channels in global domain. */
1681 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &iter.iter,
1682 uchan, node.node) {
1683 struct lttng_ht_iter uiter;
1684
1685 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
1686 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
1687 if (ua_chan_node != NULL) {
1688 /* Session exist. Contiuing. */
1689 continue;
1690 }
1691
1692 DBG2("Channel %s not found on shadow session copy, creating it",
1693 uchan->name);
1694 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
1695 if (ua_chan == NULL) {
1696 /* malloc failed FIXME: Might want to do handle ENOMEM .. */
1697 continue;
1698 }
1699 shadow_copy_channel(ua_chan, uchan);
1700 /*
1701 * The concept of metadata channel does not exist on the tracing
1702 * registry side of the session daemon so this can only be a per CPU
1703 * channel and not metadata.
1704 */
1705 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
1706
1707 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
1708 }
1709 return;
1710
1711 error:
1712 consumer_output_put(ua_sess->consumer);
1713 }
1714
1715 /*
1716 * Lookup sesison wrapper.
1717 */
1718 static
1719 void __lookup_session_by_app(struct ltt_ust_session *usess,
1720 struct ust_app *app, struct lttng_ht_iter *iter)
1721 {
1722 /* Get right UST app session from app */
1723 lttng_ht_lookup(app->sessions, &usess->id, iter);
1724 }
1725
1726 /*
1727 * Return ust app session from the app session hashtable using the UST session
1728 * id.
1729 */
1730 static struct ust_app_session *lookup_session_by_app(
1731 struct ltt_ust_session *usess, struct ust_app *app)
1732 {
1733 struct lttng_ht_iter iter;
1734 struct lttng_ht_node_u64 *node;
1735
1736 __lookup_session_by_app(usess, app, &iter);
1737 node = lttng_ht_iter_get_node_u64(&iter);
1738 if (node == NULL) {
1739 goto error;
1740 }
1741
1742 return caa_container_of(node, struct ust_app_session, node);
1743
1744 error:
1745 return NULL;
1746 }
1747
1748 /*
1749 * Setup buffer registry per PID for the given session and application. If none
1750 * is found, a new one is created, added to the global registry and
1751 * initialized. If regp is valid, it's set with the newly created object.
1752 *
1753 * Return 0 on success or else a negative value.
1754 */
1755 static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
1756 struct ust_app *app, struct buffer_reg_pid **regp)
1757 {
1758 int ret = 0;
1759 struct buffer_reg_pid *reg_pid;
1760
1761 assert(ua_sess);
1762 assert(app);
1763
1764 rcu_read_lock();
1765
1766 reg_pid = buffer_reg_pid_find(ua_sess->id);
1767 if (!reg_pid) {
1768 /*
1769 * This is the create channel path meaning that if there is NO
1770 * registry available, we have to create one for this session.
1771 */
1772 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid);
1773 if (ret < 0) {
1774 goto error;
1775 }
1776 buffer_reg_pid_add(reg_pid);
1777 } else {
1778 goto end;
1779 }
1780
1781 /* Initialize registry. */
1782 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
1783 app->bits_per_long, app->uint8_t_alignment,
1784 app->uint16_t_alignment, app->uint32_t_alignment,
1785 app->uint64_t_alignment, app->long_alignment,
1786 app->byte_order, app->version.major,
1787 app->version.minor);
1788 if (ret < 0) {
1789 goto error;
1790 }
1791
1792 DBG3("UST app buffer registry per PID created successfully");
1793
1794 end:
1795 if (regp) {
1796 *regp = reg_pid;
1797 }
1798 error:
1799 rcu_read_unlock();
1800 return ret;
1801 }
1802
1803 /*
1804 * Setup buffer registry per UID for the given session and application. If none
1805 * is found, a new one is created, added to the global registry and
1806 * initialized. If regp is valid, it's set with the newly created object.
1807 *
1808 * Return 0 on success or else a negative value.
1809 */
1810 static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
1811 struct ust_app *app, struct buffer_reg_uid **regp)
1812 {
1813 int ret = 0;
1814 struct buffer_reg_uid *reg_uid;
1815
1816 assert(usess);
1817 assert(app);
1818
1819 rcu_read_lock();
1820
1821 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
1822 if (!reg_uid) {
1823 /*
1824 * This is the create channel path meaning that if there is NO
1825 * registry available, we have to create one for this session.
1826 */
1827 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
1828 LTTNG_DOMAIN_UST, &reg_uid);
1829 if (ret < 0) {
1830 goto error;
1831 }
1832 buffer_reg_uid_add(reg_uid);
1833 } else {
1834 goto end;
1835 }
1836
1837 /* Initialize registry. */
1838 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
1839 app->bits_per_long, app->uint8_t_alignment,
1840 app->uint16_t_alignment, app->uint32_t_alignment,
1841 app->uint64_t_alignment, app->long_alignment,
1842 app->byte_order, app->version.major,
1843 app->version.minor);
1844 if (ret < 0) {
1845 goto error;
1846 }
1847 /* Add node to teardown list of the session. */
1848 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
1849
1850 DBG3("UST app buffer registry per UID created successfully");
1851
1852 end:
1853 if (regp) {
1854 *regp = reg_uid;
1855 }
1856 error:
1857 rcu_read_unlock();
1858 return ret;
1859 }
1860
1861 /*
1862 * Create a session on the tracer side for the given app.
1863 *
1864 * On success, ua_sess_ptr is populated with the session pointer or else left
1865 * untouched. If the session was created, is_created is set to 1. On error,
1866 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
1867 * be NULL.
1868 *
1869 * Returns 0 on success or else a negative code which is either -ENOMEM or
1870 * -ENOTCONN which is the default code if the ustctl_create_session fails.
1871 */
1872 static int create_ust_app_session(struct ltt_ust_session *usess,
1873 struct ust_app *app, struct ust_app_session **ua_sess_ptr,
1874 int *is_created)
1875 {
1876 int ret, created = 0;
1877 struct ust_app_session *ua_sess;
1878
1879 assert(usess);
1880 assert(app);
1881 assert(ua_sess_ptr);
1882
1883 health_code_update();
1884
1885 ua_sess = lookup_session_by_app(usess, app);
1886 if (ua_sess == NULL) {
1887 DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
1888 app->pid, usess->id);
1889 ua_sess = alloc_ust_app_session(app);
1890 if (ua_sess == NULL) {
1891 /* Only malloc can failed so something is really wrong */
1892 ret = -ENOMEM;
1893 goto error;
1894 }
1895 shadow_copy_session(ua_sess, usess, app);
1896 created = 1;
1897 }
1898
1899 switch (usess->buffer_type) {
1900 case LTTNG_BUFFER_PER_PID:
1901 /* Init local registry. */
1902 ret = setup_buffer_reg_pid(ua_sess, app, NULL);
1903 if (ret < 0) {
1904 goto error;
1905 }
1906 break;
1907 case LTTNG_BUFFER_PER_UID:
1908 /* Look for a global registry. If none exists, create one. */
1909 ret = setup_buffer_reg_uid(usess, app, NULL);
1910 if (ret < 0) {
1911 goto error;
1912 }
1913 break;
1914 default:
1915 assert(0);
1916 ret = -EINVAL;
1917 goto error;
1918 }
1919
1920 health_code_update();
1921
1922 if (ua_sess->handle == -1) {
1923 ret = ustctl_create_session(app->sock);
1924 if (ret < 0) {
1925 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1926 ERR("Creating session for app pid %d with ret %d",
1927 app->pid, ret);
1928 } else {
1929 DBG("UST app creating session failed. Application is dead");
1930 /*
1931 * This is normal behavior, an application can die during the
1932 * creation process. Don't report an error so the execution can
1933 * continue normally. This will get flagged ENOTCONN and the
1934 * caller will handle it.
1935 */
1936 ret = 0;
1937 }
1938 delete_ust_app_session(-1, ua_sess, app);
1939 if (ret != -ENOMEM) {
1940 /*
1941 * Tracer is probably gone or got an internal error so let's
1942 * behave like it will soon unregister or not usable.
1943 */
1944 ret = -ENOTCONN;
1945 }
1946 goto error;
1947 }
1948
1949 ua_sess->handle = ret;
1950
1951 /* Add ust app session to app's HT */
1952 lttng_ht_node_init_u64(&ua_sess->node,
1953 ua_sess->tracing_id);
1954 lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
1955
1956 DBG2("UST app session created successfully with handle %d", ret);
1957 }
1958
1959 *ua_sess_ptr = ua_sess;
1960 if (is_created) {
1961 *is_created = created;
1962 }
1963
1964 /* Everything went well. */
1965 ret = 0;
1966
1967 error:
1968 health_code_update();
1969 return ret;
1970 }
1971
1972 /*
1973 * Match function for a hash table lookup of ust_app_ctx.
1974 *
1975 * It matches an ust app context based on the context type and, in the case
1976 * of perf counters, their name.
1977 */
1978 static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
1979 {
1980 struct ust_app_ctx *ctx;
1981 const struct lttng_ust_context *key;
1982
1983 assert(node);
1984 assert(_key);
1985
1986 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
1987 key = _key;
1988
1989 /* Context type */
1990 if (ctx->ctx.ctx != key->ctx) {
1991 goto no_match;
1992 }
1993
1994 /* Check the name in the case of perf thread counters. */
1995 if (key->ctx == LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER) {
1996 if (strncmp(key->u.perf_counter.name,
1997 ctx->ctx.u.perf_counter.name,
1998 sizeof(key->u.perf_counter.name))) {
1999 goto no_match;
2000 }
2001 }
2002
2003 /* Match. */
2004 return 1;
2005
2006 no_match:
2007 return 0;
2008 }
2009
2010 /*
2011 * Lookup for an ust app context from an lttng_ust_context.
2012 *
2013 * Must be called while holding RCU read side lock.
2014 * Return an ust_app_ctx object or NULL on error.
2015 */
2016 static
2017 struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
2018 struct lttng_ust_context *uctx)
2019 {
2020 struct lttng_ht_iter iter;
2021 struct lttng_ht_node_ulong *node;
2022 struct ust_app_ctx *app_ctx = NULL;
2023
2024 assert(uctx);
2025 assert(ht);
2026
2027 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2028 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2029 ht_match_ust_app_ctx, uctx, &iter.iter);
2030 node = lttng_ht_iter_get_node_ulong(&iter);
2031 if (!node) {
2032 goto end;
2033 }
2034
2035 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2036
2037 end:
2038 return app_ctx;
2039 }
2040
2041 /*
2042 * Create a context for the channel on the tracer.
2043 *
2044 * Called with UST app session lock held and a RCU read side lock.
2045 */
2046 static
2047 int create_ust_app_channel_context(struct ust_app_session *ua_sess,
2048 struct ust_app_channel *ua_chan, struct lttng_ust_context *uctx,
2049 struct ust_app *app)
2050 {
2051 int ret = 0;
2052 struct ust_app_ctx *ua_ctx;
2053
2054 DBG2("UST app adding context to channel %s", ua_chan->name);
2055
2056 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2057 if (ua_ctx) {
2058 ret = -EEXIST;
2059 goto error;
2060 }
2061
2062 ua_ctx = alloc_ust_app_ctx(uctx);
2063 if (ua_ctx == NULL) {
2064 /* malloc failed */
2065 ret = -1;
2066 goto error;
2067 }
2068
2069 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
2070 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
2071 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
2072
2073 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2074 if (ret < 0) {
2075 goto error;
2076 }
2077
2078 error:
2079 return ret;
2080 }
2081
2082 /*
2083 * Enable on the tracer side a ust app event for the session and channel.
2084 *
2085 * Called with UST app session lock held.
2086 */
2087 static
2088 int enable_ust_app_event(struct ust_app_session *ua_sess,
2089 struct ust_app_event *ua_event, struct ust_app *app)
2090 {
2091 int ret;
2092
2093 ret = enable_ust_event(app, ua_sess, ua_event);
2094 if (ret < 0) {
2095 goto error;
2096 }
2097
2098 ua_event->enabled = 1;
2099
2100 error:
2101 return ret;
2102 }
2103
2104 /*
2105 * Disable on the tracer side a ust app event for the session and channel.
2106 */
2107 static int disable_ust_app_event(struct ust_app_session *ua_sess,
2108 struct ust_app_event *ua_event, struct ust_app *app)
2109 {
2110 int ret;
2111
2112 ret = disable_ust_event(app, ua_sess, ua_event);
2113 if (ret < 0) {
2114 goto error;
2115 }
2116
2117 ua_event->enabled = 0;
2118
2119 error:
2120 return ret;
2121 }
2122
2123 /*
2124 * Lookup ust app channel for session and disable it on the tracer side.
2125 */
2126 static
2127 int disable_ust_app_channel(struct ust_app_session *ua_sess,
2128 struct ust_app_channel *ua_chan, struct ust_app *app)
2129 {
2130 int ret;
2131
2132 ret = disable_ust_channel(app, ua_sess, ua_chan);
2133 if (ret < 0) {
2134 goto error;
2135 }
2136
2137 ua_chan->enabled = 0;
2138
2139 error:
2140 return ret;
2141 }
2142
2143 /*
2144 * Lookup ust app channel for session and enable it on the tracer side. This
2145 * MUST be called with a RCU read side lock acquired.
2146 */
2147 static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2148 struct ltt_ust_channel *uchan, struct ust_app *app)
2149 {
2150 int ret = 0;
2151 struct lttng_ht_iter iter;
2152 struct lttng_ht_node_str *ua_chan_node;
2153 struct ust_app_channel *ua_chan;
2154
2155 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2156 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2157 if (ua_chan_node == NULL) {
2158 DBG2("Unable to find channel %s in ust session id %" PRIu64,
2159 uchan->name, ua_sess->tracing_id);
2160 goto error;
2161 }
2162
2163 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2164
2165 ret = enable_ust_channel(app, ua_sess, ua_chan);
2166 if (ret < 0) {
2167 goto error;
2168 }
2169
2170 error:
2171 return ret;
2172 }
2173
2174 /*
2175 * Ask the consumer to create a channel and get it if successful.
2176 *
2177 * Return 0 on success or else a negative value.
2178 */
2179 static int do_consumer_create_channel(struct ltt_ust_session *usess,
2180 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
2181 int bitness, struct ust_registry_session *registry)
2182 {
2183 int ret;
2184 unsigned int nb_fd = 0;
2185 struct consumer_socket *socket;
2186
2187 assert(usess);
2188 assert(ua_sess);
2189 assert(ua_chan);
2190 assert(registry);
2191
2192 rcu_read_lock();
2193 health_code_update();
2194
2195 /* Get the right consumer socket for the application. */
2196 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
2197 if (!socket) {
2198 ret = -EINVAL;
2199 goto error;
2200 }
2201
2202 health_code_update();
2203
2204 /* Need one fd for the channel. */
2205 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2206 if (ret < 0) {
2207 ERR("Exhausted number of available FD upon create channel");
2208 goto error;
2209 }
2210
2211 /*
2212 * Ask consumer to create channel. The consumer will return the number of
2213 * stream we have to expect.
2214 */
2215 ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
2216 registry);
2217 if (ret < 0) {
2218 goto error_ask;
2219 }
2220
2221 /*
2222 * Compute the number of fd needed before receiving them. It must be 2 per
2223 * stream (2 being the default value here).
2224 */
2225 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
2226
2227 /* Reserve the amount of file descriptor we need. */
2228 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
2229 if (ret < 0) {
2230 ERR("Exhausted number of available FD upon create channel");
2231 goto error_fd_get_stream;
2232 }
2233
2234 health_code_update();
2235
2236 /*
2237 * Now get the channel from the consumer. This call wil populate the stream
2238 * list of that channel and set the ust objects.
2239 */
2240 if (usess->consumer->enabled) {
2241 ret = ust_consumer_get_channel(socket, ua_chan);
2242 if (ret < 0) {
2243 goto error_destroy;
2244 }
2245 }
2246
2247 rcu_read_unlock();
2248 return 0;
2249
2250 error_destroy:
2251 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
2252 error_fd_get_stream:
2253 /*
2254 * Initiate a destroy channel on the consumer since we had an error
2255 * handling it on our side. The return value is of no importance since we
2256 * already have a ret value set by the previous error that we need to
2257 * return.
2258 */
2259 (void) ust_consumer_destroy_channel(socket, ua_chan);
2260 error_ask:
2261 lttng_fd_put(LTTNG_FD_APPS, 1);
2262 error:
2263 health_code_update();
2264 rcu_read_unlock();
2265 return ret;
2266 }
2267
2268 /*
2269 * Duplicate the ust data object of the ust app stream and save it in the
2270 * buffer registry stream.
2271 *
2272 * Return 0 on success or else a negative value.
2273 */
2274 static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2275 struct ust_app_stream *stream)
2276 {
2277 int ret;
2278
2279 assert(reg_stream);
2280 assert(stream);
2281
2282 /* Reserve the amount of file descriptor we need. */
2283 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2284 if (ret < 0) {
2285 ERR("Exhausted number of available FD upon duplicate stream");
2286 goto error;
2287 }
2288
2289 /* Duplicate object for stream once the original is in the registry. */
2290 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2291 reg_stream->obj.ust);
2292 if (ret < 0) {
2293 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2294 reg_stream->obj.ust, stream->obj, ret);
2295 lttng_fd_put(LTTNG_FD_APPS, 2);
2296 goto error;
2297 }
2298 stream->handle = stream->obj->handle;
2299
2300 error:
2301 return ret;
2302 }
2303
2304 /*
2305 * Duplicate the ust data object of the ust app. channel and save it in the
2306 * buffer registry channel.
2307 *
2308 * Return 0 on success or else a negative value.
2309 */
static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
		struct ust_app_channel *ua_chan)
{
	int ret;

	assert(reg_chan);
	assert(ua_chan);

	/*
	 * Need one fd for the channel, matching the single channel fd reserved
	 * by do_consumer_create_channel().
	 */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon duplicate channel");
		goto error_fd_get;
	}

	/* Duplicate object for stream once the original is in the registry. */
	ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
	if (ret < 0) {
		ERR("Duplicate channel obj from %p to %p failed with ret: %d",
				reg_chan->obj.ust, ua_chan->obj, ret);
		goto error;
	}
	/* Cache the handle of the duplicated channel object. */
	ua_chan->handle = ua_chan->obj->handle;

	return 0;

error:
	/* Release the fd reserved above; duplication failed. */
	lttng_fd_put(LTTNG_FD_APPS, 1);
error_fd_get:
	return ret;
}
2341
2342 /*
2343 * For a given channel buffer registry, setup all streams of the given ust
2344 * application channel.
2345 *
2346 * Return 0 on success or else a negative value.
2347 */
2348 static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
2349 struct ust_app_channel *ua_chan)
2350 {
2351 int ret = 0;
2352 struct ust_app_stream *stream, *stmp;
2353
2354 assert(reg_chan);
2355 assert(ua_chan);
2356
2357 DBG2("UST app setup buffer registry stream");
2358
2359 /* Send all streams to application. */
2360 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2361 struct buffer_reg_stream *reg_stream;
2362
2363 ret = buffer_reg_stream_create(&reg_stream);
2364 if (ret < 0) {
2365 goto error;
2366 }
2367
2368 /*
2369 * Keep original pointer and nullify it in the stream so the delete
2370 * stream call does not release the object.
2371 */
2372 reg_stream->obj.ust = stream->obj;
2373 stream->obj = NULL;
2374 buffer_reg_stream_add(reg_stream, reg_chan);
2375
2376 /* We don't need the streams anymore. */
2377 cds_list_del(&stream->list);
2378 delete_ust_app_stream(-1, stream);
2379 }
2380
2381 error:
2382 return ret;
2383 }
2384
2385 /*
2386 * Create a buffer registry channel for the given session registry and
2387 * application channel object. If regp pointer is valid, it's set with the
2388 * created object. Important, the created object is NOT added to the session
2389 * registry hash table.
2390 *
2391 * Return 0 on success else a negative value.
2392 */
2393 static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2394 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
2395 {
2396 int ret;
2397 struct buffer_reg_channel *reg_chan = NULL;
2398
2399 assert(reg_sess);
2400 assert(ua_chan);
2401
2402 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
2403
2404 /* Create buffer registry channel. */
2405 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
2406 if (ret < 0) {
2407 goto error_create;
2408 }
2409 assert(reg_chan);
2410 reg_chan->consumer_key = ua_chan->key;
2411 reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
2412 reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
2413
2414 /* Create and add a channel registry to session. */
2415 ret = ust_registry_channel_add(reg_sess->reg.ust,
2416 ua_chan->tracing_channel_id);
2417 if (ret < 0) {
2418 goto error;
2419 }
2420 buffer_reg_channel_add(reg_sess, reg_chan);
2421
2422 if (regp) {
2423 *regp = reg_chan;
2424 }
2425
2426 return 0;
2427
2428 error:
2429 /* Safe because the registry channel object was not added to any HT. */
2430 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2431 error_create:
2432 return ret;
2433 }
2434
2435 /*
2436 * Setup buffer registry channel for the given session registry and application
2437 * channel object. If regp pointer is valid, it's set with the created object.
2438 *
2439 * Return 0 on success else a negative value.
2440 */
2441 static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2442 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan)
2443 {
2444 int ret;
2445
2446 assert(reg_sess);
2447 assert(reg_chan);
2448 assert(ua_chan);
2449 assert(ua_chan->obj);
2450
2451 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
2452
2453 /* Setup all streams for the registry. */
2454 ret = setup_buffer_reg_streams(reg_chan, ua_chan);
2455 if (ret < 0) {
2456 goto error;
2457 }
2458
2459 reg_chan->obj.ust = ua_chan->obj;
2460 ua_chan->obj = NULL;
2461
2462 return 0;
2463
2464 error:
2465 buffer_reg_channel_remove(reg_sess, reg_chan);
2466 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2467 return ret;
2468 }
2469
2470 /*
2471 * Send buffer registry channel to the application.
2472 *
2473 * Return 0 on success else a negative value.
2474 */
2475 static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
2476 struct ust_app *app, struct ust_app_session *ua_sess,
2477 struct ust_app_channel *ua_chan)
2478 {
2479 int ret;
2480 struct buffer_reg_stream *reg_stream;
2481
2482 assert(reg_chan);
2483 assert(app);
2484 assert(ua_sess);
2485 assert(ua_chan);
2486
2487 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
2488
2489 ret = duplicate_channel_object(reg_chan, ua_chan);
2490 if (ret < 0) {
2491 goto error;
2492 }
2493
2494 /* Send channel to the application. */
2495 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
2496 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2497 ret = -ENOTCONN; /* Caused by app exiting. */
2498 goto error;
2499 } else if (ret < 0) {
2500 goto error;
2501 }
2502
2503 health_code_update();
2504
2505 /* Send all streams to application. */
2506 pthread_mutex_lock(&reg_chan->stream_list_lock);
2507 cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
2508 struct ust_app_stream stream;
2509
2510 ret = duplicate_stream_object(reg_stream, &stream);
2511 if (ret < 0) {
2512 goto error_stream_unlock;
2513 }
2514
2515 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
2516 if (ret < 0) {
2517 (void) release_ust_app_stream(-1, &stream);
2518 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2519 ret = -ENOTCONN; /* Caused by app exiting. */
2520 goto error_stream_unlock;
2521 } else if (ret < 0) {
2522 goto error_stream_unlock;
2523 }
2524 goto error_stream_unlock;
2525 }
2526
2527 /*
2528 * The return value is not important here. This function will output an
2529 * error if needed.
2530 */
2531 (void) release_ust_app_stream(-1, &stream);
2532 }
2533 ua_chan->is_sent = 1;
2534
2535 error_stream_unlock:
2536 pthread_mutex_unlock(&reg_chan->stream_list_lock);
2537 error:
2538 return ret;
2539 }
2540
2541 /*
2542 * Create and send to the application the created buffers with per UID buffers.
2543 *
2544 * Return 0 on success else a negative value.
2545 */
static int create_channel_per_uid(struct ust_app *app,
		struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
		struct ust_app_channel *ua_chan)
{
	int ret;
	struct buffer_reg_uid *reg_uid;
	struct buffer_reg_channel *reg_chan;

	assert(app);
	assert(usess);
	assert(ua_sess);
	assert(ua_chan);

	DBG("UST app creating channel %s with per UID buffers", ua_chan->name);

	reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
	/*
	 * The session creation handles the creation of this global registry
	 * object. If none can be found, there is a code flow problem or a
	 * teardown race.
	 */
	assert(reg_uid);

	reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
			reg_uid);
	if (!reg_chan) {
		/*
		 * First application for this (session, bitness, uid) tuple: the
		 * buffers must actually be created. Later applications fall
		 * through and only get the already-created buffers sent to them.
		 */
		/* Create the buffer registry channel object. */
		ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
		if (ret < 0) {
			ERR("Error creating the UST channel \"%s\" registry instance",
				ua_chan->name);
			goto error;
		}
		assert(reg_chan);

		/*
		 * Create the buffers on the consumer side. This call populates the
		 * ust app channel object with all streams and data object.
		 */
		ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
				app->bits_per_long, reg_uid->registry->reg.ust);
		if (ret < 0) {
			ERR("Error creating UST channel \"%s\" on the consumer daemon",
				ua_chan->name);

			/*
			 * Let's remove the previously created buffer registry channel so
			 * it's not visible anymore in the session registry.
			 */
			ust_registry_channel_del_free(reg_uid->registry->reg.ust,
					ua_chan->tracing_channel_id);
			buffer_reg_channel_remove(reg_uid->registry, reg_chan);
			buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
			goto error;
		}

		/*
		 * Setup the streams and add it to the session registry.
		 */
		ret = setup_buffer_reg_channel(reg_uid->registry, ua_chan, reg_chan);
		if (ret < 0) {
			ERR("Error setting up UST channel \"%s\"",
				ua_chan->name);
			goto error;
		}

	}

	/* Send buffers to the application. */
	ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
	if (ret < 0) {
		/* -ENOTCONN simply means the application exited; not an error. */
		if (ret != -ENOTCONN) {
			ERR("Error sending channel to application");
		}
		goto error;
	}

error:
	return ret;
}
2626
2627 /*
2628 * Create and send to the application the created buffers with per PID buffers.
2629 *
2630 * Return 0 on success else a negative value.
2631 */
2632 static int create_channel_per_pid(struct ust_app *app,
2633 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2634 struct ust_app_channel *ua_chan)
2635 {
2636 int ret;
2637 struct ust_registry_session *registry;
2638
2639 assert(app);
2640 assert(usess);
2641 assert(ua_sess);
2642 assert(ua_chan);
2643
2644 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
2645
2646 rcu_read_lock();
2647
2648 registry = get_session_registry(ua_sess);
2649 assert(registry);
2650
2651 /* Create and add a new channel registry to session. */
2652 ret = ust_registry_channel_add(registry, ua_chan->key);
2653 if (ret < 0) {
2654 ERR("Error creating the UST channel \"%s\" registry instance",
2655 ua_chan->name);
2656 goto error;
2657 }
2658
2659 /* Create and get channel on the consumer side. */
2660 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
2661 app->bits_per_long, registry);
2662 if (ret < 0) {
2663 ERR("Error creating UST channel \"%s\" on the consumer daemon",
2664 ua_chan->name);
2665 goto error;
2666 }
2667
2668 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
2669 if (ret < 0) {
2670 if (ret != -ENOTCONN) {
2671 ERR("Error sending channel to application");
2672 }
2673 goto error;
2674 }
2675
2676 error:
2677 rcu_read_unlock();
2678 return ret;
2679 }
2680
2681 /*
2682 * From an already allocated ust app channel, create the channel buffers if
2683 * need and send it to the application. This MUST be called with a RCU read
2684 * side lock acquired.
2685 *
2686 * Return 0 on success or else a negative value. Returns -ENOTCONN if
2687 * the application exited concurrently.
2688 */
2689 static int do_create_channel(struct ust_app *app,
2690 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2691 struct ust_app_channel *ua_chan)
2692 {
2693 int ret;
2694
2695 assert(app);
2696 assert(usess);
2697 assert(ua_sess);
2698 assert(ua_chan);
2699
2700 /* Handle buffer type before sending the channel to the application. */
2701 switch (usess->buffer_type) {
2702 case LTTNG_BUFFER_PER_UID:
2703 {
2704 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
2705 if (ret < 0) {
2706 goto error;
2707 }
2708 break;
2709 }
2710 case LTTNG_BUFFER_PER_PID:
2711 {
2712 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
2713 if (ret < 0) {
2714 goto error;
2715 }
2716 break;
2717 }
2718 default:
2719 assert(0);
2720 ret = -EINVAL;
2721 goto error;
2722 }
2723
2724 /* Initialize ust objd object using the received handle and add it. */
2725 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
2726 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
2727
2728 /* If channel is not enabled, disable it on the tracer */
2729 if (!ua_chan->enabled) {
2730 ret = disable_ust_channel(app, ua_sess, ua_chan);
2731 if (ret < 0) {
2732 goto error;
2733 }
2734 }
2735
2736 error:
2737 return ret;
2738 }
2739
2740 /*
2741 * Create UST app channel and create it on the tracer. Set ua_chanp of the
2742 * newly created channel if not NULL.
2743 *
2744 * Called with UST app session lock and RCU read-side lock held.
2745 *
2746 * Return 0 on success or else a negative value. Returns -ENOTCONN if
2747 * the application exited concurrently.
2748 */
static int create_ust_app_channel(struct ust_app_session *ua_sess,
		struct ltt_ust_channel *uchan, struct ust_app *app,
		enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
		struct ust_app_channel **ua_chanp)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app_channel *ua_chan;

	/* Lookup channel in the ust app session; reuse it if already created. */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	if (ua_chan_node != NULL) {
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
		goto end;
	}

	ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
	if (ua_chan == NULL) {
		/* Only malloc can fail here */
		ret = -ENOMEM;
		goto error_alloc;
	}
	shadow_copy_channel(ua_chan, uchan);

	/* Set channel type. */
	ua_chan->attr.type = type;

	/* Create buffers/streams and send the channel to the application. */
	ret = do_create_channel(app, usess, ua_sess, ua_chan);
	if (ret < 0) {
		goto error;
	}

	DBG2("UST app create channel %s for PID %d completed", ua_chan->name,
			app->pid);

	/* Only add the channel if successful on the tracer side. */
	lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);

end:
	if (ua_chanp) {
		*ua_chanp = ua_chan;
	}

	/* Everything went well. */
	return 0;

error:
	/*
	 * Close the app-side object only if it was actually sent (is_sent);
	 * otherwise pass -1 so no socket operation is attempted.
	 */
	delete_ust_app_channel(ua_chan->is_sent ? app->sock : -1, ua_chan, app);
error_alloc:
	return ret;
}
2802
2803 /*
2804 * Create UST app event and create it on the tracer side.
2805 *
2806 * Called with ust app session mutex held.
2807 */
2808 static
2809 int create_ust_app_event(struct ust_app_session *ua_sess,
2810 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
2811 struct ust_app *app)
2812 {
2813 int ret = 0;
2814 struct ust_app_event *ua_event;
2815
2816 /* Get event node */
2817 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
2818 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
2819 if (ua_event != NULL) {
2820 ret = -EEXIST;
2821 goto end;
2822 }
2823
2824 /* Does not exist so create one */
2825 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
2826 if (ua_event == NULL) {
2827 /* Only malloc can failed so something is really wrong */
2828 ret = -ENOMEM;
2829 goto end;
2830 }
2831 shadow_copy_event(ua_event, uevent);
2832
2833 /* Create it on the tracer side */
2834 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
2835 if (ret < 0) {
2836 /* Not found previously means that it does not exist on the tracer */
2837 assert(ret != -LTTNG_UST_ERR_EXIST);
2838 goto error;
2839 }
2840
2841 add_unique_ust_app_event(ua_chan, ua_event);
2842
2843 DBG2("UST app create event %s for PID %d completed", ua_event->name,
2844 app->pid);
2845
2846 end:
2847 return ret;
2848
2849 error:
2850 /* Valid. Calling here is already in a read side lock */
2851 delete_ust_app_event(-1, ua_event);
2852 return ret;
2853 }
2854
2855 /*
2856 * Create UST metadata and open it on the tracer side.
2857 *
2858 * Called with UST app session lock held and RCU read side lock.
2859 */
static int create_ust_app_metadata(struct ust_app_session *ua_sess,
		struct ust_app *app, struct consumer_output *consumer)
{
	int ret = 0;
	struct ust_app_channel *metadata;
	struct consumer_socket *socket;
	struct ust_registry_session *registry;

	assert(ua_sess);
	assert(app);
	assert(consumer);

	registry = get_session_registry(ua_sess);
	assert(registry);

	pthread_mutex_lock(&registry->lock);

	/* Metadata already exists for this registry or it was closed previously */
	if (registry->metadata_key || registry->metadata_closed) {
		ret = 0;
		goto error;
	}

	/* Allocate UST metadata */
	metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
	if (!metadata) {
		/* malloc() failed */
		ret = -ENOMEM;
		goto error;
	}

	memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));

	/* Need one fd for the channel. */
	ret = lttng_fd_get(LTTNG_FD_APPS, 1);
	if (ret < 0) {
		ERR("Exhausted number of available FD upon create metadata");
		goto error;
	}

	/* Get the right consumer socket for the application. */
	socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
	if (!socket) {
		ret = -EINVAL;
		goto error_consumer;
	}

	/*
	 * Keep metadata key so we can identify it on the consumer side. Assign it
	 * to the registry *before* we ask the consumer so we avoid the race of the
	 * consumer requesting the metadata and the ask_channel call on our side
	 * did not returned yet.
	 */
	registry->metadata_key = metadata->key;

	/*
	 * Ask the metadata channel creation to the consumer. The metadata object
	 * will be created by the consumer and kept there. However, the stream is
	 * never added or monitored until we do a first push metadata to the
	 * consumer.
	 */
	ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
			registry);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	/*
	 * The setup command will make the metadata stream be sent to the relayd,
	 * if applicable, and the thread managing the metadatas. This is important
	 * because after this point, if an error occurs, the only way the stream
	 * can be deleted is to be monitored in the consumer.
	 */
	ret = consumer_setup_metadata(socket, metadata->key);
	if (ret < 0) {
		/* Nullify the metadata key so we don't try to close it later on. */
		registry->metadata_key = 0;
		goto error_consumer;
	}

	DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
			metadata->key, app->pid);

	/*
	 * Note: the local metadata channel object is deleted on BOTH success and
	 * failure paths below; the consumer now owns the metadata channel, which
	 * is identified solely by registry->metadata_key from here on.
	 */
error_consumer:
	lttng_fd_put(LTTNG_FD_APPS, 1);
	delete_ust_app_channel(-1, metadata, app);
error:
	pthread_mutex_unlock(&registry->lock);
	return ret;
}
2952
2953 /*
2954 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
2955 * acquired before calling this function.
2956 */
2957 struct ust_app *ust_app_find_by_pid(pid_t pid)
2958 {
2959 struct ust_app *app = NULL;
2960 struct lttng_ht_node_ulong *node;
2961 struct lttng_ht_iter iter;
2962
2963 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
2964 node = lttng_ht_iter_get_node_ulong(&iter);
2965 if (node == NULL) {
2966 DBG2("UST app no found with pid %d", pid);
2967 goto error;
2968 }
2969
2970 DBG2("Found UST app by pid %d", pid);
2971
2972 app = caa_container_of(node, struct ust_app, pid_n);
2973
2974 error:
2975 return app;
2976 }
2977
2978 /*
2979 * Allocate and init an UST app object using the registration information and
2980 * the command socket. This is called when the command socket connects to the
2981 * session daemon.
2982 *
2983 * The object is returned on success or else NULL.
2984 */
2985 struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
2986 {
2987 struct ust_app *lta = NULL;
2988
2989 assert(msg);
2990 assert(sock >= 0);
2991
2992 DBG3("UST app creating application for socket %d", sock);
2993
2994 if ((msg->bits_per_long == 64 &&
2995 (uatomic_read(&ust_consumerd64_fd) == -EINVAL))
2996 || (msg->bits_per_long == 32 &&
2997 (uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
2998 ERR("Registration failed: application \"%s\" (pid: %d) has "
2999 "%d-bit long, but no consumerd for this size is available.\n",
3000 msg->name, msg->pid, msg->bits_per_long);
3001 goto error;
3002 }
3003
3004 lta = zmalloc(sizeof(struct ust_app));
3005 if (lta == NULL) {
3006 PERROR("malloc");
3007 goto error;
3008 }
3009
3010 lta->ppid = msg->ppid;
3011 lta->uid = msg->uid;
3012 lta->gid = msg->gid;
3013
3014 lta->bits_per_long = msg->bits_per_long;
3015 lta->uint8_t_alignment = msg->uint8_t_alignment;
3016 lta->uint16_t_alignment = msg->uint16_t_alignment;
3017 lta->uint32_t_alignment = msg->uint32_t_alignment;
3018 lta->uint64_t_alignment = msg->uint64_t_alignment;
3019 lta->long_alignment = msg->long_alignment;
3020 lta->byte_order = msg->byte_order;
3021
3022 lta->v_major = msg->major;
3023 lta->v_minor = msg->minor;
3024 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
3025 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3026 lta->notify_sock = -1;
3027
3028 /* Copy name and make sure it's NULL terminated. */
3029 strncpy(lta->name, msg->name, sizeof(lta->name));
3030 lta->name[UST_APP_PROCNAME_LEN] = '\0';
3031
3032 /*
3033 * Before this can be called, when receiving the registration information,
3034 * the application compatibility is checked. So, at this point, the
3035 * application can work with this session daemon.
3036 */
3037 lta->compatible = 1;
3038
3039 lta->pid = msg->pid;
3040 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
3041 lta->sock = sock;
3042 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
3043
3044 CDS_INIT_LIST_HEAD(&lta->teardown_head);
3045
3046 error:
3047 return lta;
3048 }
3049
3050 /*
3051 * For a given application object, add it to every hash table.
3052 */
void ust_app_add(struct ust_app *app)
{
	assert(app);
	/* The notify socket must be set by the notify thread before adding. */
	assert(app->notify_sock >= 0);

	rcu_read_lock();

	/*
	 * On a re-registration, we want to kick out the previous registration of
	 * that pid
	 */
	lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);

	/*
	 * The socket _should_ be unique until _we_ call close. So, a add_unique
	 * for the ust_app_ht_by_sock is used which asserts fail if the entry was
	 * already in the table.
	 */
	lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);

	/* Add application to the notify socket hash table. */
	lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
	lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);

	DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
			"notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
			app->gid, app->sock, app->name, app->notify_sock, app->v_major,
			app->v_minor);

	rcu_read_unlock();
}
3084
3085 /*
3086 * Set the application version into the object.
3087 *
3088 * Return 0 on success else a negative value either an errno code or a
3089 * LTTng-UST error code.
3090 */
3091 int ust_app_version(struct ust_app *app)
3092 {
3093 int ret;
3094
3095 assert(app);
3096
3097 ret = ustctl_tracer_version(app->sock, &app->version);
3098 if (ret < 0) {
3099 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
3100 ERR("UST app %d version failed with ret %d", app->sock, ret);
3101 } else {
3102 DBG3("UST app %d version failed. Application is dead", app->sock);
3103 }
3104 }
3105
3106 return ret;
3107 }
3108
3109 /*
3110 * Unregister app by removing it from the global traceable app list and freeing
3111 * the data struct.
3112 *
3113 * The socket is already closed at this point so no close to sock.
3114 */
void ust_app_unregister(int sock)
{
	struct ust_app *lta;
	struct lttng_ht_node_ulong *node;
	struct lttng_ht_iter ust_app_sock_iter;
	struct lttng_ht_iter iter;
	struct ust_app_session *ua_sess;
	int ret;

	rcu_read_lock();

	/* Get the node reference for a call_rcu */
	lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
	node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
	assert(node);

	lta = caa_container_of(node, struct ust_app, sock_n);
	DBG("PID %d unregistering with sock %d", lta->pid, sock);

	/*
	 * For per-PID buffers, perform "push metadata" and flush all
	 * application streams before removing app from hash tables,
	 * ensuring proper behavior of data_pending check.
	 * Remove sessions so they are not visible during deletion.
	 */
	cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
			node.node) {
		struct ust_registry_session *registry;

		ret = lttng_ht_del(lta->sessions, &iter);
		if (ret) {
			/* The session was already removed so scheduled for teardown. */
			continue;
		}

		/* Per-UID buffers outlive the app; only per-PID needs a flush. */
		if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
			(void) ust_app_flush_app_session(lta, ua_sess);
		}

		/*
		 * Add session to list for teardown. This is safe since at this point we
		 * are the only one using this list.
		 */
		pthread_mutex_lock(&ua_sess->lock);

		/* Skip sessions already marked for deletion by another path. */
		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/*
		 * Normally, this is done in the delete session process which is
		 * executed in the call rcu below. However, upon registration we can't
		 * afford to wait for the grace period before pushing data or else the
		 * data pending feature can race between the unregistration and stop
		 * command where the data pending command is sent *before* the grace
		 * period ended.
		 *
		 * The close metadata below nullifies the metadata pointer in the
		 * session so the delete session will NOT push/close a second time.
		 */
		registry = get_session_registry(ua_sess);
		if (registry) {
			/* Push metadata for application before freeing the application. */
			(void) push_metadata(registry, ua_sess->consumer);

			/*
			 * Don't ask to close metadata for global per UID buffers. Close
			 * metadata only on destroy trace session in this case. Also, the
			 * previous push metadata could have flag the metadata registry to
			 * close so don't send a close command if closed.
			 */
			if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
				/* And ask to close it for this session registry. */
				(void) close_metadata(registry, ua_sess->consumer);
			}
		}
		cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);

		pthread_mutex_unlock(&ua_sess->lock);
	}

	/* Remove application from PID hash table */
	ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
	assert(!ret);

	/*
	 * Remove application from notify hash table. The thread handling the
	 * notify socket could have deleted the node so ignore on error because
	 * either way it's valid. The close of that socket is handled by the other
	 * thread.
	 */
	iter.iter.node = &lta->notify_sock_n.node;
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

	/*
	 * Ignore return value since the node might have been removed before by an
	 * add replace during app registration because the PID can be reassigned by
	 * the OS.
	 */
	iter.iter.node = &lta->pid_n.node;
	ret = lttng_ht_del(ust_app_ht, &iter);
	if (ret) {
		DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
				lta->pid);
	}

	/* Free memory */
	call_rcu(&lta->pid_n.head, delete_ust_app_rcu);

	rcu_read_unlock();
	return;
}
3228
3229 /*
3230 * Fill events array with all events name of all registered apps.
3231 */
int ust_app_list_events(struct lttng_event **events)
{
	int ret, handle;
	size_t nbmem, count = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct lttng_event *tmp_event;

	/* Start with a default-sized array; grown geometrically below. */
	nbmem = UST_APP_EVENT_LIST_SIZE;
	tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
	if (tmp_event == NULL) {
		PERROR("zmalloc ust app events");
		ret = -ENOMEM;
		goto error;
	}

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ust_tracepoint_iter uiter;

		health_code_update();

		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		handle = ustctl_tracepoint_list(app->sock);
		if (handle < 0) {
			/* A dead app (EPIPE/EXITING) is expected; stay quiet then. */
			if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app list events getting handle failed for app pid %d",
						app->pid);
			}
			continue;
		}

		while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
					&uiter)) != -LTTNG_UST_ERR_NOENT) {
			/* Handle ustctl error. */
			if (ret < 0) {
				if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
					ERR("UST app tp list get failed for app %d with ret %d",
							app->sock, ret);
				} else {
					DBG3("UST app tp list get failed. Application is dead");
					/*
					 * This is normal behavior, an application can die during the
					 * creation process. Don't report an error so the execution can
					 * continue normally. Continue normal execution.
					 *
					 * Note: this break skips the free/goto below on
					 * purpose and moves on to the next application.
					 */
					break;
				}
				free(tmp_event);
				goto rcu_error;
			}

			health_code_update();
			if (count >= nbmem) {
				/* In case the realloc fails, we free the memory */
				struct lttng_event *new_tmp_event;
				size_t new_nbmem;

				/* Double the capacity (geometric growth). */
				new_nbmem = nbmem << 1;
				DBG2("Reallocating event list from %zu to %zu entries",
						nbmem, new_nbmem);
				new_tmp_event = realloc(tmp_event,
					new_nbmem * sizeof(struct lttng_event));
				if (new_tmp_event == NULL) {
					PERROR("realloc ust app events");
					free(tmp_event);
					ret = -ENOMEM;
					goto rcu_error;
				}
				/* Zero the new memory */
				memset(new_tmp_event + nbmem, 0,
					(new_nbmem - nbmem) * sizeof(struct lttng_event));
				nbmem = new_nbmem;
				tmp_event = new_tmp_event;
			}
			memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
			tmp_event[count].loglevel = uiter.loglevel;
			tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
			tmp_event[count].pid = app->pid;
			tmp_event[count].enabled = -1;
			count++;
		}
	}

	/* On success, return the event count and transfer array ownership. */
	ret = count;
	*events = tmp_event;

	DBG2("UST app list events done (%zu events)", count);

rcu_error:
	rcu_read_unlock();
error:
	health_code_update();
	return ret;
}
3334
/*
 * List the event fields of every registered, compatible application.
 *
 * On success, *fields points to a zmalloc'd array owned by the caller (who
 * must free() it) and the number of entries is returned. On error, a
 * negative value is returned (-ENOMEM on allocation failure or a ustctl
 * error code) and *fields is left untouched.
 */
int ust_app_list_event_fields(struct lttng_event_field **fields)
{
	int ret, handle;
	size_t nbmem, count = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct lttng_event_field *tmp_event;

	/* Start with a default-sized array; capacity is doubled on demand below. */
	nbmem = UST_APP_EVENT_LIST_SIZE;
	tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
	if (tmp_event == NULL) {
		PERROR("zmalloc ust app event fields");
		ret = -ENOMEM;
		goto error;
	}

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		struct lttng_ust_field_iter uiter;

		health_code_update();

		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		/* Open a field list handle on the application's socket. */
		handle = ustctl_tracepoint_field_list(app->sock);
		if (handle < 0) {
			if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app list field getting handle failed for app pid %d",
						app->pid);
			}
			/* Skip dead or failing applications; list the others. */
			continue;
		}

		/* Iterate until the tracer reports no more fields. */
		while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
				&uiter)) != -LTTNG_UST_ERR_NOENT) {
			/* Handle ustctl error. */
			if (ret < 0) {
				if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
					ERR("UST app tp list field failed for app %d with ret %d",
							app->sock, ret);
				} else {
					DBG3("UST app tp list field failed. Application is dead");
					/*
					 * This is normal behavior, an application can die during the
					 * creation process. Don't report an error so the execution can
					 * continue normally. Reset list and count for next app.
					 */
					break;
				}
				free(tmp_event);
				goto rcu_error;
			}

			health_code_update();
			if (count >= nbmem) {
				/* In case the realloc fails, we free the memory */
				struct lttng_event_field *new_tmp_event;
				size_t new_nbmem;

				/* Double capacity; the new tail is zeroed below. */
				new_nbmem = nbmem << 1;
				DBG2("Reallocating event field list from %zu to %zu entries",
						nbmem, new_nbmem);
				new_tmp_event = realloc(tmp_event,
					new_nbmem * sizeof(struct lttng_event_field));
				if (new_tmp_event == NULL) {
					PERROR("realloc ust app event fields");
					free(tmp_event);
					ret = -ENOMEM;
					goto rcu_error;
				}
				/* Zero the new memory */
				memset(new_tmp_event + nbmem, 0,
					(new_nbmem - nbmem) * sizeof(struct lttng_event_field));
				nbmem = new_nbmem;
				tmp_event = new_tmp_event;
			}

			memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
			/* Mapping between these enums matches 1 to 1. */
			tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
			tmp_event[count].nowrite = uiter.nowrite;

			memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
			tmp_event[count].event.loglevel = uiter.loglevel;
			tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
			tmp_event[count].event.pid = app->pid;
			tmp_event[count].event.enabled = -1;
			count++;
		}
	}

	/* Hand the array over to the caller; ret becomes the entry count. */
	ret = count;
	*fields = tmp_event;

	DBG2("UST app list event fields done (%zu events)", count);

rcu_error:
	rcu_read_unlock();
error:
	health_code_update();
	return ret;
}
3446
3447 /*
3448 * Free and clean all traceable apps of the global list.
3449 *
3450 * Should _NOT_ be called with RCU read-side lock held.
3451 */
3452 void ust_app_clean_list(void)
3453 {
3454 int ret;
3455 struct ust_app *app;
3456 struct lttng_ht_iter iter;
3457
3458 DBG2("UST app cleaning registered apps hash table");
3459
3460 rcu_read_lock();
3461
3462 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3463 ret = lttng_ht_del(ust_app_ht, &iter);
3464 assert(!ret);
3465 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
3466 }
3467
3468 /* Cleanup socket hash table */
3469 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
3470 sock_n.node) {
3471 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
3472 assert(!ret);
3473 }
3474
3475 /* Cleanup notify socket hash table */
3476 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
3477 notify_sock_n.node) {
3478 ret = lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
3479 assert(!ret);
3480 }
3481 rcu_read_unlock();
3482
3483 /* Destroy is done only when the ht is empty */
3484 ht_cleanup_push(ust_app_ht);
3485 ht_cleanup_push(ust_app_ht_by_sock);
3486 ht_cleanup_push(ust_app_ht_by_notify_sock);
3487 }
3488
3489 /*
3490 * Init UST app hash table.
3491 */
3492 void ust_app_ht_alloc(void)
3493 {
3494 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3495 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3496 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
3497 }
3498
3499 /*
3500 * For a specific UST session, disable the channel for all registered apps.
3501 */
3502 int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
3503 struct ltt_ust_channel *uchan)
3504 {
3505 int ret = 0;
3506 struct lttng_ht_iter iter;
3507 struct lttng_ht_node_str *ua_chan_node;
3508 struct ust_app *app;
3509 struct ust_app_session *ua_sess;
3510 struct ust_app_channel *ua_chan;
3511
3512 if (usess == NULL || uchan == NULL) {
3513 ERR("Disabling UST global channel with NULL values");
3514 ret = -1;
3515 goto error;
3516 }
3517
3518 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
3519 uchan->name, usess->id);
3520
3521 rcu_read_lock();
3522
3523 /* For every registered applications */
3524 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3525 struct lttng_ht_iter uiter;
3526 if (!app->compatible) {
3527 /*
3528 * TODO: In time, we should notice the caller of this error by
3529 * telling him that this is a version error.
3530 */
3531 continue;
3532 }
3533 ua_sess = lookup_session_by_app(usess, app);
3534 if (ua_sess == NULL) {
3535 continue;
3536 }
3537
3538 /* Get channel */
3539 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3540 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3541 /* If the session if found for the app, the channel must be there */
3542 assert(ua_chan_node);
3543
3544 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3545 /* The channel must not be already disabled */
3546 assert(ua_chan->enabled == 1);
3547
3548 /* Disable channel onto application */
3549 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
3550 if (ret < 0) {
3551 /* XXX: We might want to report this error at some point... */
3552 continue;
3553 }
3554 }
3555
3556 rcu_read_unlock();
3557
3558 error:
3559 return ret;
3560 }
3561
3562 /*
3563 * For a specific UST session, enable the channel for all registered apps.
3564 */
3565 int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
3566 struct ltt_ust_channel *uchan)
3567 {
3568 int ret = 0;
3569 struct lttng_ht_iter iter;
3570 struct ust_app *app;
3571 struct ust_app_session *ua_sess;
3572
3573 if (usess == NULL || uchan == NULL) {
3574 ERR("Adding UST global channel to NULL values");
3575 ret = -1;
3576 goto error;
3577 }
3578
3579 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
3580 uchan->name, usess->id);
3581
3582 rcu_read_lock();
3583
3584 /* For every registered applications */
3585 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3586 if (!app->compatible) {
3587 /*
3588 * TODO: In time, we should notice the caller of this error by
3589 * telling him that this is a version error.
3590 */
3591 continue;
3592 }
3593 ua_sess = lookup_session_by_app(usess, app);
3594 if (ua_sess == NULL) {
3595 continue;
3596 }
3597
3598 /* Enable channel onto application */
3599 ret = enable_ust_app_channel(ua_sess, uchan, app);
3600 if (ret < 0) {
3601 /* XXX: We might want to report this error at some point... */
3602 continue;
3603 }
3604 }
3605
3606 rcu_read_unlock();
3607
3608 error:
3609 return ret;
3610 }
3611
3612 /*
3613 * Disable an event in a channel and for a specific session.
3614 */
3615 int ust_app_disable_event_glb(struct ltt_ust_session *usess,
3616 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3617 {
3618 int ret = 0;
3619 struct lttng_ht_iter iter, uiter;
3620 struct lttng_ht_node_str *ua_chan_node, *ua_event_node;
3621 struct ust_app *app;
3622 struct ust_app_session *ua_sess;
3623 struct ust_app_channel *ua_chan;
3624 struct ust_app_event *ua_event;
3625
3626 DBG("UST app disabling event %s for all apps in channel "
3627 "%s for session id %" PRIu64,
3628 uevent->attr.name, uchan->name, usess->id);
3629
3630 rcu_read_lock();
3631
3632 /* For all registered applications */
3633 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3634 if (!app->compatible) {
3635 /*
3636 * TODO: In time, we should notice the caller of this error by
3637 * telling him that this is a version error.
3638 */
3639 continue;
3640 }
3641 ua_sess = lookup_session_by_app(usess, app);
3642 if (ua_sess == NULL) {
3643 /* Next app */
3644 continue;
3645 }
3646
3647 /* Lookup channel in the ust app session */
3648 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3649 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3650 if (ua_chan_node == NULL) {
3651 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
3652 "Skipping", uchan->name, usess->id, app->pid);
3653 continue;
3654 }
3655 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3656
3657 lttng_ht_lookup(ua_chan->events, (void *)uevent->attr.name, &uiter);
3658 ua_event_node = lttng_ht_iter_get_node_str(&uiter);
3659 if (ua_event_node == NULL) {
3660 DBG2("Event %s not found in channel %s for app pid %d."
3661 "Skipping", uevent->attr.name, uchan->name, app->pid);
3662 continue;
3663 }
3664 ua_event = caa_container_of(ua_event_node, struct ust_app_event, node);
3665
3666 ret = disable_ust_app_event(ua_sess, ua_event, app);
3667 if (ret < 0) {
3668 /* XXX: Report error someday... */
3669 continue;
3670 }
3671 }
3672
3673 rcu_read_unlock();
3674
3675 return ret;
3676 }
3677
/*
 * For a specific UST session, create the channel for all registered apps.
 *
 * Returns 0 on success; applications whose socket is gone (-ENOTCONN) are
 * silently skipped. Any other error (e.g. -ENOMEM) aborts the walk and is
 * returned to the caller.
 */
int ust_app_create_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan)
{
	int ret = 0, created;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_session *ua_sess = NULL;

	/* Very wrong code flow */
	assert(usess);
	assert(uchan);

	DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
			uchan->name, usess->id);

	rcu_read_lock();

	/* For every registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		/*
		 * Create session on the tracer side and add it to app session HT. Note
		 * that if session exist, it will simply return a pointer to the ust
		 * app session.
		 */
		ret = create_ust_app_session(usess, app, &ua_sess, &created);
		if (ret < 0) {
			switch (ret) {
			case -ENOTCONN:
				/*
				 * The application's socket is not valid. Either a bad socket
				 * or a timeout on it. We can't inform the caller that for a
				 * specific app, the session failed so lets continue here.
				 */
				ret = 0; /* Not an error. */
				continue;
			case -ENOMEM:
			default:
				goto error_rcu_unlock;
			}
		}
		assert(ua_sess);

		pthread_mutex_lock(&ua_sess->lock);

		/* The session may be torn down concurrently; skip it then. */
		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* The metadata channel is not sent to the app; only its attributes are kept. */
		if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
				sizeof(uchan->name))) {
			copy_channel_attr_to_ustctl(&ua_sess->metadata_attr, &uchan->attr);
			ret = 0;
		} else {
			/* Create channel onto application. We don't need the chan ref. */
			ret = create_ust_app_channel(ua_sess, uchan, app,
					LTTNG_UST_CHAN_PER_CPU, usess, NULL);
		}
		pthread_mutex_unlock(&ua_sess->lock);
		if (ret < 0) {
			/* Cleanup the created session if it's the case. */
			if (created) {
				destroy_app_session(app, ua_sess);
			}
			switch (ret) {
			case -ENOTCONN:
				/*
				 * The application's socket is not valid. Either a bad socket
				 * or a timeout on it. We can't inform the caller that for a
				 * specific app, the session failed so lets continue here.
				 */
				ret = 0; /* Not an error. */
				continue;
			case -ENOMEM:
			default:
				goto error_rcu_unlock;
			}
		}
	}

error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
3772
/*
 * Enable event for a specific session and channel on the tracer, for all
 * registered applications. Missing sessions/channels/events for an app are
 * skipped; a failing enable_ust_app_event() aborts the walk and its error
 * is returned.
 */
int ust_app_enable_event_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app enabling event %s for all apps for session id %" PRIu64,
			uevent->attr.name, usess->id);

	/*
	 * NOTE: At this point, this function is called only if the session and
	 * channel passed are already created for all apps. and enabled on the
	 * tracer also.
	 */

	rcu_read_lock();

	/* For all registered applications */
	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (!ua_sess) {
			/* The application has problem or is probably dead. */
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);

		/* The session may be torn down concurrently; skip it then. */
		if (ua_sess->deleted) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		/*
		 * It is possible that the channel cannot be found is
		 * the channel/event creation occurs concurrently with
		 * an application exit.
		 */
		if (!ua_chan_node) {
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

		/* Get event node */
		ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
				uevent->filter, uevent->attr.loglevel, uevent->exclusion);
		if (ua_event == NULL) {
			DBG3("UST app enable event %s not found for app PID %d."
					"Skipping app", uevent->attr.name, app->pid);
			goto next_app;
		}

		ret = enable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			/* Fatal for the whole walk: unlock this session and bail out. */
			pthread_mutex_unlock(&ua_sess->lock);
			goto error;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

error:
	rcu_read_unlock();
	return ret;
}
3857
3858 /*
3859 * For a specific existing UST session and UST channel, creates the event for
3860 * all registered apps.
3861 */
3862 int ust_app_create_event_glb(struct ltt_ust_session *usess,
3863 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
3864 {
3865 int ret = 0;
3866 struct lttng_ht_iter iter, uiter;
3867 struct lttng_ht_node_str *ua_chan_node;
3868 struct ust_app *app;
3869 struct ust_app_session *ua_sess;
3870 struct ust_app_channel *ua_chan;
3871
3872 DBG("UST app creating event %s for all apps for session id %" PRIu64,
3873 uevent->attr.name, usess->id);
3874
3875 rcu_read_lock();
3876
3877 /* For all registered applications */
3878 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3879 if (!app->compatible) {
3880 /*
3881 * TODO: In time, we should notice the caller of this error by
3882 * telling him that this is a version error.
3883 */
3884 continue;
3885 }
3886 ua_sess = lookup_session_by_app(usess, app);
3887 if (!ua_sess) {
3888 /* The application has problem or is probably dead. */
3889 continue;
3890 }
3891
3892 pthread_mutex_lock(&ua_sess->lock);
3893
3894 if (ua_sess->deleted) {
3895 pthread_mutex_unlock(&ua_sess->lock);
3896 continue;
3897 }
3898
3899 /* Lookup channel in the ust app session */
3900 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3901 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
3902 /* If the channel is not found, there is a code flow error */
3903 assert(ua_chan_node);
3904
3905 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3906
3907 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
3908 pthread_mutex_unlock(&ua_sess->lock);
3909 if (ret < 0) {
3910 if (ret != -LTTNG_UST_ERR_EXIST) {
3911 /* Possible value at this point: -ENOMEM. If so, we stop! */
3912 break;
3913 }
3914 DBG2("UST app event %s already exist on app PID %d",
3915 uevent->attr.name, app->pid);
3916 continue;
3917 }
3918 }
3919
3920 rcu_read_unlock();
3921
3922 return ret;
3923 }
3924
/*
 * Start tracing for a specific UST session and app.
 *
 * On first start, creates the local trace directory (if needed) and the
 * application metadata, then asks the tracer to start the session. A dead
 * application is not an error. Returns 0 on success or if the app is
 * gone/skipped, -1 on real errors.
 */
static
int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;

	DBG("Starting tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		/* The session is in teardown process. Ignore and continue. */
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end;
	}

	/* Upon restart, we skip the setup, already done */
	if (ua_sess->started) {
		goto skip_setup;
	}

	/* Create directories if consumer is LOCAL and has a path defined. */
	if (usess->consumer->type == CONSUMER_DST_LOCAL &&
			strlen(usess->consumer->dst.trace_path) > 0) {
		ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
				S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
		if (ret < 0) {
			/* An already-existing directory is not an error. */
			if (errno != EEXIST) {
				ERR("Trace directory creation error");
				goto error_unlock;
			}
		}
	}

	/*
	 * Create the metadata for the application. This returns gracefully if a
	 * metadata was already set for the session.
	 */
	ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
	if (ret < 0) {
		goto error_unlock;
	}

	health_code_update();

skip_setup:
	/* This start the UST tracing */
	ret = ustctl_start_session(app->sock, ua_sess->handle);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error starting tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app start session failed. Application is dead.");
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			pthread_mutex_unlock(&ua_sess->lock);
			goto end;
		}
		goto error_unlock;
	}

	/* Indicate that the session has been started once */
	ua_sess->started = 1;

	pthread_mutex_unlock(&ua_sess->lock);

	health_code_update();

	/* Quiescent wait after starting trace */
	ret = ustctl_wait_quiescent(app->sock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

end:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
4029
/*
 * Stop tracing for a specific UST session and app.
 *
 * Stops the tracer session, waits for quiescence and pushes the pending
 * metadata to the consumer. A dead application is not an error. Returns 0
 * on success or if the app is gone/skipped, -1 on real errors (including
 * stopping a session that was never started).
 */
static
int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
{
	int ret = 0;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	DBG("Stopping tracing for ust app pid %d", app->pid);

	rcu_read_lock();

	if (!app->compatible) {
		goto end_no_session;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (ua_sess == NULL) {
		goto end_no_session;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto end_no_session;
	}

	/*
	 * If started = 0, it means that stop trace has been called for a session
	 * that was never started. It's possible since we can have a fail start
	 * from either the application manager thread or the command thread. Simply
	 * indicate that this is a stop error.
	 */
	if (!ua_sess->started) {
		goto error_rcu_unlock;
	}

	health_code_update();

	/* This inhibits UST tracing */
	ret = ustctl_stop_session(app->sock, ua_sess->handle);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("Error stopping tracing for app pid: %d (ret: %d)",
					app->pid, ret);
		} else {
			DBG("UST app stop session failed. Application is dead.");
			/*
			 * This is normal behavior, an application can die during the
			 * creation process. Don't report an error so the execution can
			 * continue normally.
			 */
			goto end_unlock;
		}
		goto error_rcu_unlock;
	}

	health_code_update();

	/* Quiescent wait after stopping trace */
	ret = ustctl_wait_quiescent(app->sock);
	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
		ERR("UST app wait quiescent failed for app pid %d ret %d",
				app->pid, ret);
	}

	health_code_update();

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Push metadata for application before freeing the application. */
	(void) push_metadata(registry, ua_sess->consumer);

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
end_no_session:
	rcu_read_unlock();
	health_code_update();
	return 0;

error_rcu_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
	rcu_read_unlock();
	health_code_update();
	return -1;
}
4120
4121 static
4122 int ust_app_flush_app_session(struct ust_app *app,
4123 struct ust_app_session *ua_sess)
4124 {
4125 int ret, retval = 0;
4126 struct lttng_ht_iter iter;
4127 struct ust_app_channel *ua_chan;
4128 struct consumer_socket *socket;
4129
4130 DBG("Flushing app session buffers for ust app pid %d", app->pid);
4131
4132 rcu_read_lock();
4133
4134 if (!app->compatible) {
4135 goto end_not_compatible;
4136 }
4137
4138 pthread_mutex_lock(&ua_sess->lock);
4139
4140 if (ua_sess->deleted) {
4141 goto end_deleted;
4142 }
4143
4144 health_code_update();
4145
4146 /* Flushing buffers */
4147 socket = consumer_find_socket_by_bitness(app->bits_per_long,
4148 ua_sess->consumer);
4149
4150 /* Flush buffers and push metadata. */
4151 switch (ua_sess->buffer_type) {
4152 case LTTNG_BUFFER_PER_PID:
4153 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
4154 node.node) {
4155 health_code_update();
4156 assert(ua_chan->is_sent);
4157 ret = consumer_flush_channel(socket, ua_chan->key);
4158 if (ret) {
4159 ERR("Error flushing consumer channel");
4160 retval = -1;
4161 continue;
4162 }
4163 }
4164 break;
4165 case LTTNG_BUFFER_PER_UID:
4166 default:
4167 assert(0);
4168 break;
4169 }
4170
4171 health_code_update();
4172
4173 end_deleted:
4174 pthread_mutex_unlock(&ua_sess->lock);
4175
4176 end_not_compatible:
4177 rcu_read_unlock();
4178 health_code_update();
4179 return retval;
4180 }
4181
/*
 * Flush buffers for all applications for a specific UST session.
 * Called with UST session lock held.
 *
 * Per-UID sessions are flushed through the UID buffer registries; per-PID
 * sessions are flushed app by app via ust_app_flush_app_session(). Returns
 * 0 unless the buffer type is unknown (per-app/per-channel flush errors
 * are deliberately ignored).
 */
static
int ust_app_flush_session(struct ltt_ust_session *usess)

{
	int ret = 0;

	DBG("Flushing session buffers for all ust apps");

	rcu_read_lock();

	/* Flush buffers and push metadata. */
	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;
		struct lttng_ht_iter iter;

		/* Flush all per UID buffers associated to that session. */
		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct ust_registry_session *ust_session_reg;
			struct buffer_reg_channel *reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				/* Ignore request if no consumer is found for the session. */
				continue;
			}

			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {
				/*
				 * The following call will print error values so the return
				 * code is of little importance because whatever happens, we
				 * have to try them all.
				 */
				(void) consumer_flush_channel(socket, reg_chan->consumer_key);
			}

			ust_session_reg = reg->registry->reg.ust;
			/* Push metadata. */
			(void) push_metadata(ust_session_reg, usess->consumer);
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		struct ust_app_session *ua_sess;
		struct lttng_ht_iter iter;
		struct ust_app *app;

		/* Flush each registered app's session individually. */
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			ua_sess = lookup_session_by_app(usess, app);
			if (ua_sess == NULL) {
				continue;
			}
			(void) ust_app_flush_app_session(app, ua_sess);
		}
		break;
	}
	default:
		ret = -1;
		assert(0);
		break;
	}

	rcu_read_unlock();
	health_code_update();
	return ret;
}
4258
4259 /*
4260 * Destroy a specific UST session in apps.
4261 */
4262 static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
4263 {
4264 int ret;
4265 struct ust_app_session *ua_sess;
4266 struct lttng_ht_iter iter;
4267 struct lttng_ht_node_u64 *node;
4268
4269 DBG("Destroy tracing for ust app pid %d", app->pid);
4270
4271 rcu_read_lock();
4272
4273 if (!app->compatible) {
4274 goto end;
4275 }
4276
4277 __lookup_session_by_app(usess, app, &iter);
4278 node = lttng_ht_iter_get_node_u64(&iter);
4279 if (node == NULL) {
4280 /* Session is being or is deleted. */
4281 goto end;
4282 }
4283 ua_sess = caa_container_of(node, struct ust_app_session, node);
4284
4285 health_code_update();
4286 destroy_app_session(app, ua_sess);
4287
4288 health_code_update();
4289
4290 /* Quiescent wait after stopping trace */
4291 ret = ustctl_wait_quiescent(app->sock);
4292 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4293 ERR("UST app wait quiescent failed for app pid %d ret %d",
4294 app->pid, ret);
4295 }
4296 end:
4297 rcu_read_unlock();
4298 health_code_update();
4299 return 0;
4300 }
4301
4302 /*
4303 * Start tracing for the UST session.
4304 */
4305 int ust_app_start_trace_all(struct ltt_ust_session *usess)
4306 {
4307 int ret = 0;
4308 struct lttng_ht_iter iter;
4309 struct ust_app *app;
4310
4311 DBG("Starting all UST traces");
4312
4313 rcu_read_lock();
4314
4315 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4316 ret = ust_app_start_trace(usess, app);
4317 if (ret < 0) {
4318 /* Continue to next apps even on error */
4319 continue;
4320 }
4321 }
4322
4323 rcu_read_unlock();
4324
4325 return 0;
4326 }
4327
4328 /*
4329 * Start tracing for the UST session.
4330 * Called with UST session lock held.
4331 */
4332 int ust_app_stop_trace_all(struct ltt_ust_session *usess)
4333 {
4334 int ret = 0;
4335 struct lttng_ht_iter iter;
4336 struct ust_app *app;
4337
4338 DBG("Stopping all UST traces");
4339
4340 rcu_read_lock();
4341
4342 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4343 ret = ust_app_stop_trace(usess, app);
4344 if (ret < 0) {
4345 /* Continue to next apps even on error */
4346 continue;
4347 }
4348 }
4349
4350 (void) ust_app_flush_session(usess);
4351
4352 rcu_read_unlock();
4353
4354 return 0;
4355 }
4356
4357 /*
4358 * Destroy app UST session.
4359 */
4360 int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
4361 {
4362 int ret = 0;
4363 struct lttng_ht_iter iter;
4364 struct ust_app *app;
4365
4366 DBG("Destroy all UST traces");
4367
4368 rcu_read_lock();
4369
4370 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4371 ret = destroy_trace(usess, app);
4372 if (ret < 0) {
4373 /* Continue to next apps even on error */
4374 continue;
4375 }
4376 }
4377
4378 rcu_read_unlock();
4379
4380 return 0;
4381 }
4382
/*
 * Add channels/events from UST global domain to registered apps at sock.
 *
 * Creates (or looks up) the app session for usess, then replays every
 * channel, channel context and event of that session onto the tracer. If
 * the session is already active, tracing is started for the app. On any
 * error, the freshly created app session is destroyed.
 */
void ust_app_global_update(struct ltt_ust_session *usess, int sock)
{
	int ret = 0;
	struct lttng_ht_iter iter, uiter;
	struct ust_app *app;
	struct ust_app_session *ua_sess = NULL;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;
	struct ust_app_ctx *ua_ctx;

	assert(usess);
	assert(sock >= 0);

	DBG2("UST app global update for app sock %d for session id %" PRIu64, sock,
			usess->id);

	rcu_read_lock();

	app = ust_app_find_by_sock(sock);
	if (app == NULL) {
		/*
		 * Application can be unregistered before so this is possible hence
		 * simply stopping the update.
		 */
		DBG3("UST app update failed to find app sock %d", sock);
		goto error;
	}

	if (!app->compatible) {
		goto error;
	}

	/* Get or create the app session; ua_sess is set on success. */
	ret = create_ust_app_session(usess, app, &ua_sess, NULL);
	if (ret < 0) {
		/* Tracer is probably gone or ENOMEM. */
		goto error;
	}
	assert(ua_sess);

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		pthread_mutex_unlock(&ua_sess->lock);
		goto error;
	}

	/*
	 * We can iterate safely here over all UST app session since the create ust
	 * app session above made a shadow copy of the UST global domain from the
	 * ltt ust session.
	 */
	cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
			node.node) {
		ret = do_create_channel(app, usess, ua_sess, ua_chan);
		if (ret < 0 && ret != -ENOTCONN) {
			/*
			 * Stop everything. On error, the application
			 * failed, no more file descriptor are available
			 * or ENOMEM so stopping here is the only thing
			 * we can do for now. The only exception is
			 * -ENOTCONN, which indicates that the application
			 * has exit.
			 */
			goto error_unlock;
		}

		/*
		 * Add context using the list so they are enabled in the same order the
		 * user added them.
		 */
		cds_list_for_each_entry(ua_ctx, &ua_chan->ctx_list, list) {
			ret = create_ust_channel_context(ua_chan, ua_ctx, app);
			if (ret < 0) {
				goto error_unlock;
			}
		}


		/* For each events */
		cds_lfht_for_each_entry(ua_chan->events->ht, &uiter.iter, ua_event,
				node.node) {
			ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
			if (ret < 0) {
				goto error_unlock;
			}
		}
	}

	pthread_mutex_unlock(&ua_sess->lock);

	/* If the session is already running, start tracing for this app too. */
	if (usess->active) {
		ret = ust_app_start_trace(usess, app);
		if (ret < 0) {
			goto error;
		}

		DBG2("UST trace started for app pid %d", app->pid);
	}

	/* Everything went well at this point. */
	rcu_read_unlock();
	return;

error_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
error:
	/* Tear down the partially-created app session, if any. */
	if (ua_sess) {
		destroy_app_session(app, ua_sess);
	}
	rcu_read_unlock();
	return;
}
4498
/*
 * Add context to a specific channel for global UST domain.
 *
 * Walk every registered application and, for each one tracing this session,
 * create the given context on the matching channel. Incompatible apps, apps
 * not tracing this session, deleted sessions and missing channels are all
 * skipped without aborting the iteration.
 *
 * Return 0 on success or the error of the last failing application.
 */
int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
{
	int ret = 0;
	struct lttng_ht_node_str *ua_chan_node;
	struct lttng_ht_iter iter, uiter;
	struct ust_app_channel *ua_chan = NULL;
	struct ust_app_session *ua_sess;
	struct ust_app *app;

	rcu_read_lock();

	cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
		if (!app->compatible) {
			/*
			 * TODO: In time, we should notice the caller of this error by
			 * telling him that this is a version error.
			 */
			continue;
		}
		ua_sess = lookup_session_by_app(usess, app);
		if (ua_sess == NULL) {
			/* Application is not tracing this session. */
			continue;
		}

		pthread_mutex_lock(&ua_sess->lock);

		if (ua_sess->deleted) {
			/* Session torn down concurrently; skip this app. */
			pthread_mutex_unlock(&ua_sess->lock);
			continue;
		}

		/* Lookup channel in the ust app session */
		lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
		ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
		if (ua_chan_node == NULL) {
			/* Channel not present in this app session; nothing to do. */
			goto next_app;
		}
		ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
				node);
		ret = create_ust_app_channel_context(ua_sess, ua_chan, &uctx->ctx, app);
		if (ret < 0) {
			/* Keep going with the next application even on error. */
			goto next_app;
		}
	next_app:
		pthread_mutex_unlock(&ua_sess->lock);
	}

	rcu_read_unlock();
	return ret;
}
4553
/*
 * Enable event for a channel from a UST session for a specific PID.
 *
 * Find the registered application matching the PID, locate the channel in
 * its app session (the channel MUST exist at this point) and either create
 * the event if unknown or simply enable it.
 *
 * Return 0 on success (including "nothing to do" cases such as an
 * incompatible or dead app), -1 if the PID is unknown, or a negative value
 * on event creation/enabling failure.
 */
int ust_app_enable_event_pid(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent, pid_t pid)
{
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_str *ua_chan_node;
	struct ust_app *app;
	struct ust_app_session *ua_sess;
	struct ust_app_channel *ua_chan;
	struct ust_app_event *ua_event;

	DBG("UST app enabling event %s for PID %d", uevent->attr.name, pid);

	rcu_read_lock();

	app = ust_app_find_by_pid(pid);
	if (app == NULL) {
		ERR("UST app enable event per PID %d not found", pid);
		ret = -1;
		goto end;
	}

	if (!app->compatible) {
		/* Tracer version mismatch; not an error for the caller. */
		ret = 0;
		goto end;
	}

	ua_sess = lookup_session_by_app(usess, app);
	if (!ua_sess) {
		/* The application has problem or is probably dead. */
		ret = 0;
		goto end;
	}

	pthread_mutex_lock(&ua_sess->lock);

	if (ua_sess->deleted) {
		/* Session torn down concurrently; bail out quietly. */
		ret = 0;
		goto end_unlock;
	}

	/* Lookup channel in the ust app session */
	lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
	ua_chan_node = lttng_ht_iter_get_node_str(&iter);
	/* If the channel is not found, there is a code flow error */
	assert(ua_chan_node);

	ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);

	ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
			uevent->filter, uevent->attr.loglevel, uevent->exclusion);
	if (ua_event == NULL) {
		/* Event not yet known for this app session; create it enabled. */
		ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
		if (ret < 0) {
			goto end_unlock;
		}
	} else {
		ret = enable_ust_app_event(ua_sess, ua_event, app);
		if (ret < 0) {
			goto end_unlock;
		}
	}

end_unlock:
	pthread_mutex_unlock(&ua_sess->lock);
end:
	rcu_read_unlock();
	return ret;
}
4626
4627 /*
4628 * Calibrate registered applications.
4629 */
4630 int ust_app_calibrate_glb(struct lttng_ust_calibrate *calibrate)
4631 {
4632 int ret = 0;
4633 struct lttng_ht_iter iter;
4634 struct ust_app *app;
4635
4636 rcu_read_lock();
4637
4638 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4639 if (!app->compatible) {
4640 /*
4641 * TODO: In time, we should notice the caller of this error by
4642 * telling him that this is a version error.
4643 */
4644 continue;
4645 }
4646
4647 health_code_update();
4648
4649 ret = ustctl_calibrate(app->sock, calibrate);
4650 if (ret < 0) {
4651 switch (ret) {
4652 case -ENOSYS:
4653 /* Means that it's not implemented on the tracer side. */
4654 ret = 0;
4655 break;
4656 default:
4657 DBG2("Calibrate app PID %d returned with error %d",
4658 app->pid, ret);
4659 break;
4660 }
4661 }
4662 }
4663
4664 DBG("UST app global domain calibration finished");
4665
4666 rcu_read_unlock();
4667
4668 health_code_update();
4669
4670 return ret;
4671 }
4672
4673 /*
4674 * Receive registration and populate the given msg structure.
4675 *
4676 * On success return 0 else a negative value returned by the ustctl call.
4677 */
4678 int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
4679 {
4680 int ret;
4681 uint32_t pid, ppid, uid, gid;
4682
4683 assert(msg);
4684
4685 ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
4686 &pid, &ppid, &uid, &gid,
4687 &msg->bits_per_long,
4688 &msg->uint8_t_alignment,
4689 &msg->uint16_t_alignment,
4690 &msg->uint32_t_alignment,
4691 &msg->uint64_t_alignment,
4692 &msg->long_alignment,
4693 &msg->byte_order,
4694 msg->name);
4695 if (ret < 0) {
4696 switch (-ret) {
4697 case EPIPE:
4698 case ECONNRESET:
4699 case LTTNG_UST_ERR_EXITING:
4700 DBG3("UST app recv reg message failed. Application died");
4701 break;
4702 case LTTNG_UST_ERR_UNSUP_MAJOR:
4703 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
4704 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
4705 LTTNG_UST_ABI_MINOR_VERSION);
4706 break;
4707 default:
4708 ERR("UST app recv reg message failed with ret %d", ret);
4709 break;
4710 }
4711 goto error;
4712 }
4713 msg->pid = (pid_t) pid;
4714 msg->ppid = (pid_t) ppid;
4715 msg->uid = (uid_t) uid;
4716 msg->gid = (gid_t) gid;
4717
4718 error:
4719 return ret;
4720 }
4721
4722 /*
4723 * Return a ust app channel object using the application object and the channel
4724 * object descriptor has a key. If not found, NULL is returned. A RCU read side
4725 * lock MUST be acquired before calling this function.
4726 */
4727 static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
4728 int objd)
4729 {
4730 struct lttng_ht_node_ulong *node;
4731 struct lttng_ht_iter iter;
4732 struct ust_app_channel *ua_chan = NULL;
4733
4734 assert(app);
4735
4736 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
4737 node = lttng_ht_iter_get_node_ulong(&iter);
4738 if (node == NULL) {
4739 DBG2("UST app channel find by objd %d not found", objd);
4740 goto error;
4741 }
4742
4743 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
4744
4745 error:
4746 return ua_chan;
4747 }
4748
4749 /*
4750 * Reply to a register channel notification from an application on the notify
4751 * socket. The channel metadata is also created.
4752 *
4753 * The session UST registry lock is acquired in this function.
4754 *
4755 * On success 0 is returned else a negative value.
4756 */
4757 static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
4758 size_t nr_fields, struct ustctl_field *fields)
4759 {
4760 int ret, ret_code = 0;
4761 uint32_t chan_id, reg_count;
4762 uint64_t chan_reg_key;
4763 enum ustctl_channel_header type;
4764 struct ust_app *app;
4765 struct ust_app_channel *ua_chan;
4766 struct ust_app_session *ua_sess;
4767 struct ust_registry_session *registry;
4768 struct ust_registry_channel *chan_reg;
4769
4770 rcu_read_lock();
4771
4772 /* Lookup application. If not found, there is a code flow error. */
4773 app = find_app_by_notify_sock(sock);
4774 if (!app) {
4775 DBG("Application socket %d is being teardown. Abort event notify",
4776 sock);
4777 ret = 0;
4778 free(fields);
4779 goto error_rcu_unlock;
4780 }
4781
4782 /* Lookup channel by UST object descriptor. */
4783 ua_chan = find_channel_by_objd(app, cobjd);
4784 if (!ua_chan) {
4785 DBG("Application channel is being teardown. Abort event notify");
4786 ret = 0;
4787 free(fields);
4788 goto error_rcu_unlock;
4789 }
4790
4791 assert(ua_chan->session);
4792 ua_sess = ua_chan->session;
4793
4794 /* Get right session registry depending on the session buffer type. */
4795 registry = get_session_registry(ua_sess);
4796 assert(registry);
4797
4798 /* Depending on the buffer type, a different channel key is used. */
4799 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
4800 chan_reg_key = ua_chan->tracing_channel_id;
4801 } else {
4802 chan_reg_key = ua_chan->key;
4803 }
4804
4805 pthread_mutex_lock(&registry->lock);
4806
4807 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
4808 assert(chan_reg);
4809
4810 if (!chan_reg->register_done) {
4811 reg_count = ust_registry_get_event_count(chan_reg);
4812 if (reg_count < 31) {
4813 type = USTCTL_CHANNEL_HEADER_COMPACT;
4814 } else {
4815 type = USTCTL_CHANNEL_HEADER_LARGE;
4816 }
4817
4818 chan_reg->nr_ctx_fields = nr_fields;
4819 chan_reg->ctx_fields = fields;
4820 chan_reg->header_type = type;
4821 } else {
4822 /* Get current already assigned values. */
4823 type = chan_reg->header_type;
4824 free(fields);
4825 /* Set to NULL so the error path does not do a double free. */
4826 fields = NULL;
4827 }
4828 /* Channel id is set during the object creation. */
4829 chan_id = chan_reg->chan_id;
4830
4831 /* Append to metadata */
4832 if (!chan_reg->metadata_dumped) {
4833 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
4834 if (ret_code) {
4835 ERR("Error appending channel metadata (errno = %d)", ret_code);
4836 goto reply;
4837 }
4838 }
4839
4840 reply:
4841 DBG3("UST app replying to register channel key %" PRIu64
4842 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
4843 ret_code);
4844
4845 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
4846 if (ret < 0) {
4847 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4848 ERR("UST app reply channel failed with ret %d", ret);
4849 } else {
4850 DBG3("UST app reply channel failed. Application died");
4851 }
4852 goto error;
4853 }
4854
4855 /* This channel registry registration is completed. */
4856 chan_reg->register_done = 1;
4857
4858 error:
4859 pthread_mutex_unlock(&registry->lock);
4860 error_rcu_unlock:
4861 rcu_read_unlock();
4862 if (ret) {
4863 free(fields);
4864 }
4865 return ret;
4866 }
4867
/*
 * Add event to the UST channel registry. When the event is added to the
 * registry, the metadata is also created. Once done, this replies to the
 * application with the appropriate error code.
 *
 * This call takes ownership of sig, fields and model_emf_uri: they are
 * either consumed by ust_registry_create_event() or freed here on the early
 * teardown paths.
 *
 * The session UST registry lock is acquired in the function.
 *
 * On success 0 is returned else a negative value.
 */
static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
		char *sig, size_t nr_fields, struct ustctl_field *fields, int loglevel,
		char *model_emf_uri)
{
	int ret, ret_code;
	uint32_t event_id = 0;
	uint64_t chan_reg_key;
	struct ust_app *app;
	struct ust_app_channel *ua_chan;
	struct ust_app_session *ua_sess;
	struct ust_registry_session *registry;

	rcu_read_lock();

	/* Lookup application. If not found, there is a code flow error. */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		DBG("Application socket %d is being teardown. Abort event notify",
				sock);
		ret = 0;
		/* Still own the received buffers here; release them. */
		free(sig);
		free(fields);
		free(model_emf_uri);
		goto error_rcu_unlock;
	}

	/* Lookup channel by UST object descriptor. */
	ua_chan = find_channel_by_objd(app, cobjd);
	if (!ua_chan) {
		DBG("Application channel is being teardown. Abort event notify");
		ret = 0;
		free(sig);
		free(fields);
		free(model_emf_uri);
		goto error_rcu_unlock;
	}

	assert(ua_chan->session);
	ua_sess = ua_chan->session;

	registry = get_session_registry(ua_sess);
	assert(registry);

	/* Depending on the buffer type, a different channel key is used. */
	if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
		chan_reg_key = ua_chan->tracing_channel_id;
	} else {
		chan_reg_key = ua_chan->key;
	}

	pthread_mutex_lock(&registry->lock);

	/*
	 * From this point on, this call acquires the ownership of the sig, fields
	 * and model_emf_uri meaning any free are done inside it if needed. These
	 * three variables MUST NOT be read/write after this.
	 */
	ret_code = ust_registry_create_event(registry, chan_reg_key,
			sobjd, cobjd, name, sig, nr_fields, fields, loglevel,
			model_emf_uri, ua_sess->buffer_type, &event_id,
			app);

	/*
	 * The return value is returned to ustctl so in case of an error, the
	 * application can be notified. In case of an error, it's important not to
	 * return a negative error or else the application will get closed.
	 */
	ret = ustctl_reply_register_event(sock, event_id, ret_code);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app reply event failed with ret %d", ret);
		} else {
			DBG3("UST app reply event failed. Application died");
		}
		/*
		 * No need to wipe the create event since the application socket will
		 * get close on error hence cleaning up everything by itself.
		 */
		goto error;
	}

	DBG3("UST registry event %s with id %" PRId32 " added successfully",
			name, event_id);

error:
	pthread_mutex_unlock(&registry->lock);
error_rcu_unlock:
	rcu_read_unlock();
	return ret;
}
4966
/*
 * Handle application notification through the given notify socket.
 *
 * Receive the notify command then dispatch on it: either an event
 * registration (handed to add_event_ust_registry()) or a channel
 * registration (handed to reply_ust_register_channel()). Both helpers take
 * ownership of the heap buffers received from ustctl.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_recv_notify(int sock)
{
	int ret;
	enum ustctl_notify_cmd cmd;

	DBG3("UST app receiving notify from sock %d", sock);

	ret = ustctl_recv_notify(sock, &cmd);
	if (ret < 0) {
		if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
			ERR("UST app recv notify failed with ret %d", ret);
		} else {
			DBG3("UST app recv notify failed. Application died");
		}
		goto error;
	}

	switch (cmd) {
	case USTCTL_NOTIFY_CMD_EVENT:
	{
		int sobjd, cobjd, loglevel;
		char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register event received");

		/* sig, fields and model_emf_uri are heap-allocated by this call. */
		ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name, &loglevel,
				&sig, &nr_fields, &fields, &model_emf_uri);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv event failed with ret %d", ret);
			} else {
				DBG3("UST app recv event failed. Application died");
			}
			goto error;
		}

		/*
		 * Add event to the UST registry coming from the notify socket. This
		 * call will free if needed the sig, fields and model_emf_uri. This
		 * code path loses the ownsership of these variables and transfer them
		 * to the this function.
		 */
		ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
				fields, loglevel, model_emf_uri);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	case USTCTL_NOTIFY_CMD_CHANNEL:
	{
		int sobjd, cobjd;
		size_t nr_fields;
		struct ustctl_field *fields;

		DBG2("UST app ustctl register channel received");

		/* fields is heap-allocated by this call. */
		ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
				&fields);
		if (ret < 0) {
			if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
				ERR("UST app recv channel failed with ret %d", ret);
			} else {
				DBG3("UST app recv channel failed. Application died");
			}
			goto error;
		}

		/*
		 * The fields ownership are transfered to this function call meaning
		 * that if needed it will be freed. After this, it's invalid to access
		 * fields or clean it up.
		 */
		ret = reply_ust_register_channel(sock, sobjd, cobjd, nr_fields,
				fields);
		if (ret < 0) {
			goto error;
		}

		break;
	}
	default:
		/* Should NEVER happen. */
		assert(0);
	}

error:
	return ret;
}
5064
/*
 * Once the notify socket hangs up, this is called. First, it tries to find the
 * corresponding application. On failure, the call_rcu to close the socket is
 * executed. If an application is found, it tries to delete it from the notify
 * socket hash table. Whathever the result, it proceeds to the call_rcu.
 *
 * Note that an object needs to be allocated here so on ENOMEM failure, the
 * call RCU is not done but the rest of the cleanup is.
 */
void ust_app_notify_sock_unregister(int sock)
{
	int err_enomem = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	struct ust_app_notify_sock_obj *obj;

	assert(sock >= 0);

	rcu_read_lock();

	/* Carrier object for the deferred close (freed by the RCU callback). */
	obj = zmalloc(sizeof(*obj));
	if (!obj) {
		/*
		 * An ENOMEM is kind of uncool. If this strikes we continue the
		 * procedure but the call_rcu will not be called. In this case, we
		 * accept the fd leak rather than possibly creating an unsynchronized
		 * state between threads.
		 *
		 * TODO: The notify object should be created once the notify socket is
		 * registered and stored independantely from the ust app object. The
		 * tricky part is to synchronize the teardown of the application and
		 * this notify object. Let's keep that in mind so we can avoid this
		 * kind of shenanigans with ENOMEM in the teardown path.
		 */
		err_enomem = 1;
	} else {
		obj->fd = sock;
	}

	DBG("UST app notify socket unregister %d", sock);

	/*
	 * Lookup application by notify socket. If this fails, this means that the
	 * hash table delete has already been done by the application
	 * unregistration process so we can safely close the notify socket in a
	 * call RCU.
	 */
	app = find_app_by_notify_sock(sock);
	if (!app) {
		goto close_socket;
	}

	iter.iter.node = &app->notify_sock_n.node;

	/*
	 * Whatever happens here either we fail or succeed, in both cases we have
	 * to close the socket after a grace period to continue to the call RCU
	 * here. If the deletion is successful, the application is not visible
	 * anymore by other threads and is it fails it means that it was already
	 * deleted from the hash table so either way we just have to close the
	 * socket.
	 */
	(void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);

close_socket:
	rcu_read_unlock();

	/*
	 * Close socket after a grace period to avoid for the socket to be reused
	 * before the application object is freed creating potential race between
	 * threads trying to add unique in the global hash table.
	 */
	if (!err_enomem) {
		call_rcu(&obj->head, close_notify_sock_rcu);
	}
}
5141
5142 /*
5143 * Destroy a ust app data structure and free its memory.
5144 */
5145 void ust_app_destroy(struct ust_app *app)
5146 {
5147 if (!app) {
5148 return;
5149 }
5150
5151 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
5152 }
5153
/*
 * Take a snapshot for a given UST session. The snapshot is sent to the given
 * output.
 *
 * Per-UID buffers: iterate over the UID buffer registries and snapshot each
 * registry channel plus its metadata. Per-PID buffers: iterate over every
 * registered application tracing this session and do the same with its app
 * channels. If no buffer produced a snapshot, -ENODATA is returned.
 *
 * Return 0 on success or else a negative value.
 */
int ust_app_snapshot_record(struct ltt_ust_session *usess,
		struct snapshot_output *output, int wait,
		uint64_t nb_packets_per_stream)
{
	int ret = 0;
	unsigned int snapshot_done = 0;
	struct lttng_ht_iter iter;
	struct ust_app *app;
	char pathname[PATH_MAX];

	assert(usess);
	assert(output);

	rcu_read_lock();

	switch (usess->buffer_type) {
	case LTTNG_BUFFER_PER_UID:
	{
		struct buffer_reg_uid *reg;

		cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
			struct buffer_reg_channel *reg_chan;
			struct consumer_socket *socket;

			/* Get consumer socket to use to push the metadata.*/
			socket = consumer_find_socket_by_bitness(reg->bits_per_long,
					usess->consumer);
			if (!socket) {
				ret = -EINVAL;
				goto error;
			}

			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname),
					DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
					reg->uid, reg->bits_per_long);
			if (ret < 0) {
				PERROR("snprintf snapshot path");
				goto error;
			}

			/* Add the UST default trace dir to path. */
			cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
					reg_chan, node.node) {
				ret = consumer_snapshot_channel(socket, reg_chan->consumer_key,
						output, 0, usess->uid, usess->gid, pathname, wait,
						nb_packets_per_stream);
				if (ret < 0) {
					goto error;
				}
			}
			/* Snapshot the metadata channel last (metadata flag set). */
			ret = consumer_snapshot_channel(socket,
					reg->registry->reg.ust->metadata_key, output, 1,
					usess->uid, usess->gid, pathname, wait, 0);
			if (ret < 0) {
				goto error;
			}
			snapshot_done = 1;
		}
		break;
	}
	case LTTNG_BUFFER_PER_PID:
	{
		cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
			struct consumer_socket *socket;
			struct lttng_ht_iter chan_iter;
			struct ust_app_channel *ua_chan;
			struct ust_app_session *ua_sess;
			struct ust_registry_session *registry;

			ua_sess = lookup_session_by_app(usess, app);
			if (!ua_sess) {
				/* Session not associated with this app. */
				continue;
			}

			/* Get the right consumer socket for the application. */
			socket = consumer_find_socket_by_bitness(app->bits_per_long,
					output->consumer);
			if (!socket) {
				ret = -EINVAL;
				goto error;
			}

			/* Add the UST default trace dir to path. */
			memset(pathname, 0, sizeof(pathname));
			ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
					ua_sess->path);
			if (ret < 0) {
				PERROR("snprintf snapshot path");
				goto error;
			}

			cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
					ua_chan, node.node) {
				ret = consumer_snapshot_channel(socket, ua_chan->key, output,
						0, ua_sess->euid, ua_sess->egid, pathname, wait,
						nb_packets_per_stream);
				if (ret < 0) {
					goto error;
				}
			}

			/* Snapshot the metadata channel last (metadata flag set). */
			registry = get_session_registry(ua_sess);
			assert(registry);
			ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
					1, ua_sess->euid, ua_sess->egid, pathname, wait, 0);
			if (ret < 0) {
				goto error;
			}
			snapshot_done = 1;
		}
		break;
	}
	default:
		assert(0);
		break;
	}

	if (!snapshot_done) {
		/*
		 * If no snapshot was made and we are not in the error path, this means
		 * that there are no buffers thus no (prior) application to snapshot
		 * data from so we have simply NO data.
		 */
		ret = -ENODATA;
	}

error:
	rcu_read_unlock();
	return ret;
}
5292
5293 /*
5294 * Return the size taken by one more packet per stream.
5295 */
5296 uint64_t ust_app_get_size_one_more_packet_per_stream(struct ltt_ust_session *usess,
5297 uint64_t cur_nr_packets)
5298 {
5299 uint64_t tot_size = 0;
5300 struct ust_app *app;
5301 struct lttng_ht_iter iter;
5302
5303 assert(usess);
5304
5305 switch (usess->buffer_type) {
5306 case LTTNG_BUFFER_PER_UID:
5307 {
5308 struct buffer_reg_uid *reg;
5309
5310 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5311 struct buffer_reg_channel *reg_chan;
5312
5313 rcu_read_lock();
5314 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5315 reg_chan, node.node) {
5316 if (cur_nr_packets >= reg_chan->num_subbuf) {
5317 /*
5318 * Don't take channel into account if we
5319 * already grab all its packets.
5320 */
5321 continue;
5322 }
5323 tot_size += reg_chan->subbuf_size * reg_chan->stream_count;
5324 }
5325 rcu_read_unlock();
5326 }
5327 break;
5328 }
5329 case LTTNG_BUFFER_PER_PID:
5330 {
5331 rcu_read_lock();
5332 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5333 struct ust_app_channel *ua_chan;
5334 struct ust_app_session *ua_sess;
5335 struct lttng_ht_iter chan_iter;
5336
5337 ua_sess = lookup_session_by_app(usess, app);
5338 if (!ua_sess) {
5339 /* Session not associated with this app. */
5340 continue;
5341 }
5342
5343 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
5344 ua_chan, node.node) {
5345 if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
5346 /*
5347 * Don't take channel into account if we
5348 * already grab all its packets.
5349 */
5350 continue;
5351 }
5352 tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
5353 }
5354 }
5355 rcu_read_unlock();
5356 break;
5357 }
5358 default:
5359 assert(0);
5360 break;
5361 }
5362
5363 return tot_size;
5364 }
This page took 0.137766 seconds and 4 git commands to generate.