Fix: skip start trace for apps that are already started.
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
91d76f53
DG
1/*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
bdf64013 3 * Copyright (C) 2016 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
91d76f53 4 *
d14d33bf
AM
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
91d76f53
DG
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
d14d33bf
AM
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
91d76f53
DG
17 */
18
6c1c0768 19#define _LGPL_SOURCE
91d76f53 20#include <errno.h>
7972aab2 21#include <inttypes.h>
91d76f53
DG
22#include <pthread.h>
23#include <stdio.h>
24#include <stdlib.h>
099e26bd 25#include <string.h>
aba8e916
DG
26#include <sys/stat.h>
27#include <sys/types.h>
099e26bd 28#include <unistd.h>
0df502fd 29#include <urcu/compiler.h>
331744e3 30#include <signal.h>
bec39940 31
990570ed 32#include <common/common.h>
86acf0da 33#include <common/sessiond-comm/sessiond-comm.h>
1e307fab 34
7972aab2 35#include "buffer-registry.h"
86acf0da 36#include "fd-limit.h"
8782cc74 37#include "health-sessiond.h"
56fff090 38#include "ust-app.h"
48842b30 39#include "ust-consumer.h"
75018ab6
JG
40#include "lttng-ust-ctl.h"
41#include "lttng-ust-error.h"
0b2dc8df 42#include "utils.h"
fb83fe64 43#include "session.h"
e9404c27
JG
44#include "lttng-sessiond.h"
45#include "notification-thread-commands.h"
5c408ad8 46#include "rotate.h"
d80a6244 47
c4b88406
MD
48static
49int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
50
d9bf3ca4
MD
51/* Next available channel key. Access under next_channel_key_lock. */
52static uint64_t _next_channel_key;
53static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
54
55/* Next available session ID. Access under next_session_id_lock. */
56static uint64_t _next_session_id;
57static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
ffe60014
DG
58
59/*
d9bf3ca4 60 * Return the incremented value of next_channel_key.
ffe60014 61 */
d9bf3ca4 62static uint64_t get_next_channel_key(void)
ffe60014 63{
d9bf3ca4
MD
64 uint64_t ret;
65
66 pthread_mutex_lock(&next_channel_key_lock);
67 ret = ++_next_channel_key;
68 pthread_mutex_unlock(&next_channel_key_lock);
69 return ret;
ffe60014
DG
70}
71
72/*
7972aab2 73 * Return the atomically incremented value of next_session_id.
ffe60014 74 */
d9bf3ca4 75static uint64_t get_next_session_id(void)
ffe60014 76{
d9bf3ca4
MD
77 uint64_t ret;
78
79 pthread_mutex_lock(&next_session_id_lock);
80 ret = ++_next_session_id;
81 pthread_mutex_unlock(&next_session_id_lock);
82 return ret;
ffe60014
DG
83}
84
d65d2de8
DG
85static void copy_channel_attr_to_ustctl(
86 struct ustctl_consumer_channel_attr *attr,
87 struct lttng_ust_channel_attr *uattr)
88{
89 /* Copy channel attributes since the layout is different. */
90 attr->subbuf_size = uattr->subbuf_size;
91 attr->num_subbuf = uattr->num_subbuf;
92 attr->overwrite = uattr->overwrite;
93 attr->switch_timer_interval = uattr->switch_timer_interval;
94 attr->read_timer_interval = uattr->read_timer_interval;
95 attr->output = uattr->output;
491d1539 96 attr->blocking_timeout = uattr->u.s.blocking_timeout;
d65d2de8
DG
97}
98
025faf73
DG
99/*
100 * Match function for the hash table lookup.
101 *
102 * It matches a UST app event based on four attributes: the event name, the
103 * filter bytecode, the loglevel and the exclusions.
104 */
18eace3b
DG
105static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
106{
107 struct ust_app_event *event;
108 const struct ust_app_ht_key *key;
2106efa0 109 int ev_loglevel_value;
18eace3b
DG
110
111 assert(node);
112 assert(_key);
113
114 event = caa_container_of(node, struct ust_app_event, node.node);
115 key = _key;
2106efa0 116 ev_loglevel_value = event->attr.loglevel;
18eace3b 117
1af53eb5 118 /* Match the 4 elements of the key: name, filter, loglevel, exclusions */
18eace3b
DG
119
120 /* Event name */
121 if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
122 goto no_match;
123 }
124
125 /* Event loglevel. */
2106efa0 126 if (ev_loglevel_value != key->loglevel_type) {
025faf73 127 if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
2106efa0
PP
128 && key->loglevel_type == 0 &&
129 ev_loglevel_value == -1) {
025faf73
DG
130 /*
131 * Match is accepted. This is because on event creation, the
132 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
133 * -1 are accepted for this loglevel type since 0 is the one set by
134 * the API when receiving an enable event.
135 */
136 } else {
137 goto no_match;
138 }
18eace3b
DG
139 }
140
141 /* One of the filters is NULL, fail. */
142 if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
143 goto no_match;
144 }
145
025faf73
DG
146 if (key->filter && event->filter) {
147 /* Both filters exist, check length followed by the bytecode. */
148 if (event->filter->len != key->filter->len ||
149 memcmp(event->filter->data, key->filter->data,
150 event->filter->len) != 0) {
151 goto no_match;
152 }
18eace3b
DG
153 }
154
1af53eb5
JI
155 /* One of the exclusions is NULL, fail. */
156 if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
157 goto no_match;
158 }
159
160 if (key->exclusion && event->exclusion) {
161 /* Both exclusions exist, check count followed by the names. */
162 if (event->exclusion->count != key->exclusion->count ||
163 memcmp(event->exclusion->names, key->exclusion->names,
164 event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
165 goto no_match;
166 }
167 }
168
169
025faf73 170 /* Match. */
18eace3b
DG
171 return 1;
172
173no_match:
174 return 0;
18eace3b
DG
175}
176
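/*
 * Illustrative sketch (not part of the original file): the events hash table
 * is hashed on the event name only, so both lookups and unique insertions pass
 * ht_match_ust_app_event() as the match callback along with a fully populated
 * ust_app_ht_key. This mirrors the cds_lfht_lookup()/cds_lfht_add_unique()
 * calls made further down in this file; the local variable names here are
 * hypothetical.
 *
 *	struct ust_app_ht_key key;
 *	struct lttng_ht_iter iter;
 *
 *	key.name = name;
 *	key.filter = filter;
 *	key.loglevel_type = loglevel_value;
 *	key.exclusion = exclusion;
 *	cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
 *			ht_match_ust_app_event, &key, &iter.iter);
 */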
025faf73
DG
177/*
178 * Unique add of an ust app event in the given ht. This uses the custom
179 * ht_match_ust_app_event match function and the event name as hash.
180 */
d0b96690 181static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
18eace3b
DG
182 struct ust_app_event *event)
183{
184 struct cds_lfht_node *node_ptr;
185 struct ust_app_ht_key key;
d0b96690 186 struct lttng_ht *ht;
18eace3b 187
d0b96690
DG
188 assert(ua_chan);
189 assert(ua_chan->events);
18eace3b
DG
190 assert(event);
191
d0b96690 192 ht = ua_chan->events;
18eace3b
DG
193 key.name = event->attr.name;
194 key.filter = event->filter;
2106efa0 195 key.loglevel_type = event->attr.loglevel;
91c89f23 196 key.exclusion = event->exclusion;
18eace3b
DG
197
198 node_ptr = cds_lfht_add_unique(ht->ht,
199 ht->hash_fct(event->node.key, lttng_ht_seed),
200 ht_match_ust_app_event, &key, &event->node.node);
201 assert(node_ptr == &event->node.node);
202}
203
d88aee68
DG
204/*
205 * Close the notify socket from the given RCU head object. This MUST be called
206 * through a call_rcu().
207 */
208static void close_notify_sock_rcu(struct rcu_head *head)
209{
210 int ret;
211 struct ust_app_notify_sock_obj *obj =
212 caa_container_of(head, struct ust_app_notify_sock_obj, head);
213
214 /* Must have a valid fd here. */
215 assert(obj->fd >= 0);
216
217 ret = close(obj->fd);
218 if (ret) {
219 ERR("close notify sock %d RCU", obj->fd);
220 }
221 lttng_fd_put(LTTNG_FD_APPS, 1);
222
223 free(obj);
224}
225
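/*
 * Usage sketch (assumption; the actual caller lives elsewhere in this file):
 * the notify socket is wrapped in a heap-allocated ust_app_notify_sock_obj and
 * handed to liburcu so the close only happens after a grace period.
 *
 *	struct ust_app_notify_sock_obj *obj;
 *
 *	obj = zmalloc(sizeof(*obj));
 *	if (!obj) {
 *		return;	// the real caller handles allocation failure
 *	}
 *	obj->fd = sock;
 *	call_rcu(&obj->head, close_notify_sock_rcu);
 */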
7972aab2
DG
226/*
227 * Return the session registry according to the buffer type of the given
228 * session.
229 *
230 * A registry per UID object MUST exist before calling this function or else
231 * the function asserts if it is not found. The RCU read side lock must be acquired.
232 */
233static struct ust_registry_session *get_session_registry(
234 struct ust_app_session *ua_sess)
235{
236 struct ust_registry_session *registry = NULL;
237
238 assert(ua_sess);
239
240 switch (ua_sess->buffer_type) {
241 case LTTNG_BUFFER_PER_PID:
242 {
243 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
244 if (!reg_pid) {
245 goto error;
246 }
247 registry = reg_pid->registry->reg.ust;
248 break;
249 }
250 case LTTNG_BUFFER_PER_UID:
251 {
252 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
470cc211
JG
253 ua_sess->tracing_id, ua_sess->bits_per_long,
254 ua_sess->real_credentials.uid);
7972aab2
DG
255 if (!reg_uid) {
256 goto error;
257 }
258 registry = reg_uid->registry->reg.ust;
259 break;
260 }
261 default:
262 assert(0);
263 };
264
265error:
266 return registry;
267}
268
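/*
 * Illustrative sketch (not part of the original file) of the contract stated
 * above: the caller enters an RCU read-side critical section before the
 * lookup, keeps it for as long as the registry is used, and handles a NULL
 * return on the error paths.
 *
 *	rcu_read_lock();
 *	registry = get_session_registry(ua_sess);
 *	if (registry) {
 *		// safe to use the registry here
 *	}
 *	rcu_read_unlock();
 */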
55cc08a6
DG
269/*
270 * Delete ust context safely. RCU read lock must be held before calling
271 * this function.
272 */
273static
fb45065e
MD
274void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
275 struct ust_app *app)
55cc08a6 276{
ffe60014
DG
277 int ret;
278
279 assert(ua_ctx);
280
55cc08a6 281 if (ua_ctx->obj) {
fb45065e 282 pthread_mutex_lock(&app->sock_lock);
ffe60014 283 ret = ustctl_release_object(sock, ua_ctx->obj);
fb45065e 284 pthread_mutex_unlock(&app->sock_lock);
d0b96690
DG
285 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
286 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
287 sock, ua_ctx->obj->handle, ret);
ffe60014 288 }
55cc08a6
DG
289 free(ua_ctx->obj);
290 }
291 free(ua_ctx);
292}
293
d80a6244
DG
294/*
295 * Delete ust app event safely. RCU read lock must be held before calling
296 * this function.
297 */
8b366481 298static
fb45065e
MD
299void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
300 struct ust_app *app)
d80a6244 301{
ffe60014
DG
302 int ret;
303
304 assert(ua_event);
305
53a80697 306 free(ua_event->filter);
951f0b71
JI
307 if (ua_event->exclusion != NULL)
308 free(ua_event->exclusion);
edb67388 309 if (ua_event->obj != NULL) {
fb45065e 310 pthread_mutex_lock(&app->sock_lock);
ffe60014 311 ret = ustctl_release_object(sock, ua_event->obj);
fb45065e 312 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
313 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
314 ERR("UST app sock %d release event obj failed with ret %d",
315 sock, ret);
316 }
edb67388
DG
317 free(ua_event->obj);
318 }
d80a6244
DG
319 free(ua_event);
320}
321
322/*
7972aab2
DG
323 * Release ust data object of the given stream.
324 *
325 * Return 0 on success or else a negative value.
d80a6244 326 */
fb45065e
MD
327static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
328 struct ust_app *app)
d80a6244 329{
7972aab2 330 int ret = 0;
ffe60014
DG
331
332 assert(stream);
333
8b366481 334 if (stream->obj) {
fb45065e 335 pthread_mutex_lock(&app->sock_lock);
ffe60014 336 ret = ustctl_release_object(sock, stream->obj);
fb45065e 337 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
338 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
339 ERR("UST app sock %d release stream obj failed with ret %d",
340 sock, ret);
341 }
4063050c 342 lttng_fd_put(LTTNG_FD_APPS, 2);
8b366481
DG
343 free(stream->obj);
344 }
7972aab2
DG
345
346 return ret;
347}
348
349/*
350 * Delete ust app stream safely. RCU read lock must be held before calling
351 * this function.
352 */
353static
fb45065e
MD
354void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
355 struct ust_app *app)
7972aab2
DG
356{
357 assert(stream);
358
fb45065e 359 (void) release_ust_app_stream(sock, stream, app);
84cd17c6 360 free(stream);
d80a6244
DG
361}
362
36b588ed
MD
363/*
364 * We need to execute ht_destroy outside of RCU read-side critical
0b2dc8df
MD
365 * section and outside of call_rcu thread, so we postpone its execution
366 * using ht_cleanup_push. It is simpler than changing the semantics of
367 * the many callers of delete_ust_app_session().
36b588ed
MD
368 */
369static
370void delete_ust_app_channel_rcu(struct rcu_head *head)
371{
372 struct ust_app_channel *ua_chan =
373 caa_container_of(head, struct ust_app_channel, rcu_head);
374
0b2dc8df
MD
375 ht_cleanup_push(ua_chan->ctx);
376 ht_cleanup_push(ua_chan->events);
36b588ed
MD
377 free(ua_chan);
378}
379
fb83fe64
JD
380/*
381 * Extract the lost packets or discarded events counter when the channel is
382 * being deleted and store the value in the parent channel so we can
383 * access it from lttng list and at stop/destroy.
82cac6d2
JG
384 *
385 * The session list lock must be held by the caller.
fb83fe64
JD
386 */
387static
388void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
389{
390 uint64_t discarded = 0, lost = 0;
391 struct ltt_session *session;
392 struct ltt_ust_channel *uchan;
393
394 if (ua_chan->attr.type != LTTNG_UST_CHAN_PER_CPU) {
395 return;
396 }
397
398 rcu_read_lock();
399 session = session_find_by_id(ua_chan->session->tracing_id);
d68ec974
JG
400 if (!session || !session->ust_session) {
401 /*
402 * Not finding the session is not an error because there are
403 * multiple ways the channels can be torn down.
404 *
405 * 1) The session daemon can initiate the destruction of the
406 * ust app session after receiving a destroy command or
407 * during its shutdown/teardown.
408 * 2) The application, since we are in per-pid tracing, is
409 * unregistering and tearing down its ust app session.
410 *
411 * Both paths are protected by the session list lock which
412 * ensures that the accounting of lost packets and discarded
413 * events is done exactly once. The session is then unpublished
414 * from the session list, resulting in this condition.
415 */
fb83fe64
JD
416 goto end;
417 }
418
419 if (ua_chan->attr.overwrite) {
420 consumer_get_lost_packets(ua_chan->session->tracing_id,
421 ua_chan->key, session->ust_session->consumer,
422 &lost);
423 } else {
424 consumer_get_discarded_events(ua_chan->session->tracing_id,
425 ua_chan->key, session->ust_session->consumer,
426 &discarded);
427 }
428 uchan = trace_ust_find_channel_by_name(
429 session->ust_session->domain_global.channels,
430 ua_chan->name);
431 if (!uchan) {
432 ERR("Missing UST channel to store discarded counters");
433 goto end;
434 }
435
436 uchan->per_pid_closed_app_discarded += discarded;
437 uchan->per_pid_closed_app_lost += lost;
438
439end:
440 rcu_read_unlock();
e32d7f27
JG
441 if (session) {
442 session_put(session);
443 }
fb83fe64
JD
444}
445
d80a6244
DG
446/*
447 * Delete ust app channel safely. RCU read lock must be held before calling
448 * this function.
82cac6d2
JG
449 *
450 * The session list lock must be held by the caller.
d80a6244 451 */
8b366481 452static
d0b96690
DG
453void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
454 struct ust_app *app)
d80a6244
DG
455{
456 int ret;
bec39940 457 struct lttng_ht_iter iter;
d80a6244 458 struct ust_app_event *ua_event;
55cc08a6 459 struct ust_app_ctx *ua_ctx;
030a66fa 460 struct ust_app_stream *stream, *stmp;
7972aab2 461 struct ust_registry_session *registry;
d80a6244 462
ffe60014
DG
463 assert(ua_chan);
464
465 DBG3("UST app deleting channel %s", ua_chan->name);
466
55cc08a6 467 /* Wipe stream */
d80a6244 468 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
84cd17c6 469 cds_list_del(&stream->list);
fb45065e 470 delete_ust_app_stream(sock, stream, app);
d80a6244
DG
471 }
472
55cc08a6 473 /* Wipe context */
bec39940 474 cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
31746f93 475 cds_list_del(&ua_ctx->list);
bec39940 476 ret = lttng_ht_del(ua_chan->ctx, &iter);
55cc08a6 477 assert(!ret);
fb45065e 478 delete_ust_app_ctx(sock, ua_ctx, app);
55cc08a6 479 }
d80a6244 480
55cc08a6 481 /* Wipe events */
bec39940
DG
482 cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
483 node.node) {
484 ret = lttng_ht_del(ua_chan->events, &iter);
525b0740 485 assert(!ret);
fb45065e 486 delete_ust_app_event(sock, ua_event, app);
d80a6244 487 }
edb67388 488
c8335706
MD
489 if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
490 /* Wipe and free registry from session registry. */
491 registry = get_session_registry(ua_chan->session);
492 if (registry) {
e9404c27 493 ust_registry_channel_del_free(registry, ua_chan->key,
e38d96f9
MD
494 sock >= 0);
495 }
45798a31
JG
496 /*
497 * A negative socket can be used by the caller when
498 * cleaning-up a ua_chan in an error path. Skip the
499 * accounting in this case.
500 */
e38d96f9
MD
501 if (sock >= 0) {
502 save_per_pid_lost_discarded_counters(ua_chan);
c8335706 503 }
7972aab2 504 }
d0b96690 505
edb67388 506 if (ua_chan->obj != NULL) {
d0b96690
DG
507 /* Remove channel from application UST object descriptor. */
508 iter.iter.node = &ua_chan->ust_objd_node.node;
c6e62271
DG
509 ret = lttng_ht_del(app->ust_objd, &iter);
510 assert(!ret);
fb45065e 511 pthread_mutex_lock(&app->sock_lock);
ffe60014 512 ret = ustctl_release_object(sock, ua_chan->obj);
fb45065e 513 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
514 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
515 ERR("UST app sock %d release channel obj failed with ret %d",
516 sock, ret);
517 }
7972aab2 518 lttng_fd_put(LTTNG_FD_APPS, 1);
edb67388
DG
519 free(ua_chan->obj);
520 }
36b588ed 521 call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
d80a6244
DG
522}
523
fb45065e
MD
524int ust_app_register_done(struct ust_app *app)
525{
526 int ret;
527
528 pthread_mutex_lock(&app->sock_lock);
529 ret = ustctl_register_done(app->sock);
530 pthread_mutex_unlock(&app->sock_lock);
531 return ret;
532}
533
534int ust_app_release_object(struct ust_app *app, struct lttng_ust_object_data *data)
535{
536 int ret, sock;
537
538 if (app) {
539 pthread_mutex_lock(&app->sock_lock);
540 sock = app->sock;
541 } else {
542 sock = -1;
543 }
544 ret = ustctl_release_object(sock, data);
545 if (app) {
546 pthread_mutex_unlock(&app->sock_lock);
547 }
548 return ret;
549}
550
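/*
 * Illustrative sketch (not part of the original file) of the pattern the two
 * helpers above encapsulate and that recurs throughout this file: every ustctl
 * command on app->sock is serialized with app->sock_lock, and -EPIPE or
 * -LTTNG_UST_ERR_EXITING simply mean the application died, which is not
 * reported as an error.
 *
 *	pthread_mutex_lock(&app->sock_lock);
 *	ret = ustctl_release_object(app->sock, data);
 *	pthread_mutex_unlock(&app->sock_lock);
 *	if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
 *		ERR("UST app sock %d release object failed with ret %d",
 *				app->sock, ret);
 *	}
 */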
331744e3 551/*
1b532a60
DG
552 * Push metadata to consumer socket.
553 *
dc2bbdae
MD
554 * RCU read-side lock must be held to guarantee existence of socket.
555 * Must be called with the ust app session lock held.
556 * Must be called with the registry lock held.
331744e3
JD
557 *
558 * On success, return the len of metadata pushed or else a negative value.
2c57e06d
MD
559 * Returning a -EPIPE return value means we could not send the metadata,
560 * but it can be caused by recoverable errors (e.g. the application has
561 * terminated concurrently).
331744e3
JD
562 */
563ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
564 struct consumer_socket *socket, int send_zero_data)
565{
566 int ret;
567 char *metadata_str = NULL;
c585821b 568 size_t len, offset, new_metadata_len_sent;
331744e3 569 ssize_t ret_val;
93ec662e 570 uint64_t metadata_key, metadata_version;
331744e3
JD
571
572 assert(registry);
573 assert(socket);
1b532a60 574
c585821b
MD
575 metadata_key = registry->metadata_key;
576
ce34fcd0 577 /*
dc2bbdae
MD
578 * Means that no metadata was assigned to the session. This can
579 * happen if no start has been done previously.
ce34fcd0 580 */
c585821b 581 if (!metadata_key) {
ce34fcd0
MD
582 return 0;
583 }
584
331744e3
JD
585 offset = registry->metadata_len_sent;
586 len = registry->metadata_len - registry->metadata_len_sent;
c585821b 587 new_metadata_len_sent = registry->metadata_len;
93ec662e 588 metadata_version = registry->metadata_version;
331744e3
JD
589 if (len == 0) {
590 DBG3("No metadata to push for metadata key %" PRIu64,
591 registry->metadata_key);
592 ret_val = len;
593 if (send_zero_data) {
594 DBG("No metadata to push");
595 goto push_data;
596 }
597 goto end;
598 }
599
600 /* Allocate only what we have to send. */
601 metadata_str = zmalloc(len);
602 if (!metadata_str) {
603 PERROR("zmalloc ust app metadata string");
604 ret_val = -ENOMEM;
605 goto error;
606 }
c585821b 607 /* Copy what we haven't sent out. */
331744e3 608 memcpy(metadata_str, registry->metadata + offset, len);
331744e3
JD
609
610push_data:
c585821b
MD
611 pthread_mutex_unlock(&registry->lock);
612 /*
613 * We need to unlock the registry while we push metadata to
614 * break a circular dependency between the consumerd metadata
615 * lock and the sessiond registry lock. Indeed, pushing metadata
616 * to the consumerd awaits that it gets pushed all the way to
617 * relayd, but doing so requires grabbing the metadata lock. If
618 * a concurrent metadata request is being performed by
619 * consumerd, this can try to grab the registry lock on the
620 * sessiond while holding the metadata lock on the consumer
621 * daemon. Those push and pull schemes are performed on two
622 * different bidirectional communication sockets.
623 */
624 ret = consumer_push_metadata(socket, metadata_key,
93ec662e 625 metadata_str, len, offset, metadata_version);
c585821b 626 pthread_mutex_lock(&registry->lock);
331744e3 627 if (ret < 0) {
000baf6a 628 /*
dc2bbdae
MD
629 * There is an acceptable race here between the registry
630 * metadata key assignment and the creation on the
631 * consumer. The session daemon can concurrently push
632 * metadata for this registry while being created on the
633 * consumer since the metadata key of the registry is
634 * assigned *before* it is set up to avoid the consumer
635 * asking for metadata that could possibly not be found
636 * in the session daemon.
000baf6a 637 *
dc2bbdae
MD
638 * The metadata will get pushed either by the session
639 * being stopped or the consumer requesting metadata if
640 * that race is triggered.
000baf6a
DG
641 */
642 if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
643 ret = 0;
c585821b
MD
644 } else {
645 ERR("Error pushing metadata to consumer");
000baf6a 646 }
331744e3
JD
647 ret_val = ret;
648 goto error_push;
c585821b
MD
649 } else {
650 /*
651 * Metadata may have been concurrently pushed, since
652 * we're not holding the registry lock while pushing to
653 * consumer. This is handled by the fact that we send
654 * the metadata content, size, and the offset at which
655 * that metadata belongs. This may arrive out of order
656 * on the consumer side, and the consumer is able to
657 * deal with overlapping fragments. The consumer
658 * supports overlapping fragments, which must be
659 * contiguous starting from offset 0. We keep the
660 * largest metadata_len_sent value of the concurrent
661 * send.
662 */
663 registry->metadata_len_sent =
664 max_t(size_t, registry->metadata_len_sent,
665 new_metadata_len_sent);
331744e3 666 }
331744e3
JD
667 free(metadata_str);
668 return len;
669
670end:
671error:
ce34fcd0
MD
672 if (ret_val) {
673 /*
dc2bbdae
MD
674 * On error, flag the registry that the metadata is
675 * closed. We were unable to push anything and this
676 * means that either the consumer is not responding or
677 * the metadata cache has been destroyed on the
678 * consumer.
ce34fcd0
MD
679 */
680 registry->metadata_closed = 1;
681 }
331744e3
JD
682error_push:
683 free(metadata_str);
684 return ret_val;
685}
686
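/*
 * Calling-contract sketch (not part of the original file): a caller holds the
 * RCU read-side lock (for the consumer socket), the ust app session lock and
 * the registry lock, then treats -EPIPE as a recoverable condition, as
 * push_metadata() below does.
 *
 *	pthread_mutex_lock(&registry->lock);
 *	ret = ust_app_push_metadata(registry, socket, 0);
 *	pthread_mutex_unlock(&registry->lock);
 *	if (ret < 0 && ret != -EPIPE) {
 *		// hard error: consumer unresponsive or metadata cache destroyed
 *	}
 */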
d88aee68 687/*
ce34fcd0 688 * For a given application and session, push metadata to consumer.
331744e3
JD
689 * Either sock or consumer is required: if sock is NULL, the default
690 * socket to send the metadata is retrieved from consumer; if sock
691 * is not NULL, we use it to send the metadata.
ce34fcd0 692 * RCU read-side lock must be held while calling this function,
dc2bbdae
MD
693 * therefore ensuring existence of registry. It also ensures existence
694 * of socket throughout this function.
d88aee68
DG
695 *
696 * Return 0 on success else a negative error.
2c57e06d
MD
697 * Returning a -EPIPE return value means we could not send the metadata,
698 * but it can be caused by recoverable errors (e.g. the application has
699 * terminated concurrently).
d88aee68 700 */
7972aab2
DG
701static int push_metadata(struct ust_registry_session *registry,
702 struct consumer_output *consumer)
d88aee68 703{
331744e3
JD
704 int ret_val;
705 ssize_t ret;
d88aee68
DG
706 struct consumer_socket *socket;
707
7972aab2
DG
708 assert(registry);
709 assert(consumer);
710
ce34fcd0 711 pthread_mutex_lock(&registry->lock);
ce34fcd0 712 if (registry->metadata_closed) {
dc2bbdae
MD
713 ret_val = -EPIPE;
714 goto error;
d88aee68
DG
715 }
716
d88aee68 717 /* Get consumer socket to use to push the metadata.*/
7972aab2
DG
718 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
719 consumer);
d88aee68 720 if (!socket) {
331744e3 721 ret_val = -1;
ce34fcd0 722 goto error;
d88aee68
DG
723 }
724
331744e3 725 ret = ust_app_push_metadata(registry, socket, 0);
d88aee68 726 if (ret < 0) {
331744e3 727 ret_val = ret;
ce34fcd0 728 goto error;
d88aee68 729 }
dc2bbdae 730 pthread_mutex_unlock(&registry->lock);
d88aee68
DG
731 return 0;
732
ce34fcd0 733error:
dc2bbdae 734 pthread_mutex_unlock(&registry->lock);
331744e3 735 return ret_val;
d88aee68
DG
736}
737
738/*
739 * Send to the consumer a close metadata command for the given session. Once
740 * done, the metadata channel is deleted and the session metadata pointer is
dc2bbdae 741 * nullified. The session lock MUST be held unless the application is
d88aee68
DG
742 * in the destroy path.
743 *
a70ac2f4
MD
744 * Do not hold the registry lock while communicating with the consumerd, because
745 * doing so causes inter-process deadlocks between consumerd and sessiond with
746 * the metadata request notification.
747 *
d88aee68
DG
748 * Return 0 on success else a negative value.
749 */
7972aab2
DG
750static int close_metadata(struct ust_registry_session *registry,
751 struct consumer_output *consumer)
d88aee68
DG
752{
753 int ret;
754 struct consumer_socket *socket;
a70ac2f4
MD
755 uint64_t metadata_key;
756 bool registry_was_already_closed;
d88aee68 757
7972aab2
DG
758 assert(registry);
759 assert(consumer);
d88aee68 760
7972aab2
DG
761 rcu_read_lock();
762
ce34fcd0 763 pthread_mutex_lock(&registry->lock);
a70ac2f4
MD
764 metadata_key = registry->metadata_key;
765 registry_was_already_closed = registry->metadata_closed;
766 if (metadata_key != 0) {
767 /*
768 * Metadata closed. Even on error this means that the consumer
769 * is not responding or not found so either way a second close
770 * should NOT be emitted for this registry.
771 */
772 registry->metadata_closed = 1;
773 }
774 pthread_mutex_unlock(&registry->lock);
ce34fcd0 775
a70ac2f4 776 if (metadata_key == 0 || registry_was_already_closed) {
d88aee68 777 ret = 0;
1b532a60 778 goto end;
d88aee68
DG
779 }
780
d88aee68 781 /* Get consumer socket to use to push the metadata.*/
7972aab2
DG
782 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
783 consumer);
d88aee68
DG
784 if (!socket) {
785 ret = -1;
a70ac2f4 786 goto end;
d88aee68
DG
787 }
788
a70ac2f4 789 ret = consumer_close_metadata(socket, metadata_key);
d88aee68 790 if (ret < 0) {
a70ac2f4 791 goto end;
d88aee68
DG
792 }
793
1b532a60 794end:
7972aab2 795 rcu_read_unlock();
d88aee68
DG
796 return ret;
797}
798
36b588ed
MD
799/*
800 * We need to execute ht_destroy outside of RCU read-side critical
0b2dc8df
MD
801 * section and outside of call_rcu thread, so we postpone its execution
802 * using ht_cleanup_push. It is simpler than changing the semantics of
803 * the many callers of delete_ust_app_session().
36b588ed
MD
804 */
805static
806void delete_ust_app_session_rcu(struct rcu_head *head)
807{
808 struct ust_app_session *ua_sess =
809 caa_container_of(head, struct ust_app_session, rcu_head);
810
0b2dc8df 811 ht_cleanup_push(ua_sess->channels);
36b588ed
MD
812 free(ua_sess);
813}
814
d80a6244
DG
815/*
816 * Delete ust app session safely. RCU read lock must be held before calling
817 * this function.
82cac6d2
JG
818 *
819 * The session list lock must be held by the caller.
d80a6244 820 */
8b366481 821static
d0b96690
DG
822void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
823 struct ust_app *app)
d80a6244
DG
824{
825 int ret;
bec39940 826 struct lttng_ht_iter iter;
d80a6244 827 struct ust_app_channel *ua_chan;
7972aab2 828 struct ust_registry_session *registry;
d80a6244 829
d88aee68
DG
830 assert(ua_sess);
831
1b532a60
DG
832 pthread_mutex_lock(&ua_sess->lock);
833
b161602a
MD
834 assert(!ua_sess->deleted);
835 ua_sess->deleted = true;
836
7972aab2 837 registry = get_session_registry(ua_sess);
fad1ed2f 838 /* Registry can be null on error path during initialization. */
ce34fcd0 839 if (registry) {
d88aee68 840 /* Push metadata for application before freeing the application. */
7972aab2 841 (void) push_metadata(registry, ua_sess->consumer);
d88aee68 842
7972aab2
DG
843 /*
844 * Don't ask to close metadata for global per UID buffers. Close
1b532a60
DG
845 * metadata only on destroy trace session in this case. Also, the
846 * previous push metadata could have flagged the metadata registry to
847 * close, so don't send a close command if it is already closed.
7972aab2 848 */
ce34fcd0 849 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
7972aab2
DG
850 /* And ask to close it for this session registry. */
851 (void) close_metadata(registry, ua_sess->consumer);
852 }
d80a6244
DG
853 }
854
bec39940
DG
855 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
856 node.node) {
857 ret = lttng_ht_del(ua_sess->channels, &iter);
525b0740 858 assert(!ret);
d0b96690 859 delete_ust_app_channel(sock, ua_chan, app);
d80a6244 860 }
d80a6244 861
7972aab2
DG
862 /* In case of per PID, the registry is kept in the session. */
863 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
864 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
865 if (reg_pid) {
fad1ed2f
JR
866 /*
867 * Registry can be null on error path during
868 * initialization.
869 */
7972aab2
DG
870 buffer_reg_pid_remove(reg_pid);
871 buffer_reg_pid_destroy(reg_pid);
872 }
873 }
d0b96690 874
aee6bafd 875 if (ua_sess->handle != -1) {
fb45065e 876 pthread_mutex_lock(&app->sock_lock);
ffe60014 877 ret = ustctl_release_handle(sock, ua_sess->handle);
fb45065e 878 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
879 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
880 ERR("UST app sock %d release session handle failed with ret %d",
881 sock, ret);
882 }
10b56aef
MD
883 /* Remove session from application UST object descriptor. */
884 iter.iter.node = &ua_sess->ust_objd_node.node;
885 ret = lttng_ht_del(app->ust_sessions_objd, &iter);
886 assert(!ret);
aee6bafd 887 }
10b56aef 888
1b532a60
DG
889 pthread_mutex_unlock(&ua_sess->lock);
890
6addfa37
MD
891 consumer_output_put(ua_sess->consumer);
892
36b588ed 893 call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
d80a6244 894}
91d76f53
DG
895
896/*
284d8f55
DG
897 * Delete a traceable application structure from the global list. Never call
898 * this function outside of a call_rcu call.
36b588ed
MD
899 *
900 * RCU read side lock should _NOT_ be held when calling this function.
91d76f53 901 */
8b366481
DG
902static
903void delete_ust_app(struct ust_app *app)
91d76f53 904{
8b366481 905 int ret, sock;
d42f20df 906 struct ust_app_session *ua_sess, *tmp_ua_sess;
44d3bd01 907
82cac6d2
JG
908 /*
909 * The session list lock must be held during this function to guarantee
910 * the existence of ua_sess.
911 */
912 session_lock_list();
d80a6244 913 /* Delete ust app sessions info */
852d0037
DG
914 sock = app->sock;
915 app->sock = -1;
d80a6244 916
8b366481 917 /* Wipe sessions */
d42f20df
DG
918 cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
919 teardown_node) {
920 /* Free every object in the session and the session. */
36b588ed 921 rcu_read_lock();
d0b96690 922 delete_ust_app_session(sock, ua_sess, app);
36b588ed 923 rcu_read_unlock();
d80a6244 924 }
36b588ed 925
0b2dc8df 926 ht_cleanup_push(app->sessions);
10b56aef 927 ht_cleanup_push(app->ust_sessions_objd);
0b2dc8df 928 ht_cleanup_push(app->ust_objd);
d80a6244 929
6414a713 930 /*
852d0037
DG
931 * Wait until we have deleted the application from the sock hash table
932 * before closing this socket, otherwise an application could re-use the
933 * socket ID and race with the teardown, using the same hash table entry.
934 *
935 * It's OK to leave the close in call_rcu. We want it to stay unique for
936 * all RCU readers that could run concurrently with unregister app,
937 * therefore we _need_ to only close that socket after a grace period. So
938 * it should stay in this RCU callback.
939 *
940 * This close() is a very important step of the synchronization model so
941 * every modification to this function must be carefully reviewed.
6414a713 942 */
799e2c4f
MD
943 ret = close(sock);
944 if (ret) {
945 PERROR("close");
946 }
4063050c 947 lttng_fd_put(LTTNG_FD_APPS, 1);
d80a6244 948
852d0037 949 DBG2("UST app pid %d deleted", app->pid);
284d8f55 950 free(app);
82cac6d2 951 session_unlock_list();
099e26bd
DG
952}
953
954/*
f6a9efaa 955 * URCU intermediate call to delete an UST app.
099e26bd 956 */
8b366481
DG
957static
958void delete_ust_app_rcu(struct rcu_head *head)
099e26bd 959{
bec39940
DG
960 struct lttng_ht_node_ulong *node =
961 caa_container_of(head, struct lttng_ht_node_ulong, head);
f6a9efaa 962 struct ust_app *app =
852d0037 963 caa_container_of(node, struct ust_app, pid_n);
f6a9efaa 964
852d0037 965 DBG3("Call RCU deleting app PID %d", app->pid);
f6a9efaa 966 delete_ust_app(app);
099e26bd
DG
967}
968
ffe60014
DG
969/*
970 * Delete the session from the application ht and delete the data structure by
971 * freeing every object inside and releasing them.
82cac6d2
JG
972 *
973 * The session list lock must be held by the caller.
ffe60014 974 */
d0b96690 975static void destroy_app_session(struct ust_app *app,
ffe60014
DG
976 struct ust_app_session *ua_sess)
977{
978 int ret;
979 struct lttng_ht_iter iter;
980
981 assert(app);
982 assert(ua_sess);
983
984 iter.iter.node = &ua_sess->node.node;
985 ret = lttng_ht_del(app->sessions, &iter);
986 if (ret) {
987 /* Already scheduled for teardown. */
988 goto end;
989 }
990
991 /* Once deleted, free the data structure. */
d0b96690 992 delete_ust_app_session(app->sock, ua_sess, app);
ffe60014
DG
993
994end:
995 return;
996}
997
8b366481
DG
998/*
999 * Alloc new UST app session.
1000 */
1001static
40bbd087 1002struct ust_app_session *alloc_ust_app_session(void)
8b366481
DG
1003{
1004 struct ust_app_session *ua_sess;
1005
1006 /* Init most of the default values by allocating and zeroing */
1007 ua_sess = zmalloc(sizeof(struct ust_app_session));
1008 if (ua_sess == NULL) {
1009 PERROR("malloc");
ffe60014 1010 goto error_free;
8b366481
DG
1011 }
1012
1013 ua_sess->handle = -1;
bec39940 1014 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
ad7a9107 1015 ua_sess->metadata_attr.type = LTTNG_UST_CHAN_METADATA;
84ad93e8 1016 pthread_mutex_init(&ua_sess->lock, NULL);
ad7a9107 1017
8b366481
DG
1018 return ua_sess;
1019
ffe60014 1020error_free:
8b366481
DG
1021 return NULL;
1022}
1023
1024/*
1025 * Alloc new UST app channel.
1026 */
1027static
1028struct ust_app_channel *alloc_ust_app_channel(char *name,
d0b96690 1029 struct ust_app_session *ua_sess,
ffe60014 1030 struct lttng_ust_channel_attr *attr)
8b366481
DG
1031{
1032 struct ust_app_channel *ua_chan;
1033
1034 /* Init most of the default values by allocating and zeroing */
1035 ua_chan = zmalloc(sizeof(struct ust_app_channel));
1036 if (ua_chan == NULL) {
1037 PERROR("malloc");
1038 goto error;
1039 }
1040
1041 /* Setup channel name */
1042 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
1043 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
1044
1045 ua_chan->enabled = 1;
1046 ua_chan->handle = -1;
45893984 1047 ua_chan->session = ua_sess;
ffe60014 1048 ua_chan->key = get_next_channel_key();
bec39940
DG
1049 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1050 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
1051 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
8b366481
DG
1052
1053 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
31746f93 1054 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
8b366481
DG
1055
1056 /* Copy attributes */
1057 if (attr) {
ffe60014 1058 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
2fe6e7f5
DG
1059 ua_chan->attr.subbuf_size = attr->subbuf_size;
1060 ua_chan->attr.num_subbuf = attr->num_subbuf;
1061 ua_chan->attr.overwrite = attr->overwrite;
1062 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
1063 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
1064 ua_chan->attr.output = attr->output;
491d1539 1065 ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
8b366481 1066 }
ffe60014
DG
1067 /* By default, the channel is a per cpu channel. */
1068 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
8b366481
DG
1069
1070 DBG3("UST app channel %s allocated", ua_chan->name);
1071
1072 return ua_chan;
1073
1074error:
1075 return NULL;
1076}
1077
37f1c236
DG
1078/*
1079 * Allocate and initialize a UST app stream.
1080 *
1081 * Return newly allocated stream pointer or NULL on error.
1082 */
ffe60014 1083struct ust_app_stream *ust_app_alloc_stream(void)
37f1c236
DG
1084{
1085 struct ust_app_stream *stream = NULL;
1086
1087 stream = zmalloc(sizeof(*stream));
1088 if (stream == NULL) {
1089 PERROR("zmalloc ust app stream");
1090 goto error;
1091 }
1092
1093 /* Zero could be a valid value for a handle so set it to -1. */
1094 stream->handle = -1;
1095
1096error:
1097 return stream;
1098}
1099
8b366481
DG
1100/*
1101 * Alloc new UST app event.
1102 */
1103static
1104struct ust_app_event *alloc_ust_app_event(char *name,
1105 struct lttng_ust_event *attr)
1106{
1107 struct ust_app_event *ua_event;
1108
1109 /* Init most of the default values by allocating and zeroing */
1110 ua_event = zmalloc(sizeof(struct ust_app_event));
1111 if (ua_event == NULL) {
20533947 1112 PERROR("Failed to allocate ust_app_event structure");
8b366481
DG
1113 goto error;
1114 }
1115
1116 ua_event->enabled = 1;
1117 strncpy(ua_event->name, name, sizeof(ua_event->name));
1118 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
bec39940 1119 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
8b366481
DG
1120
1121 /* Copy attributes */
1122 if (attr) {
1123 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
1124 }
1125
1126 DBG3("UST app event %s allocated", ua_event->name);
1127
1128 return ua_event;
1129
1130error:
1131 return NULL;
1132}
1133
1134/*
1135 * Alloc new UST app context.
1136 */
1137static
bdf64013 1138struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
8b366481
DG
1139{
1140 struct ust_app_ctx *ua_ctx;
1141
1142 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
1143 if (ua_ctx == NULL) {
1144 goto error;
1145 }
1146
31746f93
DG
1147 CDS_INIT_LIST_HEAD(&ua_ctx->list);
1148
8b366481
DG
1149 if (uctx) {
1150 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
bdf64013
JG
1151 if (uctx->ctx == LTTNG_UST_CONTEXT_APP_CONTEXT) {
1152 char *provider_name = NULL, *ctx_name = NULL;
1153
1154 provider_name = strdup(uctx->u.app_ctx.provider_name);
1155 ctx_name = strdup(uctx->u.app_ctx.ctx_name);
1156 if (!provider_name || !ctx_name) {
1157 free(provider_name);
1158 free(ctx_name);
1159 goto error;
1160 }
1161
1162 ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
1163 ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
1164 }
8b366481
DG
1165 }
1166
1167 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
8b366481 1168 return ua_ctx;
bdf64013
JG
1169error:
1170 free(ua_ctx);
1171 return NULL;
8b366481
DG
1172}
1173
025faf73
DG
1174/*
1175 * Allocate a filter and copy the given original filter.
1176 *
1177 * Return allocated filter or NULL on error.
1178 */
51755dc8
JG
1179static struct lttng_filter_bytecode *copy_filter_bytecode(
1180 struct lttng_filter_bytecode *orig_f)
025faf73 1181{
51755dc8 1182 struct lttng_filter_bytecode *filter = NULL;
025faf73
DG
1183
1184 /* Copy filter bytecode */
1185 filter = zmalloc(sizeof(*filter) + orig_f->len);
1186 if (!filter) {
51755dc8 1187 PERROR("zmalloc alloc filter bytecode");
025faf73
DG
1188 goto error;
1189 }
1190
1191 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
1192
1193error:
1194 return filter;
1195}
1196
51755dc8
JG
1197/*
1198 * Create a liblttng-ust filter bytecode from given bytecode.
1199 *
1200 * Return allocated filter or NULL on error.
1201 */
1202static struct lttng_ust_filter_bytecode *create_ust_bytecode_from_bytecode(
1203 struct lttng_filter_bytecode *orig_f)
1204{
1205 struct lttng_ust_filter_bytecode *filter = NULL;
1206
1207 /* Copy filter bytecode */
1208 filter = zmalloc(sizeof(*filter) + orig_f->len);
1209 if (!filter) {
1210 PERROR("zmalloc alloc ust filter bytecode");
1211 goto error;
1212 }
1213
1214 assert(sizeof(struct lttng_filter_bytecode) ==
1215 sizeof(struct lttng_ust_filter_bytecode));
1216 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
1217error:
1218 return filter;
1219}
1220
099e26bd 1221/*
421cb601
DG
1222 * Find an ust_app using the sock and return it. RCU read side lock must be
1223 * held before calling this helper function.
099e26bd 1224 */
f20baf8e 1225struct ust_app *ust_app_find_by_sock(int sock)
099e26bd 1226{
bec39940 1227 struct lttng_ht_node_ulong *node;
bec39940 1228 struct lttng_ht_iter iter;
f6a9efaa 1229
852d0037 1230 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
bec39940 1231 node = lttng_ht_iter_get_node_ulong(&iter);
f6a9efaa
DG
1232 if (node == NULL) {
1233 DBG2("UST app find by sock %d not found", sock);
f6a9efaa
DG
1234 goto error;
1235 }
852d0037
DG
1236
1237 return caa_container_of(node, struct ust_app, sock_n);
f6a9efaa
DG
1238
1239error:
1240 return NULL;
099e26bd
DG
1241}
1242
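/*
 * Usage sketch (not part of the original file): as stated above, the lookup is
 * only valid inside an RCU read-side critical section, since the returned
 * ust_app can be reclaimed by a concurrent unregistration once the section
 * ends.
 *
 *	rcu_read_lock();
 *	app = ust_app_find_by_sock(sock);
 *	if (app) {
 *		// use app only while still inside the read-side critical section
 *	}
 *	rcu_read_unlock();
 */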
d0b96690
DG
1243/*
1244 * Find an ust_app using the notify sock and return it. RCU read side lock must
1245 * be held before calling this helper function.
1246 */
1247static struct ust_app *find_app_by_notify_sock(int sock)
1248{
1249 struct lttng_ht_node_ulong *node;
1250 struct lttng_ht_iter iter;
1251
1252 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1253 &iter);
1254 node = lttng_ht_iter_get_node_ulong(&iter);
1255 if (node == NULL) {
1256 DBG2("UST app find by notify sock %d not found", sock);
1257 goto error;
1258 }
1259
1260 return caa_container_of(node, struct ust_app, notify_sock_n);
1261
1262error:
1263 return NULL;
1264}
1265
025faf73
DG
1266/*
1267 * Look up a UST app event based on the event name, filter bytecode, event
1268 * loglevel and exclusions.
1269 *
1270 * Return an ust_app_event object or NULL on error.
1271 */
18eace3b 1272static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
88e3c2f5 1273 const char *name, const struct lttng_filter_bytecode *filter,
2106efa0 1274 int loglevel_value,
39c5a3a7 1275 const struct lttng_event_exclusion *exclusion)
18eace3b
DG
1276{
1277 struct lttng_ht_iter iter;
1278 struct lttng_ht_node_str *node;
1279 struct ust_app_event *event = NULL;
1280 struct ust_app_ht_key key;
18eace3b
DG
1281
1282 assert(name);
1283 assert(ht);
1284
1285 /* Setup key for event lookup. */
1286 key.name = name;
1287 key.filter = filter;
2106efa0 1288 key.loglevel_type = loglevel_value;
39c5a3a7 1289 /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
51755dc8 1290 key.exclusion = exclusion;
18eace3b 1291
025faf73
DG
1292 /* Lookup using the event name as hash and a custom match fct. */
1293 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1294 ht_match_ust_app_event, &key, &iter.iter);
18eace3b
DG
1295 node = lttng_ht_iter_get_node_str(&iter);
1296 if (node == NULL) {
1297 goto end;
1298 }
1299
1300 event = caa_container_of(node, struct ust_app_event, node);
1301
1302end:
18eace3b
DG
1303 return event;
1304}
1305
55cc08a6
DG
1306/*
1307 * Create the channel context on the tracer.
d0b96690
DG
1308 *
1309 * Called with UST app session lock held.
55cc08a6
DG
1310 */
1311static
1312int create_ust_channel_context(struct ust_app_channel *ua_chan,
1313 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1314{
1315 int ret;
1316
840cb59c 1317 health_code_update();
86acf0da 1318
fb45065e 1319 pthread_mutex_lock(&app->sock_lock);
852d0037 1320 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
55cc08a6 1321 ua_chan->obj, &ua_ctx->obj);
fb45065e 1322 pthread_mutex_unlock(&app->sock_lock);
55cc08a6 1323 if (ret < 0) {
ffe60014
DG
1324 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1325 ERR("UST app create channel context failed for app (pid: %d) "
1326 "with ret %d", app->pid, ret);
1327 } else {
3757b385
DG
1328 /*
1329 * This is normal behavior, an application can die during the
1330 * creation process. Don't report an error so the execution can
1331 * continue normally.
1332 */
1333 ret = 0;
88e3c2f5 1334 DBG3("UST app add context failed. Application is dead.");
ffe60014 1335 }
55cc08a6
DG
1336 goto error;
1337 }
1338
1339 ua_ctx->handle = ua_ctx->obj->handle;
1340
d0b96690
DG
1341 DBG2("UST app context handle %d created successfully for channel %s",
1342 ua_ctx->handle, ua_chan->name);
55cc08a6
DG
1343
1344error:
840cb59c 1345 health_code_update();
55cc08a6
DG
1346 return ret;
1347}
1348
53a80697
MD
1349/*
1350 * Set the filter on the tracer.
1351 */
1352static
1353int set_ust_event_filter(struct ust_app_event *ua_event,
1354 struct ust_app *app)
1355{
1356 int ret;
51755dc8 1357 struct lttng_ust_filter_bytecode *ust_bytecode = NULL;
53a80697 1358
840cb59c 1359 health_code_update();
86acf0da 1360
53a80697 1361 if (!ua_event->filter) {
86acf0da
DG
1362 ret = 0;
1363 goto error;
53a80697
MD
1364 }
1365
51755dc8
JG
1366 ust_bytecode = create_ust_bytecode_from_bytecode(ua_event->filter);
1367 if (!ust_bytecode) {
1368 ret = -LTTNG_ERR_NOMEM;
1369 goto error;
1370 }
fb45065e 1371 pthread_mutex_lock(&app->sock_lock);
51755dc8 1372 ret = ustctl_set_filter(app->sock, ust_bytecode,
53a80697 1373 ua_event->obj);
fb45065e 1374 pthread_mutex_unlock(&app->sock_lock);
53a80697 1375 if (ret < 0) {
ffe60014
DG
1376 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1377 ERR("UST app event %s filter failed for app (pid: %d) "
1378 "with ret %d", ua_event->attr.name, app->pid, ret);
1379 } else {
3757b385
DG
1380 /*
1381 * This is normal behavior, an application can die during the
1382 * creation process. Don't report an error so the execution can
1383 * continue normally.
1384 */
1385 ret = 0;
ffe60014
DG
1386 DBG3("UST app filter event failed. Application is dead.");
1387 }
53a80697
MD
1388 goto error;
1389 }
1390
1391 DBG2("UST filter set successfully for event %s", ua_event->name);
1392
1393error:
840cb59c 1394 health_code_update();
51755dc8 1395 free(ust_bytecode);
53a80697
MD
1396 return ret;
1397}
1398
51755dc8
JG
1399static
1400struct lttng_ust_event_exclusion *create_ust_exclusion_from_exclusion(
1401 struct lttng_event_exclusion *exclusion)
1402{
1403 struct lttng_ust_event_exclusion *ust_exclusion = NULL;
1404 size_t exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
1405 LTTNG_UST_SYM_NAME_LEN * exclusion->count;
1406
1407 ust_exclusion = zmalloc(exclusion_alloc_size);
1408 if (!ust_exclusion) {
1409 PERROR("malloc");
1410 goto end;
1411 }
1412
1413 assert(sizeof(struct lttng_event_exclusion) ==
1414 sizeof(struct lttng_ust_event_exclusion));
1415 memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
1416end:
1417 return ust_exclusion;
1418}
1419
7cc9a73c
JI
1420/*
1421 * Set event exclusions on the tracer.
1422 */
1423static
1424int set_ust_event_exclusion(struct ust_app_event *ua_event,
1425 struct ust_app *app)
1426{
1427 int ret;
51755dc8 1428 struct lttng_ust_event_exclusion *ust_exclusion = NULL;
7cc9a73c
JI
1429
1430 health_code_update();
1431
1432 if (!ua_event->exclusion || !ua_event->exclusion->count) {
1433 ret = 0;
1434 goto error;
1435 }
1436
51755dc8
JG
1437 ust_exclusion = create_ust_exclusion_from_exclusion(
1438 ua_event->exclusion);
1439 if (!ust_exclusion) {
1440 ret = -LTTNG_ERR_NOMEM;
1441 goto error;
1442 }
fb45065e 1443 pthread_mutex_lock(&app->sock_lock);
51755dc8 1444 ret = ustctl_set_exclusion(app->sock, ust_exclusion, ua_event->obj);
fb45065e 1445 pthread_mutex_unlock(&app->sock_lock);
7cc9a73c
JI
1446 if (ret < 0) {
1447 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1448 ERR("UST app event %s exclusions failed for app (pid: %d) "
1449 "with ret %d", ua_event->attr.name, app->pid, ret);
1450 } else {
1451 /*
1452 * This is normal behavior, an application can die during the
1453 * creation process. Don't report an error so the execution can
1454 * continue normally.
1455 */
1456 ret = 0;
1457 DBG3("UST app event exclusion failed. Application is dead.");
1458 }
1459 goto error;
1460 }
1461
1462 DBG2("UST exclusion set successfully for event %s", ua_event->name);
1463
1464error:
1465 health_code_update();
51755dc8 1466 free(ust_exclusion);
7cc9a73c
JI
1467 return ret;
1468}
1469
9730260e
DG
1470/*
1471 * Disable the specified event on the UST tracer for the UST session.
1472 */
1473static int disable_ust_event(struct ust_app *app,
1474 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1475{
1476 int ret;
1477
840cb59c 1478 health_code_update();
86acf0da 1479
fb45065e 1480 pthread_mutex_lock(&app->sock_lock);
852d0037 1481 ret = ustctl_disable(app->sock, ua_event->obj);
fb45065e 1482 pthread_mutex_unlock(&app->sock_lock);
9730260e 1483 if (ret < 0) {
ffe60014
DG
1484 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1485 ERR("UST app event %s disable failed for app (pid: %d) "
1486 "and session handle %d with ret %d",
1487 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1488 } else {
3757b385
DG
1489 /*
1490 * This is normal behavior, an application can die during the
1491 * creation process. Don't report an error so the execution can
1492 * continue normally.
1493 */
1494 ret = 0;
ffe60014
DG
1495 DBG3("UST app disable event failed. Application is dead.");
1496 }
9730260e
DG
1497 goto error;
1498 }
1499
1500 DBG2("UST app event %s disabled successfully for app (pid: %d)",
852d0037 1501 ua_event->attr.name, app->pid);
9730260e
DG
1502
1503error:
840cb59c 1504 health_code_update();
9730260e
DG
1505 return ret;
1506}
1507
78f0bacd
DG
1508/*
1509 * Disable the specified channel on the UST tracer for the UST session.
1510 */
1511static int disable_ust_channel(struct ust_app *app,
1512 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1513{
1514 int ret;
1515
840cb59c 1516 health_code_update();
86acf0da 1517
fb45065e 1518 pthread_mutex_lock(&app->sock_lock);
852d0037 1519 ret = ustctl_disable(app->sock, ua_chan->obj);
fb45065e 1520 pthread_mutex_unlock(&app->sock_lock);
78f0bacd 1521 if (ret < 0) {
ffe60014
DG
1522 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1523 ERR("UST app channel %s disable failed for app (pid: %d) "
1524 "and session handle %d with ret %d",
1525 ua_chan->name, app->pid, ua_sess->handle, ret);
1526 } else {
3757b385
DG
1527 /*
1528 * This is normal behavior, an application can die during the
1529 * creation process. Don't report an error so the execution can
1530 * continue normally.
1531 */
1532 ret = 0;
ffe60014
DG
1533 DBG3("UST app disable channel failed. Application is dead.");
1534 }
78f0bacd
DG
1535 goto error;
1536 }
1537
78f0bacd 1538 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
852d0037 1539 ua_chan->name, app->pid);
78f0bacd
DG
1540
1541error:
840cb59c 1542 health_code_update();
78f0bacd
DG
1543 return ret;
1544}
1545
1546/*
1547 * Enable the specified channel on the UST tracer for the UST session.
1548 */
1549static int enable_ust_channel(struct ust_app *app,
1550 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1551{
1552 int ret;
1553
840cb59c 1554 health_code_update();
86acf0da 1555
fb45065e 1556 pthread_mutex_lock(&app->sock_lock);
852d0037 1557 ret = ustctl_enable(app->sock, ua_chan->obj);
fb45065e 1558 pthread_mutex_unlock(&app->sock_lock);
78f0bacd 1559 if (ret < 0) {
ffe60014
DG
1560 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1561 ERR("UST app channel %s enable failed for app (pid: %d) "
1562 "and session handle %d with ret %d",
1563 ua_chan->name, app->pid, ua_sess->handle, ret);
1564 } else {
3757b385
DG
1565 /*
1566 * This is normal behavior, an application can die during the
1567 * creation process. Don't report an error so the execution can
1568 * continue normally.
1569 */
1570 ret = 0;
ffe60014
DG
1571 DBG3("UST app enable channel failed. Application is dead.");
1572 }
78f0bacd
DG
1573 goto error;
1574 }
1575
1576 ua_chan->enabled = 1;
1577
1578 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
852d0037 1579 ua_chan->name, app->pid);
78f0bacd
DG
1580
1581error:
840cb59c 1582 health_code_update();
78f0bacd
DG
1583 return ret;
1584}
1585
edb67388
DG
1586/*
1587 * Enable the specified event on the UST tracer for the UST session.
1588 */
1589static int enable_ust_event(struct ust_app *app,
1590 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1591{
1592 int ret;
1593
840cb59c 1594 health_code_update();
86acf0da 1595
fb45065e 1596 pthread_mutex_lock(&app->sock_lock);
852d0037 1597 ret = ustctl_enable(app->sock, ua_event->obj);
fb45065e 1598 pthread_mutex_unlock(&app->sock_lock);
edb67388 1599 if (ret < 0) {
ffe60014
DG
1600 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1601 ERR("UST app event %s enable failed for app (pid: %d) "
1602 "and session handle %d with ret %d",
1603 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1604 } else {
3757b385
DG
1605 /*
1606 * This is normal behavior, an application can die during the
1607 * creation process. Don't report an error so the execution can
1608 * continue normally.
1609 */
1610 ret = 0;
ffe60014
DG
1611 DBG3("UST app enable event failed. Application is dead.");
1612 }
edb67388
DG
1613 goto error;
1614 }
1615
1616 DBG2("UST app event %s enabled successfully for app (pid: %d)",
852d0037 1617 ua_event->attr.name, app->pid);
edb67388
DG
1618
1619error:
840cb59c 1620 health_code_update();
edb67388
DG
1621 return ret;
1622}
1623
099e26bd 1624/*
7972aab2 1625 * Send channel and stream buffers to the application.
4f3ab6ee 1626 *
ffe60014 1627 * Return 0 on success. On error, a negative value is returned.
4f3ab6ee 1628 */
7972aab2
DG
1629static int send_channel_pid_to_ust(struct ust_app *app,
1630 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
4f3ab6ee
DG
1631{
1632 int ret;
ffe60014 1633 struct ust_app_stream *stream, *stmp;
4f3ab6ee
DG
1634
1635 assert(app);
ffe60014 1636 assert(ua_sess);
4f3ab6ee 1637 assert(ua_chan);
4f3ab6ee 1638
840cb59c 1639 health_code_update();
4f3ab6ee 1640
7972aab2
DG
1641 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
1642 app->sock);
86acf0da 1643
ffe60014
DG
1644 /* Send channel to the application. */
1645 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
a7169585
MD
1646 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1647 ret = -ENOTCONN; /* Caused by app exiting. */
1648 goto error;
1649 } else if (ret < 0) {
b551a063
DG
1650 goto error;
1651 }
1652
d88aee68
DG
1653 health_code_update();
1654
ffe60014
DG
1655 /* Send all streams to application. */
1656 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
1657 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
a7169585
MD
1658 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1659 ret = -ENOTCONN; /* Caused by app exiting. */
1660 goto error;
1661 } else if (ret < 0) {
ffe60014
DG
1662 goto error;
1663 }
1664 /* We don't need the stream anymore once sent to the tracer. */
1665 cds_list_del(&stream->list);
fb45065e 1666 delete_ust_app_stream(-1, stream, app);
ffe60014 1667 }
ffe60014
DG
1668 /* Flag the channel that it is sent to the application. */
1669 ua_chan->is_sent = 1;
ffe60014 1670
b551a063 1671error:
840cb59c 1672 health_code_update();
b551a063
DG
1673 return ret;
1674}
1675
91d76f53 1676/*
5b4a0ec0 1677 * Create the specified event onto the UST tracer for a UST session.
d0b96690
DG
1678 *
1679 * Should be called with session mutex held.
91d76f53 1680 */
edb67388
DG
1681static
1682int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
1683 struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
91d76f53 1684{
5b4a0ec0 1685 int ret = 0;
284d8f55 1686
840cb59c 1687 health_code_update();
86acf0da 1688
5b4a0ec0 1689 /* Create UST event on tracer */
fb45065e 1690 pthread_mutex_lock(&app->sock_lock);
852d0037 1691 ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
5b4a0ec0 1692 &ua_event->obj);
fb45065e 1693 pthread_mutex_unlock(&app->sock_lock);
5b4a0ec0 1694 if (ret < 0) {
ffe60014 1695 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
88e3c2f5 1696 abort();
ffe60014
DG
1697 ERR("Error ustctl create event %s for app pid: %d with ret %d",
1698 ua_event->attr.name, app->pid, ret);
1699 } else {
3757b385
DG
1700 /*
1701 * This is normal behavior, an application can die during the
1702 * creation process. Don't report an error so the execution can
1703 * continue normally.
1704 */
1705 ret = 0;
ffe60014
DG
1706 DBG3("UST app create event failed. Application is dead.");
1707 }
5b4a0ec0 1708 goto error;
91d76f53 1709 }
f6a9efaa 1710
5b4a0ec0 1711 ua_event->handle = ua_event->obj->handle;
284d8f55 1712
5b4a0ec0 1713 DBG2("UST app event %s created successfully for pid:%d",
852d0037 1714 ua_event->attr.name, app->pid);
f6a9efaa 1715
840cb59c 1716 health_code_update();
86acf0da 1717
025faf73
DG
1718 /* Set filter if one is present. */
1719 if (ua_event->filter) {
1720 ret = set_ust_event_filter(ua_event, app);
1721 if (ret < 0) {
1722 goto error;
1723 }
1724 }
1725
7cc9a73c
JI
1726 /* Set exclusions for the event */
1727 if (ua_event->exclusion) {
1728 ret = set_ust_event_exclusion(ua_event, app);
1729 if (ret < 0) {
1730 goto error;
1731 }
1732 }
1733
8535a6d9 1734 /* Events are created disabled on the tracer; explicitly enable it if needed. */
40113787
MD
1735 if (ua_event->enabled) {
1736 /*
1737 * We now need to explicitly enable the event, since it
1738 * is now disabled at creation.
1739 */
1740 ret = enable_ust_event(app, ua_sess, ua_event);
1741 if (ret < 0) {
1742 /*
1743 * If we hit an EPERM, something is wrong with our enable call. If
1744 * we get an EEXIST, there is a problem on the tracer side since we
1745 * just created it.
1746 */
1747 switch (ret) {
1748 case -LTTNG_UST_ERR_PERM:
1749 /* Code flow problem */
1750 assert(0);
1751 case -LTTNG_UST_ERR_EXIST:
1752 /* It's OK for our use case. */
1753 ret = 0;
1754 break;
1755 default:
1756 break;
1757 }
1758 goto error;
1759 }
8535a6d9
DG
1760 }
1761
5b4a0ec0 1762error:
840cb59c 1763 health_code_update();
5b4a0ec0 1764 return ret;
91d76f53 1765}
48842b30 1766
5b4a0ec0
DG
1767/*
1768 * Copy data between an UST app event and a LTT event.
1769 */
421cb601 1770static void shadow_copy_event(struct ust_app_event *ua_event,
48842b30
DG
1771 struct ltt_ust_event *uevent)
1772{
b4ffad32
JI
1773 size_t exclusion_alloc_size;
1774
48842b30
DG
1775 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
1776 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1777
fc34caaa
DG
1778 ua_event->enabled = uevent->enabled;
1779
5b4a0ec0
DG
1780 /* Copy event attributes */
1781 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
1782
53a80697
MD
1783 /* Copy filter bytecode */
1784 if (uevent->filter) {
51755dc8 1785 ua_event->filter = copy_filter_bytecode(uevent->filter);
025faf73 1786 /* Filter might be NULL here in case of ENOMEM. */
53a80697 1787 }
b4ffad32
JI
1788
1789 /* Copy exclusion data */
1790 if (uevent->exclusion) {
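/*
 * struct lttng_event_exclusion ends with a flexible array of excluded
 * event names, hence the count-based size computed below.
 */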
51755dc8 1791 exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
b4ffad32
JI
1792 LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
1793 ua_event->exclusion = zmalloc(exclusion_alloc_size);
5f8df26c
JI
1794 if (ua_event->exclusion == NULL) {
1795 PERROR("malloc");
1796 } else {
1797 memcpy(ua_event->exclusion, uevent->exclusion,
1798 exclusion_alloc_size);
b4ffad32
JI
1799 }
1800 }
48842b30
DG
1801}
1802
5b4a0ec0
DG
1803/*
1804 * Copy data between an UST app channel and a LTT channel.
1805 */
421cb601 1806static void shadow_copy_channel(struct ust_app_channel *ua_chan,
48842b30
DG
1807 struct ltt_ust_channel *uchan)
1808{
fc34caaa 1809 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
48842b30
DG
1810
1811 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
1812 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
ffe60014 1813
1624d5b7
JD
1814 ua_chan->tracefile_size = uchan->tracefile_size;
1815 ua_chan->tracefile_count = uchan->tracefile_count;
1816
ffe60014
DG
1817 /* Copy event attributes since the layout is different. */
1818 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
1819 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
1820 ua_chan->attr.overwrite = uchan->attr.overwrite;
1821 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
1822 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
e9404c27 1823 ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
ffe60014 1824 ua_chan->attr.output = uchan->attr.output;
491d1539
MD
1825 ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;
1826
ffe60014
DG
1827 /*
1828 * Note that the attribute channel type is not set since the channel on the
1829 * tracing registry side does not have this information.
1830 */
48842b30 1831
fc34caaa 1832 ua_chan->enabled = uchan->enabled;
7972aab2 1833 ua_chan->tracing_channel_id = uchan->id;
fc34caaa 1834
fc34caaa 1835 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
48842b30
DG
1836}
1837
5b4a0ec0
DG
1838/*
1839 * Copy data between a UST app session and a regular LTT session.
1840 */
421cb601 1841static void shadow_copy_session(struct ust_app_session *ua_sess,
bec39940 1842 struct ltt_ust_session *usess, struct ust_app *app)
48842b30 1843{
477d7741
MD
1844 struct tm *timeinfo;
1845 char datetime[16];
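/* "%Y%m%d-%H%M%S" formats to 15 characters plus the terminating NUL. */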
1846 int ret;
d7ba1388 1847 char tmp_shm_path[PATH_MAX];
477d7741 1848
940c4592 1849 timeinfo = localtime(&app->registration_time);
477d7741 1850 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
48842b30 1851
421cb601 1852 DBG2("Shadow copy of session handle %d", ua_sess->handle);
48842b30 1853
7972aab2
DG
1854 ua_sess->tracing_id = usess->id;
1855 ua_sess->id = get_next_session_id();
470cc211
JG
1856 ua_sess->real_credentials.uid = app->uid;
1857 ua_sess->real_credentials.gid = app->gid;
1858 ua_sess->effective_credentials.uid = usess->uid;
1859 ua_sess->effective_credentials.gid = usess->gid;
7972aab2
DG
1860 ua_sess->buffer_type = usess->buffer_type;
1861 ua_sess->bits_per_long = app->bits_per_long;
6addfa37 1862
7972aab2 1863 /* There is only one consumer object per session possible. */
6addfa37 1864 consumer_output_get(usess->consumer);
7972aab2 1865 ua_sess->consumer = usess->consumer;
6addfa37 1866
2bba9e53 1867 ua_sess->output_traces = usess->output_traces;
ecc48a90 1868 ua_sess->live_timer_interval = usess->live_timer_interval;
84ad93e8
DG
1869 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
1870 &usess->metadata_attr);
7972aab2
DG
1871
1872 switch (ua_sess->buffer_type) {
1873 case LTTNG_BUFFER_PER_PID:
1874 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
dec56f6c 1875 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
7972aab2
DG
1876 datetime);
1877 break;
1878 case LTTNG_BUFFER_PER_UID:
1879 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
470cc211
JG
1880 DEFAULT_UST_TRACE_UID_PATH,
1881 ua_sess->real_credentials.uid,
1882 app->bits_per_long);
7972aab2
DG
1883 break;
1884 default:
1885 assert(0);
1886 goto error;
1887 }
477d7741
MD
1888 if (ret < 0) {
1889 PERROR("asprintf UST shadow copy session");
477d7741 1890 assert(0);
7972aab2 1891 goto error;
477d7741
MD
1892 }
1893
3d071855
MD
1894 strncpy(ua_sess->root_shm_path, usess->root_shm_path,
1895 sizeof(ua_sess->root_shm_path));
1896 ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
d7ba1388
MD
1897 strncpy(ua_sess->shm_path, usess->shm_path,
1898 sizeof(ua_sess->shm_path));
1899 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
1900 if (ua_sess->shm_path[0]) {
1901 switch (ua_sess->buffer_type) {
1902 case LTTNG_BUFFER_PER_PID:
1903 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
5da88b0f 1904 "/" DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
d7ba1388
MD
1905 app->name, app->pid, datetime);
1906 break;
1907 case LTTNG_BUFFER_PER_UID:
1908 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
5da88b0f 1909 "/" DEFAULT_UST_TRACE_UID_PATH,
d7ba1388
MD
1910 app->uid, app->bits_per_long);
1911 break;
1912 default:
1913 assert(0);
1914 goto error;
1915 }
1916 if (ret < 0) {
1917 PERROR("sprintf UST shadow copy session");
1918 assert(0);
1919 goto error;
1920 }
1921 strncat(ua_sess->shm_path, tmp_shm_path,
1922 sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
1923 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
1924 }
6addfa37 1925 return;
7972aab2
DG
1926
1927error:
6addfa37 1928 consumer_output_put(ua_sess->consumer);
48842b30
DG
1929}
1930
78f0bacd
DG
1931/*
1932 * Lookup session wrapper.
1933 */
84cd17c6 1934static
fb9a95c4 1935void __lookup_session_by_app(const struct ltt_ust_session *usess,
bec39940 1936 struct ust_app *app, struct lttng_ht_iter *iter)
84cd17c6
MD
1937{
1938 /* Get right UST app session from app */
d9bf3ca4 1939 lttng_ht_lookup(app->sessions, &usess->id, iter);
84cd17c6
MD
1940}
1941
421cb601
DG
1942/*
1943 * Return ust app session from the app session hashtable using the UST session
a991f516 1944 * id.
421cb601 1945 */
48842b30 1946static struct ust_app_session *lookup_session_by_app(
fb9a95c4 1947 const struct ltt_ust_session *usess, struct ust_app *app)
48842b30 1948{
bec39940 1949 struct lttng_ht_iter iter;
d9bf3ca4 1950 struct lttng_ht_node_u64 *node;
48842b30 1951
84cd17c6 1952 __lookup_session_by_app(usess, app, &iter);
d9bf3ca4 1953 node = lttng_ht_iter_get_node_u64(&iter);
48842b30
DG
1954 if (node == NULL) {
1955 goto error;
1956 }
1957
1958 return caa_container_of(node, struct ust_app_session, node);
1959
1960error:
1961 return NULL;
1962}
1963
7972aab2
DG
1964/*
1965 * Setup buffer registry per PID for the given session and application. If none
1966 * is found, a new one is created, added to the global registry and
1967 * initialized. If regp is valid, it's set with the newly created object.
1968 *
1969 * Return 0 on success or else a negative value.
1970 */
1971static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
1972 struct ust_app *app, struct buffer_reg_pid **regp)
1973{
1974 int ret = 0;
1975 struct buffer_reg_pid *reg_pid;
1976
1977 assert(ua_sess);
1978 assert(app);
1979
1980 rcu_read_lock();
1981
1982 reg_pid = buffer_reg_pid_find(ua_sess->id);
1983 if (!reg_pid) {
1984 /*
1985 * This is the create channel path meaning that if there is NO
1986 * registry available, we have to create one for this session.
1987 */
d7ba1388 1988 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
3d071855 1989 ua_sess->root_shm_path, ua_sess->shm_path);
7972aab2
DG
1990 if (ret < 0) {
1991 goto error;
1992 }
7972aab2
DG
1993 } else {
1994 goto end;
1995 }
1996
1997 /* Initialize registry. */
1998 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
1999 app->bits_per_long, app->uint8_t_alignment,
2000 app->uint16_t_alignment, app->uint32_t_alignment,
af6142cf 2001 app->uint64_t_alignment, app->long_alignment,
470cc211
JG
2002 app->byte_order, app->version.major, app->version.minor,
2003 reg_pid->root_shm_path, reg_pid->shm_path,
2004 ua_sess->effective_credentials.uid,
8de88061
JR
2005 ua_sess->effective_credentials.gid, ua_sess->tracing_id,
2006 app->uid);
7972aab2 2007 if (ret < 0) {
286c991a
MD
2008 /*
2009 * reg_pid->registry->reg.ust is NULL upon error, so we need to
2010 * destroy the buffer registry, because it is always expected
2011 * that if the buffer registry can be found, its ust registry is
2012 * non-NULL.
2013 */
2014 buffer_reg_pid_destroy(reg_pid);
7972aab2
DG
2015 goto error;
2016 }
2017
286c991a
MD
2018 buffer_reg_pid_add(reg_pid);
2019
7972aab2
DG
2020 DBG3("UST app buffer registry per PID created successfully");
2021
2022end:
2023 if (regp) {
2024 *regp = reg_pid;
2025 }
2026error:
2027 rcu_read_unlock();
2028 return ret;
2029}
2030
2031/*
2032 * Setup buffer registry per UID for the given session and application. If none
2033 * is found, a new one is created, added to the global registry and
2034 * initialized. If regp is valid, it's set with the newly created object.
2035 *
2036 * Return 0 on success or else a negative value.
2037 */
2038static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
d7ba1388 2039 struct ust_app_session *ua_sess,
7972aab2
DG
2040 struct ust_app *app, struct buffer_reg_uid **regp)
2041{
2042 int ret = 0;
2043 struct buffer_reg_uid *reg_uid;
2044
2045 assert(usess);
2046 assert(app);
2047
2048 rcu_read_lock();
2049
2050 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2051 if (!reg_uid) {
2052 /*
2053 * This is the create channel path meaning that if there is NO
2054 * registry available, we have to create one for this session.
2055 */
2056 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
3d071855
MD
2057 LTTNG_DOMAIN_UST, &reg_uid,
2058 ua_sess->root_shm_path, ua_sess->shm_path);
7972aab2
DG
2059 if (ret < 0) {
2060 goto error;
2061 }
7972aab2
DG
2062 } else {
2063 goto end;
2064 }
2065
2066 /* Initialize registry. */
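/*
 * The app pointer passed below is NULL: a per-UID registry is shared by
 * every application with the same uid and bitness, so only this app's ABI
 * description (alignments, byte order, version) is recorded.
 */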
af6142cf 2067 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
7972aab2
DG
2068 app->bits_per_long, app->uint8_t_alignment,
2069 app->uint16_t_alignment, app->uint32_t_alignment,
af6142cf
MD
2070 app->uint64_t_alignment, app->long_alignment,
2071 app->byte_order, app->version.major,
3d071855 2072 app->version.minor, reg_uid->root_shm_path,
8de88061
JR
2073 reg_uid->shm_path, usess->uid, usess->gid,
2074 ua_sess->tracing_id, app->uid);
7972aab2 2075 if (ret < 0) {
286c991a
MD
2076 /*
2077 * reg_uid->registry->reg.ust is NULL upon error, so we need to
2078 * destroy the buffer registry, because it is always expected
2079 * that if the buffer registry can be found, its ust registry is
2080 * non-NULL.
2081 */
2082 buffer_reg_uid_destroy(reg_uid, NULL);
7972aab2
DG
2083 goto error;
2084 }
2085 /* Add node to teardown list of the session. */
2086 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
2087
286c991a 2088 buffer_reg_uid_add(reg_uid);
7972aab2 2089
286c991a 2090 DBG3("UST app buffer registry per UID created successfully");
7972aab2
DG
2091end:
2092 if (regp) {
2093 *regp = reg_uid;
2094 }
2095error:
2096 rcu_read_unlock();
2097 return ret;
2098}
2099
421cb601 2100/*
3d8ca23b 2101 * Create a session on the tracer side for the given app.
421cb601 2102 *
3d8ca23b
DG
2103 * On success, ua_sess_ptr is populated with the session pointer or else left
2104 * untouched. If the session was created, is_created is set to 1. On error,
2105 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
2106 * be NULL.
2107 *
2108 * Returns 0 on success, or else a negative code: either -ENOMEM or
2109 * -ENOTCONN, the default code when ustctl_create_session fails.
421cb601 2110 */
03f91eaa 2111static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
3d8ca23b
DG
2112 struct ust_app *app, struct ust_app_session **ua_sess_ptr,
2113 int *is_created)
421cb601 2114{
3d8ca23b 2115 int ret, created = 0;
421cb601
DG
2116 struct ust_app_session *ua_sess;
2117
3d8ca23b
DG
2118 assert(usess);
2119 assert(app);
2120 assert(ua_sess_ptr);
2121
840cb59c 2122 health_code_update();
86acf0da 2123
421cb601
DG
2124 ua_sess = lookup_session_by_app(usess, app);
2125 if (ua_sess == NULL) {
d9bf3ca4 2126 DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
852d0037 2127 app->pid, usess->id);
40bbd087 2128 ua_sess = alloc_ust_app_session();
421cb601
DG
2129 if (ua_sess == NULL) {
2130 /* Only malloc can fail here, so something is really wrong. */
3d8ca23b
DG
2131 ret = -ENOMEM;
2132 goto error;
421cb601 2133 }
477d7741 2134 shadow_copy_session(ua_sess, usess, app);
3d8ca23b 2135 created = 1;
421cb601
DG
2136 }
2137
7972aab2
DG
2138 switch (usess->buffer_type) {
2139 case LTTNG_BUFFER_PER_PID:
2140 /* Init local registry. */
2141 ret = setup_buffer_reg_pid(ua_sess, app, NULL);
421cb601 2142 if (ret < 0) {
e64207cf 2143 delete_ust_app_session(-1, ua_sess, app);
7972aab2
DG
2144 goto error;
2145 }
2146 break;
2147 case LTTNG_BUFFER_PER_UID:
2148 /* Look for a global registry. If none exists, create one. */
d7ba1388 2149 ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
7972aab2 2150 if (ret < 0) {
e64207cf 2151 delete_ust_app_session(-1, ua_sess, app);
7972aab2
DG
2152 goto error;
2153 }
2154 break;
2155 default:
2156 assert(0);
2157 ret = -EINVAL;
2158 goto error;
2159 }
2160
2161 health_code_update();
2162
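/* A handle of -1 means the session has not been created on the tracer side yet. */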
2163 if (ua_sess->handle == -1) {
fb45065e 2164 pthread_mutex_lock(&app->sock_lock);
7972aab2 2165 ret = ustctl_create_session(app->sock);
fb45065e 2166 pthread_mutex_unlock(&app->sock_lock);
7972aab2
DG
2167 if (ret < 0) {
2168 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2169 ERR("Creating session for app pid %d with ret %d",
ffe60014
DG
2170 app->pid, ret);
2171 } else {
2172 DBG("UST app creating session failed. Application is dead");
3757b385
DG
2173 /*
2174 * This is normal behavior, an application can die during the
2175 * creation process. Don't report an error so the execution can
2176 * continue normally. This will get flagged ENOTCONN and the
2177 * caller will handle it.
2178 */
2179 ret = 0;
ffe60014 2180 }
d0b96690 2181 delete_ust_app_session(-1, ua_sess, app);
3d8ca23b
DG
2182 if (ret != -ENOMEM) {
2183 /*
2184 * Tracer is probably gone or got an internal error so let's
2185 * behave like it will soon unregister or not usable.
2186 */
2187 ret = -ENOTCONN;
2188 }
2189 goto error;
421cb601
DG
2190 }
2191
7972aab2
DG
2192 ua_sess->handle = ret;
2193
2194 /* Add ust app session to app's HT */
d9bf3ca4
MD
2195 lttng_ht_node_init_u64(&ua_sess->node,
2196 ua_sess->tracing_id);
2197 lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
10b56aef
MD
2198 lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
2199 lttng_ht_add_unique_ulong(app->ust_sessions_objd,
2200 &ua_sess->ust_objd_node);
7972aab2
DG
2201
2202 DBG2("UST app session created successfully with handle %d", ret);
2203 }
2204
2205 *ua_sess_ptr = ua_sess;
2206 if (is_created) {
2207 *is_created = created;
2208 }
2209
2210 /* Everything went well. */
2211 ret = 0;
2212
2213error:
2214 health_code_update();
2215 return ret;
2216}
2217
6a6b2068
JG
2218/*
2219 * Match function for a hash table lookup of ust_app_ctx.
2220 *
2221 * It matches an ust app context based on the context type and, in the case
2222 * of perf counters, their name.
2223 */
2224static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2225{
2226 struct ust_app_ctx *ctx;
bdf64013 2227 const struct lttng_ust_context_attr *key;
6a6b2068
JG
2228
2229 assert(node);
2230 assert(_key);
2231
2232 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2233 key = _key;
2234
2235 /* Context type */
2236 if (ctx->ctx.ctx != key->ctx) {
2237 goto no_match;
2238 }
2239
bdf64013
JG
2240 switch(key->ctx) {
2241 case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
6a6b2068 2242 if (strncmp(key->u.perf_counter.name,
bdf64013
JG
2243 ctx->ctx.u.perf_counter.name,
2244 sizeof(key->u.perf_counter.name))) {
2245 goto no_match;
2246 }
2247 break;
2248 case LTTNG_UST_CONTEXT_APP_CONTEXT:
2249 if (strcmp(key->u.app_ctx.provider_name,
2250 ctx->ctx.u.app_ctx.provider_name) ||
2251 strcmp(key->u.app_ctx.ctx_name,
2252 ctx->ctx.u.app_ctx.ctx_name)) {
6a6b2068
JG
2253 goto no_match;
2254 }
bdf64013
JG
2255 break;
2256 default:
2257 break;
6a6b2068
JG
2258 }
2259
2260 /* Match. */
2261 return 1;
2262
2263no_match:
2264 return 0;
2265}
2266
2267/*
2268 * Look up an ust app context from an lttng_ust_context.
2269 *
be184a0f 2270 * Must be called while holding RCU read side lock.
6a6b2068
JG
2271 * Return an ust_app_ctx object or NULL on error.
2272 */
2273static
2274struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
bdf64013 2275 struct lttng_ust_context_attr *uctx)
6a6b2068
JG
2276{
2277 struct lttng_ht_iter iter;
2278 struct lttng_ht_node_ulong *node;
2279 struct ust_app_ctx *app_ctx = NULL;
2280
2281 assert(uctx);
2282 assert(ht);
2283
2284 /* Lookup using the lttng_ust_context_type and a custom match fct. */
2285 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2286 ht_match_ust_app_ctx, uctx, &iter.iter);
2287 node = lttng_ht_iter_get_node_ulong(&iter);
2288 if (!node) {
2289 goto end;
2290 }
2291
2292 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2293
2294end:
2295 return app_ctx;
2296}
2297
7972aab2
DG
2298/*
2299 * Create a context for the channel on the tracer.
2300 *
2301 * Called with UST app session lock held and a RCU read side lock.
2302 */
2303static
c9edf082 2304int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
bdf64013 2305 struct lttng_ust_context_attr *uctx,
7972aab2
DG
2306 struct ust_app *app)
2307{
2308 int ret = 0;
7972aab2
DG
2309 struct ust_app_ctx *ua_ctx;
2310
2311 DBG2("UST app adding context to channel %s", ua_chan->name);
2312
6a6b2068
JG
2313 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2314 if (ua_ctx) {
7972aab2
DG
2315 ret = -EEXIST;
2316 goto error;
2317 }
2318
2319 ua_ctx = alloc_ust_app_ctx(uctx);
2320 if (ua_ctx == NULL) {
2321 /* malloc failed */
7682f304 2322 ret = -ENOMEM;
7972aab2
DG
2323 goto error;
2324 }
2325
2326 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
aa3514e9 2327 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
31746f93 2328 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
7972aab2
DG
2329
2330 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2331 if (ret < 0) {
2332 goto error;
2333 }
2334
2335error:
2336 return ret;
2337}
2338
2339/*
2340 * Enable on the tracer side a ust app event for the session and channel.
2341 *
2342 * Called with UST app session lock held.
2343 */
2344static
2345int enable_ust_app_event(struct ust_app_session *ua_sess,
2346 struct ust_app_event *ua_event, struct ust_app *app)
2347{
2348 int ret;
2349
2350 ret = enable_ust_event(app, ua_sess, ua_event);
2351 if (ret < 0) {
2352 goto error;
2353 }
2354
2355 ua_event->enabled = 1;
2356
2357error:
2358 return ret;
2359}
2360
2361/*
2362 * Disable on the tracer side a ust app event for the session and channel.
2363 */
2364static int disable_ust_app_event(struct ust_app_session *ua_sess,
2365 struct ust_app_event *ua_event, struct ust_app *app)
2366{
2367 int ret;
2368
2369 ret = disable_ust_event(app, ua_sess, ua_event);
2370 if (ret < 0) {
2371 goto error;
2372 }
2373
2374 ua_event->enabled = 0;
2375
2376error:
2377 return ret;
2378}
2379
2380/*
2381 * Lookup ust app channel for session and disable it on the tracer side.
2382 */
2383static
2384int disable_ust_app_channel(struct ust_app_session *ua_sess,
2385 struct ust_app_channel *ua_chan, struct ust_app *app)
2386{
2387 int ret;
2388
2389 ret = disable_ust_channel(app, ua_sess, ua_chan);
2390 if (ret < 0) {
2391 goto error;
2392 }
2393
2394 ua_chan->enabled = 0;
2395
2396error:
2397 return ret;
2398}
2399
2400/*
2401 * Lookup ust app channel for session and enable it on the tracer side. This
2402 * MUST be called with a RCU read side lock acquired.
2403 */
2404static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2405 struct ltt_ust_channel *uchan, struct ust_app *app)
2406{
2407 int ret = 0;
2408 struct lttng_ht_iter iter;
2409 struct lttng_ht_node_str *ua_chan_node;
2410 struct ust_app_channel *ua_chan;
2411
2412 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2413 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2414 if (ua_chan_node == NULL) {
d9bf3ca4 2415 DBG2("Unable to find channel %s in ust session id %" PRIu64,
7972aab2
DG
2416 uchan->name, ua_sess->tracing_id);
2417 goto error;
2418 }
2419
2420 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2421
2422 ret = enable_ust_channel(app, ua_sess, ua_chan);
2423 if (ret < 0) {
2424 goto error;
2425 }
2426
2427error:
2428 return ret;
2429}
2430
2431/*
2432 * Ask the consumer to create a channel and get it if successful.
2433 *
fad1ed2f
JR
2434 * Called with UST app session lock held.
2435 *
7972aab2
DG
2436 * Return 0 on success or else a negative value.
2437 */
2438static int do_consumer_create_channel(struct ltt_ust_session *usess,
2439 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
e098433c
JG
2440 int bitness, struct ust_registry_session *registry,
2441 uint64_t trace_archive_id)
7972aab2
DG
2442{
2443 int ret;
2444 unsigned int nb_fd = 0;
2445 struct consumer_socket *socket;
2446
2447 assert(usess);
2448 assert(ua_sess);
2449 assert(ua_chan);
2450 assert(registry);
2451
2452 rcu_read_lock();
2453 health_code_update();
2454
2455 /* Get the right consumer socket for the application. */
2456 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
2457 if (!socket) {
2458 ret = -EINVAL;
2459 goto error;
2460 }
2461
2462 health_code_update();
2463
2464 /* Need one fd for the channel. */
2465 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2466 if (ret < 0) {
2467 ERR("Exhausted number of available FD upon create channel");
2468 goto error;
2469 }
2470
2471 /*
2472 * Ask consumer to create channel. The consumer will return the number of
2473 * stream we have to expect.
2474 */
2475 ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
d2956687 2476 registry, usess->current_trace_chunk);
7972aab2
DG
2477 if (ret < 0) {
2478 goto error_ask;
2479 }
2480
2481 /*
2482 * Compute the number of fd needed before receiving them. It must be 2 per
2483 * stream (2 being the default value here).
2484 */
2485 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
2486
2487 /* Reserve the amount of file descriptor we need. */
2488 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
2489 if (ret < 0) {
2490 ERR("Exhausted number of available FD upon create channel");
2491 goto error_fd_get_stream;
2492 }
2493
2494 health_code_update();
2495
2496 /*
2497 * Now get the channel from the consumer. This call will populate the stream
2498 * list of that channel and set the ust objects.
2499 */
d9078d0c
DG
2500 if (usess->consumer->enabled) {
2501 ret = ust_consumer_get_channel(socket, ua_chan);
2502 if (ret < 0) {
2503 goto error_destroy;
2504 }
7972aab2
DG
2505 }
2506
2507 rcu_read_unlock();
2508 return 0;
2509
2510error_destroy:
2511 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
2512error_fd_get_stream:
2513 /*
2514 * Initiate a destroy channel on the consumer since we had an error
2515 * handling it on our side. The return value is of no importance since we
2516 * already have a ret value set by the previous error that we need to
2517 * return.
2518 */
2519 (void) ust_consumer_destroy_channel(socket, ua_chan);
2520error_ask:
2521 lttng_fd_put(LTTNG_FD_APPS, 1);
2522error:
2523 health_code_update();
2524 rcu_read_unlock();
2525 return ret;
2526}
2527
2528/*
2529 * Duplicate the ust data object of the ust app stream and save it in the
2530 * buffer registry stream.
2531 *
2532 * Return 0 on success or else a negative value.
2533 */
2534static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2535 struct ust_app_stream *stream)
2536{
2537 int ret;
2538
2539 assert(reg_stream);
2540 assert(stream);
2541
2542 /* Reserve the amount of file descriptor we need. */
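/* Each stream is expected to use two fds (presumably its shm and wakeup fds). */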
2543 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2544 if (ret < 0) {
2545 ERR("Exhausted number of available FD upon duplicate stream");
2546 goto error;
2547 }
2548
2549 /* Duplicate object for stream once the original is in the registry. */
2550 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2551 reg_stream->obj.ust);
2552 if (ret < 0) {
2553 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2554 reg_stream->obj.ust, stream->obj, ret);
2555 lttng_fd_put(LTTNG_FD_APPS, 2);
2556 goto error;
2557 }
2558 stream->handle = stream->obj->handle;
2559
2560error:
2561 return ret;
2562}
2563
2564/*
2565 * Duplicate the ust data object of the ust app channel and save it in the
2566 * buffer registry channel.
2567 *
2568 * Return 0 on success or else a negative value.
2569 */
2570static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
2571 struct ust_app_channel *ua_chan)
2572{
2573 int ret;
2574
2575 assert(reg_chan);
2576 assert(ua_chan);
2577
2578 /* Reserve one fd for the channel. */
2579 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2580 if (ret < 0) {
2581 ERR("Exhausted number of available FD upon duplicate channel");
2582 goto error_fd_get;
2583 }
2584
2585 /* Duplicate object for stream once the original is in the registry. */
2586 ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
2587 if (ret < 0) {
2588 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
2589 reg_chan->obj.ust, ua_chan->obj, ret);
2590 goto error;
2591 }
2592 ua_chan->handle = ua_chan->obj->handle;
2593
2594 return 0;
2595
2596error:
2597 lttng_fd_put(LTTNG_FD_APPS, 1);
2598error_fd_get:
2599 return ret;
2600}
2601
2602/*
2603 * For a given channel buffer registry, setup all streams of the given ust
2604 * application channel.
2605 *
2606 * Return 0 on success or else a negative value.
2607 */
2608static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
fb45065e
MD
2609 struct ust_app_channel *ua_chan,
2610 struct ust_app *app)
7972aab2
DG
2611{
2612 int ret = 0;
2613 struct ust_app_stream *stream, *stmp;
2614
2615 assert(reg_chan);
2616 assert(ua_chan);
2617
2618 DBG2("UST app setup buffer registry stream");
2619
2620 /* Send all streams to application. */
2621 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2622 struct buffer_reg_stream *reg_stream;
2623
2624 ret = buffer_reg_stream_create(&reg_stream);
2625 if (ret < 0) {
2626 goto error;
2627 }
2628
2629 /*
2630 * Keep original pointer and nullify it in the stream so the delete
2631 * stream call does not release the object.
2632 */
2633 reg_stream->obj.ust = stream->obj;
2634 stream->obj = NULL;
2635 buffer_reg_stream_add(reg_stream, reg_chan);
421cb601 2636
7972aab2
DG
2637 /* We don't need the streams anymore. */
2638 cds_list_del(&stream->list);
fb45065e 2639 delete_ust_app_stream(-1, stream, app);
7972aab2 2640 }
421cb601 2641
7972aab2
DG
2642error:
2643 return ret;
2644}
2645
2646/*
2647 * Create a buffer registry channel for the given session registry and
2648 * application channel object. If regp pointer is valid, it's set with the
2649 * created object. Important, the created object is NOT added to the session
2650 * registry hash table.
2651 *
2652 * Return 0 on success else a negative value.
2653 */
2654static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2655 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
2656{
2657 int ret;
2658 struct buffer_reg_channel *reg_chan = NULL;
2659
2660 assert(reg_sess);
2661 assert(ua_chan);
2662
2663 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
2664
2665 /* Create buffer registry channel. */
2666 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
2667 if (ret < 0) {
2668 goto error_create;
421cb601 2669 }
7972aab2
DG
2670 assert(reg_chan);
2671 reg_chan->consumer_key = ua_chan->key;
8c924c7b 2672 reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
d07ceecd 2673 reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
421cb601 2674
7972aab2
DG
2675 /* Create and add a channel registry to session. */
2676 ret = ust_registry_channel_add(reg_sess->reg.ust,
2677 ua_chan->tracing_channel_id);
2678 if (ret < 0) {
2679 goto error;
d88aee68 2680 }
7972aab2 2681 buffer_reg_channel_add(reg_sess, reg_chan);
d88aee68 2682
7972aab2
DG
2683 if (regp) {
2684 *regp = reg_chan;
3d8ca23b 2685 }
d88aee68 2686
7972aab2 2687 return 0;
3d8ca23b
DG
2688
2689error:
7972aab2
DG
2690 /* Safe because the registry channel object was not added to any HT. */
2691 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2692error_create:
3d8ca23b 2693 return ret;
421cb601
DG
2694}
2695
55cc08a6 2696/*
7972aab2
DG
2697 * Setup buffer registry channel for the given session registry and application
2698 * channel object. If regp pointer is valid, it's set with the created object.
d0b96690 2699 *
7972aab2 2700 * Return 0 on success else a negative value.
55cc08a6 2701 */
7972aab2 2702static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
fb45065e
MD
2703 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan,
2704 struct ust_app *app)
55cc08a6 2705{
7972aab2 2706 int ret;
55cc08a6 2707
7972aab2
DG
2708 assert(reg_sess);
2709 assert(reg_chan);
2710 assert(ua_chan);
2711 assert(ua_chan->obj);
55cc08a6 2712
7972aab2 2713 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
55cc08a6 2714
7972aab2 2715 /* Setup all streams for the registry. */
fb45065e 2716 ret = setup_buffer_reg_streams(reg_chan, ua_chan, app);
7972aab2 2717 if (ret < 0) {
55cc08a6
DG
2718 goto error;
2719 }
2720
7972aab2
DG
2721 reg_chan->obj.ust = ua_chan->obj;
2722 ua_chan->obj = NULL;
55cc08a6 2723
7972aab2 2724 return 0;
55cc08a6
DG
2725
2726error:
7972aab2
DG
2727 buffer_reg_channel_remove(reg_sess, reg_chan);
2728 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
55cc08a6
DG
2729 return ret;
2730}
2731
edb67388 2732/*
7972aab2 2733 * Send buffer registry channel to the application.
d0b96690 2734 *
7972aab2 2735 * Return 0 on success else a negative value.
edb67388 2736 */
7972aab2
DG
2737static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
2738 struct ust_app *app, struct ust_app_session *ua_sess,
2739 struct ust_app_channel *ua_chan)
edb67388
DG
2740{
2741 int ret;
7972aab2 2742 struct buffer_reg_stream *reg_stream;
edb67388 2743
7972aab2
DG
2744 assert(reg_chan);
2745 assert(app);
2746 assert(ua_sess);
2747 assert(ua_chan);
2748
2749 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
2750
2751 ret = duplicate_channel_object(reg_chan, ua_chan);
edb67388
DG
2752 if (ret < 0) {
2753 goto error;
2754 }
2755
7972aab2
DG
2756 /* Send channel to the application. */
2757 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
a7169585
MD
2758 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2759 ret = -ENOTCONN; /* Caused by app exiting. */
2760 goto error;
2761 } else if (ret < 0) {
7972aab2
DG
2762 goto error;
2763 }
2764
2765 health_code_update();
2766
2767 /* Send all streams to application. */
2768 pthread_mutex_lock(&reg_chan->stream_list_lock);
2769 cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
2770 struct ust_app_stream stream;
2771
2772 ret = duplicate_stream_object(reg_stream, &stream);
2773 if (ret < 0) {
2774 goto error_stream_unlock;
2775 }
2776
2777 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
2778 if (ret < 0) {
fb45065e 2779 (void) release_ust_app_stream(-1, &stream, app);
a7169585
MD
2780 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2781 ret = -ENOTCONN; /* Caused by app exiting. */
a7169585 2782 }
7972aab2
DG
2783 goto error_stream_unlock;
2784 }
edb67388 2785
7972aab2
DG
2786 /*
2787 * The return value is not important here. This function will output an
2788 * error if needed.
2789 */
fb45065e 2790 (void) release_ust_app_stream(-1, &stream, app);
7972aab2
DG
2791 }
2792 ua_chan->is_sent = 1;
2793
2794error_stream_unlock:
2795 pthread_mutex_unlock(&reg_chan->stream_list_lock);
edb67388
DG
2796error:
2797 return ret;
2798}
2799
9730260e 2800/*
7972aab2
DG
2801 * Create the per UID channel buffers if needed and send them to the application.
2802 *
9acdc1d6 2803 * This MUST be called with a RCU read side lock acquired.
71e0a100 2804 * The session list lock and the session's lock must be acquired.
9acdc1d6 2805 *
7972aab2 2806 * Return 0 on success else a negative value.
9730260e 2807 */
7972aab2
DG
2808static int create_channel_per_uid(struct ust_app *app,
2809 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2810 struct ust_app_channel *ua_chan)
9730260e
DG
2811{
2812 int ret;
7972aab2
DG
2813 struct buffer_reg_uid *reg_uid;
2814 struct buffer_reg_channel *reg_chan;
e32d7f27 2815 struct ltt_session *session = NULL;
e098433c
JG
2816 enum lttng_error_code notification_ret;
2817 struct ust_registry_channel *chan_reg;
9730260e 2818
7972aab2
DG
2819 assert(app);
2820 assert(usess);
2821 assert(ua_sess);
2822 assert(ua_chan);
2823
2824 DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
2825
2826 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2827 /*
2828 * The session creation handles the creation of this global registry
2829 * object. If none can be found, there is a code flow problem or a
2830 * teardown race.
2831 */
2832 assert(reg_uid);
2833
2834 reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
2835 reg_uid);
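/*
 * If the registry channel already exists, the per UID buffers were already
 * created by another application; skip creation and only send them below.
 */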
2721f7ea
JG
2836 if (reg_chan) {
2837 goto send_channel;
2838 }
7972aab2 2839
2721f7ea
JG
2840 /* Create the buffer registry channel object. */
2841 ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
2842 if (ret < 0) {
2843 ERR("Error creating the UST channel \"%s\" registry instance",
f14256d6 2844 ua_chan->name);
2721f7ea
JG
2845 goto error;
2846 }
f14256d6 2847
e098433c
JG
2848 session = session_find_by_id(ua_sess->tracing_id);
2849 assert(session);
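/*
 * The caller must already hold the session and session list locks; a
 * successful trylock (returning 0) here would mean they were not held.
 */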
2850 assert(pthread_mutex_trylock(&session->lock));
2851 assert(session_trylock_list());
2852
2721f7ea
JG
2853 /*
2854 * Create the buffers on the consumer side. This call populates the
2855 * ust app channel object with all streams and data object.
2856 */
2857 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
e098433c 2858 app->bits_per_long, reg_uid->registry->reg.ust,
d2956687 2859 session->most_recent_chunk_id.value);
2721f7ea
JG
2860 if (ret < 0) {
2861 ERR("Error creating UST channel \"%s\" on the consumer daemon",
2862 ua_chan->name);
7972aab2
DG
2863
2864 /*
2721f7ea
JG
2865 * Let's remove the previously created buffer registry channel so
2866 * it's not visible anymore in the session registry.
7972aab2 2867 */
2721f7ea
JG
2868 ust_registry_channel_del_free(reg_uid->registry->reg.ust,
2869 ua_chan->tracing_channel_id, false);
2870 buffer_reg_channel_remove(reg_uid->registry, reg_chan);
2871 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2872 goto error;
7972aab2
DG
2873 }
2874
2721f7ea
JG
2875 /*
2876 * Setup the streams and add it to the session registry.
2877 */
2878 ret = setup_buffer_reg_channel(reg_uid->registry,
2879 ua_chan, reg_chan, app);
2880 if (ret < 0) {
2881 ERR("Error setting up UST channel \"%s\"", ua_chan->name);
2882 goto error;
2883 }
2884
e098433c
JG
2885 /* Notify the notification subsystem of the channel's creation. */
2886 pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
2887 chan_reg = ust_registry_channel_find(reg_uid->registry->reg.ust,
2888 ua_chan->tracing_channel_id);
2889 assert(chan_reg);
2890 chan_reg->consumer_key = ua_chan->key;
2891 chan_reg = NULL;
2892 pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);
e9404c27 2893
e098433c
JG
2894 notification_ret = notification_thread_command_add_channel(
2895 notification_thread_handle, session->name,
470cc211
JG
2896 ua_sess->effective_credentials.uid,
2897 ua_sess->effective_credentials.gid, ua_chan->name,
2898 ua_chan->key, LTTNG_DOMAIN_UST,
e098433c
JG
2899 ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
2900 if (notification_ret != LTTNG_OK) {
2901 ret = - (int) notification_ret;
2902 ERR("Failed to add channel to notification thread");
2903 goto error;
e9404c27
JG
2904 }
2905
2721f7ea 2906send_channel:
66ff8e3f
JG
2907 /* Send buffers to the application. */
2908 ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
2909 if (ret < 0) {
2910 if (ret != -ENOTCONN) {
2911 ERR("Error sending channel to application");
2912 }
2913 goto error;
2914 }
2915
9730260e 2916error:
e32d7f27
JG
2917 if (session) {
2918 session_put(session);
2919 }
9730260e
DG
2920 return ret;
2921}
2922
78f0bacd 2923/*
7972aab2
DG
2924 * Create and send to the application the created buffers with per PID buffers.
2925 *
fad1ed2f 2926 * Called with UST app session lock held.
71e0a100 2927 * The session list lock and the session's lock must be acquired.
fad1ed2f 2928 *
7972aab2 2929 * Return 0 on success else a negative value.
78f0bacd 2930 */
7972aab2
DG
2931static int create_channel_per_pid(struct ust_app *app,
2932 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2933 struct ust_app_channel *ua_chan)
78f0bacd 2934{
8535a6d9 2935 int ret;
7972aab2 2936 struct ust_registry_session *registry;
e9404c27 2937 enum lttng_error_code cmd_ret;
e32d7f27 2938 struct ltt_session *session = NULL;
e9404c27
JG
2939 uint64_t chan_reg_key;
2940 struct ust_registry_channel *chan_reg;
78f0bacd 2941
7972aab2
DG
2942 assert(app);
2943 assert(usess);
2944 assert(ua_sess);
2945 assert(ua_chan);
2946
2947 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
2948
2949 rcu_read_lock();
2950
2951 registry = get_session_registry(ua_sess);
fad1ed2f 2952 /* The UST app session lock is held, registry shall not be null. */
7972aab2
DG
2953 assert(registry);
2954
2955 /* Create and add a new channel registry to session. */
2956 ret = ust_registry_channel_add(registry, ua_chan->key);
78f0bacd 2957 if (ret < 0) {
f14256d6
MD
2958 ERR("Error creating the UST channel \"%s\" registry instance",
2959 ua_chan->name);
78f0bacd
DG
2960 goto error;
2961 }
2962
e098433c
JG
2963 session = session_find_by_id(ua_sess->tracing_id);
2964 assert(session);
2965
2966 assert(pthread_mutex_trylock(&session->lock));
2967 assert(session_trylock_list());
2968
7972aab2
DG
2969 /* Create and get channel on the consumer side. */
2970 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
e098433c 2971 app->bits_per_long, registry,
d2956687 2972 session->most_recent_chunk_id.value);
7972aab2 2973 if (ret < 0) {
f14256d6
MD
2974 ERR("Error creating UST channel \"%s\" on the consumer daemon",
2975 ua_chan->name);
5b951542 2976 goto error_remove_from_registry;
7972aab2
DG
2977 }
2978
2979 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
2980 if (ret < 0) {
a7169585
MD
2981 if (ret != -ENOTCONN) {
2982 ERR("Error sending channel to application");
2983 }
5b951542 2984 goto error_remove_from_registry;
7972aab2 2985 }
8535a6d9 2986
e9404c27
JG
2987 chan_reg_key = ua_chan->key;
2988 pthread_mutex_lock(&registry->lock);
2989 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
2990 assert(chan_reg);
2991 chan_reg->consumer_key = ua_chan->key;
2992 pthread_mutex_unlock(&registry->lock);
2993
2994 cmd_ret = notification_thread_command_add_channel(
2995 notification_thread_handle, session->name,
470cc211
JG
2996 ua_sess->effective_credentials.uid,
2997 ua_sess->effective_credentials.gid, ua_chan->name,
2998 ua_chan->key, LTTNG_DOMAIN_UST,
e9404c27
JG
2999 ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
3000 if (cmd_ret != LTTNG_OK) {
3001 ret = - (int) cmd_ret;
3002 ERR("Failed to add channel to notification thread");
5b951542 3003 goto error_remove_from_registry;
e9404c27
JG
3004 }
3005
5b951542
MD
3006error_remove_from_registry:
3007 if (ret) {
3008 ust_registry_channel_del_free(registry, ua_chan->key, false);
3009 }
78f0bacd 3010error:
7972aab2 3011 rcu_read_unlock();
e32d7f27
JG
3012 if (session) {
3013 session_put(session);
3014 }
78f0bacd
DG
3015 return ret;
3016}
3017
3018/*
7972aab2 3019 * From an already allocated ust app channel, create the channel buffers if
88e3c2f5 3020 * needed and send them to the application. This MUST be called with a RCU read
7972aab2
DG
3021 * side lock acquired.
3022 *
fad1ed2f
JR
3023 * Called with UST app session lock held.
3024 *
a7169585
MD
3025 * Return 0 on success or else a negative value. Returns -ENOTCONN if
3026 * the application exited concurrently.
78f0bacd 3027 */
88e3c2f5 3028static int ust_app_channel_send(struct ust_app *app,
7972aab2
DG
3029 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3030 struct ust_app_channel *ua_chan)
78f0bacd 3031{
7972aab2 3032 int ret;
78f0bacd 3033
7972aab2
DG
3034 assert(app);
3035 assert(usess);
88e3c2f5 3036 assert(usess->active);
7972aab2
DG
3037 assert(ua_sess);
3038 assert(ua_chan);
3039
3040 /* Handle buffer type before sending the channel to the application. */
3041 switch (usess->buffer_type) {
3042 case LTTNG_BUFFER_PER_UID:
3043 {
3044 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
3045 if (ret < 0) {
3046 goto error;
3047 }
3048 break;
3049 }
3050 case LTTNG_BUFFER_PER_PID:
3051 {
3052 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
3053 if (ret < 0) {
3054 goto error;
3055 }
3056 break;
3057 }
3058 default:
3059 assert(0);
3060 ret = -EINVAL;
78f0bacd
DG
3061 goto error;
3062 }
3063
7972aab2
DG
3064 /* Initialize ust objd object using the received handle and add it. */
3065 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
3066 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
78f0bacd 3067
7972aab2
DG
3068 /* If channel is not enabled, disable it on the tracer */
3069 if (!ua_chan->enabled) {
3070 ret = disable_ust_channel(app, ua_sess, ua_chan);
3071 if (ret < 0) {
3072 goto error;
3073 }
78f0bacd
DG
3074 }
3075
3076error:
3077 return ret;
3078}
3079
284d8f55 3080/*
88e3c2f5 3081 * Create UST app channel and return it through ua_chanp if not NULL.
d0b96690 3082 *
36b588ed 3083 * Called with UST app session lock and RCU read-side lock held.
7972aab2 3084 *
88e3c2f5 3085 * Return 0 on success or else a negative value.
284d8f55 3086 */
88e3c2f5
JG
3087static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
3088 struct ltt_ust_channel *uchan,
7972aab2 3089 enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
4d710ac2 3090 struct ust_app_channel **ua_chanp)
5b4a0ec0
DG
3091{
3092 int ret = 0;
bec39940
DG
3093 struct lttng_ht_iter iter;
3094 struct lttng_ht_node_str *ua_chan_node;
5b4a0ec0
DG
3095 struct ust_app_channel *ua_chan;
3096
3097 /* Lookup channel in the ust app session */
bec39940
DG
3098 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
3099 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
fc34caaa 3100 if (ua_chan_node != NULL) {
5b4a0ec0 3101 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
fc34caaa 3102 goto end;
5b4a0ec0
DG
3103 }
3104
d0b96690 3105 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
fc34caaa
DG
3106 if (ua_chan == NULL) {
3107 /* Only malloc can fail here */
4d710ac2 3108 ret = -ENOMEM;
88e3c2f5 3109 goto error;
fc34caaa
DG
3110 }
3111 shadow_copy_channel(ua_chan, uchan);
3112
ffe60014
DG
3113 /* Set channel type. */
3114 ua_chan->attr.type = type;
3115
d0b96690
DG
3116 /* Add the channel to the session's hash table; it is only sent to the tracer in a later step. */
3117 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
fc34caaa 3118end:
4d710ac2
DG
3119 if (ua_chanp) {
3120 *ua_chanp = ua_chan;
3121 }
3122
3123 /* Everything went well. */
3124 return 0;
5b4a0ec0
DG
3125
3126error:
4d710ac2 3127 return ret;
5b4a0ec0
DG
3128}
3129
3130/*
3131 * Create UST app event and create it on the tracer side.
d0b96690
DG
3132 *
3133 * Called with ust app session mutex held.
5b4a0ec0 3134 */
edb67388
DG
3135static
3136int create_ust_app_event(struct ust_app_session *ua_sess,
3137 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
3138 struct ust_app *app)
284d8f55 3139{
edb67388 3140 int ret = 0;
5b4a0ec0 3141 struct ust_app_event *ua_event;
284d8f55 3142
edb67388
DG
3143 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
3144 if (ua_event == NULL) {
20533947 3145 /* Only failure mode of alloc_ust_app_event(). */
edb67388 3146 ret = -ENOMEM;
fc34caaa 3147 goto end;
5b4a0ec0 3148 }
edb67388 3149 shadow_copy_event(ua_event, uevent);
5b4a0ec0 3150
edb67388 3151 /* Create it on the tracer side */
5b4a0ec0 3152 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
284d8f55 3153 if (ret < 0) {
e9f11505
JG
3154 /*
3155 * Not found previously means that it does not exist on the
3156 * tracer. If the application reports that the event existed,
3157 * it means there is a bug in the sessiond or lttng-ust
3158 * (or corruption, etc.)
3159 */
3160 if (ret == -LTTNG_UST_ERR_EXIST) {
3161 ERR("Tracer for application reported that an event being created already existed: "
3162 "event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d",
3163 uevent->attr.name,
3164 app->pid, app->ppid, app->uid,
3165 app->gid);
3166 }
284d8f55
DG
3167 goto error;
3168 }
3169
d0b96690 3170 add_unique_ust_app_event(ua_chan, ua_event);
284d8f55 3171
fc34caaa 3172 DBG2("UST app create event %s for PID %d completed", ua_event->name,
852d0037 3173 app->pid);
7f79d3a1 3174
edb67388 3175end:
fc34caaa
DG
3176 return ret;
3177
5b4a0ec0 3178error:
fc34caaa 3179 /* Valid: the caller already holds the RCU read side lock. */
fb45065e 3180 delete_ust_app_event(-1, ua_event, app);
edb67388 3181 return ret;
5b4a0ec0
DG
3182}
3183
3184/*
3185 * Create UST metadata and open it on the tracer side.
d0b96690 3186 *
7972aab2 3187 * Called with UST app session lock held and RCU read side lock.
5b4a0ec0
DG
3188 */
3189static int create_ust_app_metadata(struct ust_app_session *ua_sess,
ad7a9107 3190 struct ust_app *app, struct consumer_output *consumer)
5b4a0ec0
DG
3191{
3192 int ret = 0;
ffe60014 3193 struct ust_app_channel *metadata;
d88aee68 3194 struct consumer_socket *socket;
7972aab2 3195 struct ust_registry_session *registry;
e32d7f27 3196 struct ltt_session *session = NULL;
5b4a0ec0 3197
ffe60014
DG
3198 assert(ua_sess);
3199 assert(app);
d88aee68 3200 assert(consumer);
5b4a0ec0 3201
7972aab2 3202 registry = get_session_registry(ua_sess);
fad1ed2f 3203 /* The UST app session lock is held, registry shall not be null. */
7972aab2
DG
3204 assert(registry);
3205
ce34fcd0
MD
3206 pthread_mutex_lock(&registry->lock);
3207
1b532a60
DG
3208 /* Metadata already exists for this registry or it was closed previously */
3209 if (registry->metadata_key || registry->metadata_closed) {
7972aab2
DG
3210 ret = 0;
3211 goto error;
5b4a0ec0
DG
3212 }
3213
ffe60014 3214 /* Allocate UST metadata */
d0b96690 3215 metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
ffe60014
DG
3216 if (!metadata) {
3217 /* malloc() failed */
3218 ret = -ENOMEM;
3219 goto error;
3220 }
5b4a0ec0 3221
ad7a9107 3222 memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));
5b4a0ec0 3223
7972aab2
DG
3224 /* Need one fd for the channel. */
3225 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3226 if (ret < 0) {
3227 ERR("Exhausted number of available FD upon create metadata");
3228 goto error;
3229 }
3230
4dc3dfc5
DG
3231 /* Get the right consumer socket for the application. */
3232 socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
3233 if (!socket) {
3234 ret = -EINVAL;
3235 goto error_consumer;
3236 }
3237
331744e3
JD
3238 /*
3239 * Keep metadata key so we can identify it on the consumer side. Assign it
3240 * to the registry *before* we ask the consumer so we avoid the race of the
3241 * consumer requesting the metadata and the ask_channel call on our side
3242 * did not returned yet.
3243 */
3244 registry->metadata_key = metadata->key;
3245
e098433c
JG
3246 session = session_find_by_id(ua_sess->tracing_id);
3247 assert(session);
3248
3249 assert(pthread_mutex_trylock(&session->lock));
3250 assert(session_trylock_list());
3251
d88aee68
DG
3252 /*
3253 * Ask the metadata channel creation to the consumer. The metadata object
3254 * will be created by the consumer and kept there. However, the stream is
3255 * never added or monitored until we do a first push metadata to the
3256 * consumer.
3257 */
7972aab2 3258 ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
d2956687 3259 registry, session->current_trace_chunk);
d88aee68 3260 if (ret < 0) {
f2a444f1
DG
3261 /* Nullify the metadata key so we don't try to close it later on. */
3262 registry->metadata_key = 0;
d88aee68
DG
3263 goto error_consumer;
3264 }
3265
3266 /*
3267 * The setup command will make the metadata stream be sent to the relayd,
3268 * if applicable, and the thread managing the metadatas. This is important
3269 * because after this point, if an error occurs, the only way the stream
3270 * can be deleted is to be monitored in the consumer.
3271 */
7972aab2 3272 ret = consumer_setup_metadata(socket, metadata->key);
ffe60014 3273 if (ret < 0) {
f2a444f1
DG
3274 /* Nullify the metadata key so we don't try to close it later on. */
3275 registry->metadata_key = 0;
d88aee68 3276 goto error_consumer;
5b4a0ec0
DG
3277 }
3278
7972aab2
DG
3279 DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
3280 metadata->key, app->pid);
5b4a0ec0 3281
d88aee68 3282error_consumer:
b80f0b6c 3283 lttng_fd_put(LTTNG_FD_APPS, 1);
d88aee68 3284 delete_ust_app_channel(-1, metadata, app);
5b4a0ec0 3285error:
ce34fcd0 3286 pthread_mutex_unlock(&registry->lock);
e32d7f27
JG
3287 if (session) {
3288 session_put(session);
3289 }
ffe60014 3290 return ret;
5b4a0ec0
DG
3291}
3292
5b4a0ec0 3293/*
d88aee68
DG
3294 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
3295 * acquired before calling this function.
5b4a0ec0
DG
3296 */
3297struct ust_app *ust_app_find_by_pid(pid_t pid)
3298{
d88aee68 3299 struct ust_app *app = NULL;
bec39940
DG
3300 struct lttng_ht_node_ulong *node;
3301 struct lttng_ht_iter iter;
5b4a0ec0 3302
bec39940
DG
3303 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
3304 node = lttng_ht_iter_get_node_ulong(&iter);
5b4a0ec0
DG
3305 if (node == NULL) {
3306 DBG2("UST app no found with pid %d", pid);
3307 goto error;
3308 }
5b4a0ec0
DG
3309
3310 DBG2("Found UST app by pid %d", pid);
3311
d88aee68 3312 app = caa_container_of(node, struct ust_app, pid_n);
5b4a0ec0
DG
3313
3314error:
d88aee68 3315 return app;
5b4a0ec0
DG
3316}
3317
d88aee68
DG
3318/*
3319 * Allocate and init an UST app object using the registration information and
3320 * the command socket. This is called when the command socket connects to the
3321 * session daemon.
3322 *
3323 * The object is returned on success or else NULL.
3324 */
d0b96690 3325struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
5b4a0ec0 3326{
d0b96690
DG
3327 struct ust_app *lta = NULL;
3328
3329 assert(msg);
3330 assert(sock >= 0);
3331
3332 DBG3("UST app creating application for socket %d", sock);
5b4a0ec0 3333
173af62f
DG
3334 if ((msg->bits_per_long == 64 &&
3335 (uatomic_read(&ust_consumerd64_fd) == -EINVAL))
3336 || (msg->bits_per_long == 32 &&
3337 (uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
f943b0fb 3338 ERR("Registration failed: application \"%s\" (pid: %d) has "
d0b96690
DG
3339 "%d-bit long, but no consumerd for this size is available.\n",
3340 msg->name, msg->pid, msg->bits_per_long);
3341 goto error;
3f2c5fcc 3342 }
d0b96690 3343
5b4a0ec0
DG
3344 lta = zmalloc(sizeof(struct ust_app));
3345 if (lta == NULL) {
3346 PERROR("malloc");
d0b96690 3347 goto error;
5b4a0ec0
DG
3348 }
3349
3350 lta->ppid = msg->ppid;
3351 lta->uid = msg->uid;
3352 lta->gid = msg->gid;
d0b96690 3353
7753dea8 3354 lta->bits_per_long = msg->bits_per_long;
d0b96690
DG
3355 lta->uint8_t_alignment = msg->uint8_t_alignment;
3356 lta->uint16_t_alignment = msg->uint16_t_alignment;
3357 lta->uint32_t_alignment = msg->uint32_t_alignment;
3358 lta->uint64_t_alignment = msg->uint64_t_alignment;
3359 lta->long_alignment = msg->long_alignment;
3360 lta->byte_order = msg->byte_order;
3361
5b4a0ec0
DG
3362 lta->v_major = msg->major;
3363 lta->v_minor = msg->minor;
d9bf3ca4 3364 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
d0b96690 3365 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
10b56aef 3366 lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
d0b96690 3367 lta->notify_sock = -1;
d88aee68
DG
3368
3369 /* Copy name and make sure it's NULL terminated. */
3370 strncpy(lta->name, msg->name, sizeof(lta->name));
3371 lta->name[UST_APP_PROCNAME_LEN] = '\0';
3372
3373 /*
3374 * Before this can be called, when receiving the registration information,
3375 * the application compatibility is checked. So, at this point, the
3376 * application can work with this session daemon.
3377 */
d0b96690 3378 lta->compatible = 1;
5b4a0ec0 3379
852d0037 3380 lta->pid = msg->pid;
d0b96690 3381 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
852d0037 3382 lta->sock = sock;
fb45065e 3383 pthread_mutex_init(&lta->sock_lock, NULL);
d0b96690 3384 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
5b4a0ec0 3385
d42f20df 3386 CDS_INIT_LIST_HEAD(&lta->teardown_head);
d0b96690
DG
3387error:
3388 return lta;
3389}
3390
d88aee68
DG
3391/*
3392 * For a given application object, add it to every hash table.
3393 */
d0b96690
DG
3394void ust_app_add(struct ust_app *app)
3395{
3396 assert(app);
3397 assert(app->notify_sock >= 0);
3398
940c4592
JR
3399 app->registration_time = time(NULL);
3400
5b4a0ec0 3401 rcu_read_lock();
852d0037
DG
3402
3403 /*
3404 * On a re-registration, we want to kick out the previous registration of
3405 * that pid
3406 */
d0b96690 3407 lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
852d0037
DG
3408
3409 /*
3410 * The socket _should_ be unique until _we_ call close. So, an add_unique
3411 * for the ust_app_ht_by_sock is used, which assert-fails if the entry is
3412 * already in the table.
3413 */
d0b96690 3414 lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
852d0037 3415
d0b96690
DG
3416 /* Add application to the notify socket hash table. */
3417 lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
3418 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
5b4a0ec0 3419
d0b96690 3420 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
d88aee68
DG
3421 "notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
3422 app->gid, app->sock, app->name, app->notify_sock, app->v_major,
3423 app->v_minor);
5b4a0ec0 3424
d0b96690
DG
3425 rcu_read_unlock();
3426}
3427
d88aee68
DG
3428/*
3429 * Set the application version into the object.
3430 *
3431 * Return 0 on success; on failure, return a negative value that is either an
3432 * errno code or an LTTng-UST error code.
3433 */
d0b96690
DG
3434int ust_app_version(struct ust_app *app)
3435{
d88aee68
DG
3436 int ret;
3437
d0b96690 3438 assert(app);
d88aee68 3439
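	/* The sock_lock serializes ustctl commands issued on the app's command socket. */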
fb45065e 3440 pthread_mutex_lock(&app->sock_lock);
d88aee68 3441 ret = ustctl_tracer_version(app->sock, &app->version);
fb45065e 3442 pthread_mutex_unlock(&app->sock_lock);
d88aee68
DG
3443 if (ret < 0) {
3444 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
5368d366 3445 ERR("UST app %d version failed with ret %d", app->sock, ret);
d88aee68 3446 } else {
5368d366 3447 DBG3("UST app %d version failed. Application is dead", app->sock);
d88aee68
DG
3448 }
3449 }
3450
3451 return ret;
5b4a0ec0
DG
3452}
3453
3454/*
3455 * Unregister app by removing it from the global traceable app list and freeing
3456 * the data struct.
3457 *
3458 * The socket is already closed at this point, so there is no need to close it here.
3459 */
3460void ust_app_unregister(int sock)
3461{
3462 struct ust_app *lta;
bec39940 3463 struct lttng_ht_node_ulong *node;
c4b88406 3464 struct lttng_ht_iter ust_app_sock_iter;
bec39940 3465 struct lttng_ht_iter iter;
d42f20df 3466 struct ust_app_session *ua_sess;
525b0740 3467 int ret;
5b4a0ec0
DG
3468
3469 rcu_read_lock();
886459c6 3470
5b4a0ec0 3471 /* Get the node reference for a call_rcu */
c4b88406
MD
3472 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
3473 node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
d0b96690 3474 assert(node);
284d8f55 3475
852d0037 3476 lta = caa_container_of(node, struct ust_app, sock_n);
852d0037
DG
3477 DBG("PID %d unregistering with sock %d", lta->pid, sock);
3478
d88aee68 3479 /*
ce34fcd0
MD
3480 * For per-PID buffers, perform "push metadata" and flush all
3481 * application streams before removing app from hash tables,
3482 * ensuring proper behavior of data_pending check.
c4b88406 3483 * Remove sessions so they are not visible during deletion.
d88aee68 3484 */
d42f20df
DG
3485 cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
3486 node.node) {
7972aab2
DG
3487 struct ust_registry_session *registry;
3488
d42f20df
DG
3489 ret = lttng_ht_del(lta->sessions, &iter);
3490 if (ret) {
3491 /* The session was already removed so scheduled for teardown. */
3492 continue;
3493 }
3494
ce34fcd0
MD
3495 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
3496 (void) ust_app_flush_app_session(lta, ua_sess);
3497 }
c4b88406 3498
d42f20df
DG
3499 /*
3500 * Add session to list for teardown. This is safe since at this point we
3501 * are the only one using this list.
3502 */
d88aee68
DG
3503 pthread_mutex_lock(&ua_sess->lock);
3504
b161602a
MD
3505 if (ua_sess->deleted) {
3506 pthread_mutex_unlock(&ua_sess->lock);
3507 continue;
3508 }
3509
d88aee68
DG
3510 /*
3511 * Normally, this is done in the delete session process which is
3512 * executed in the call rcu below. However, upon unregistration we can't
3513 * afford to wait for the grace period before pushing data or else the
3514 * data pending feature can race between the unregistration and stop
3515 * command where the data pending command is sent *before* the grace
3516 * period ended.
3517 *
3518 * The close metadata below nullifies the metadata pointer in the
3519 * session so the delete session will NOT push/close a second time.
3520 */
7972aab2 3521 registry = get_session_registry(ua_sess);
ce34fcd0 3522 if (registry) {
7972aab2
DG
3523 /* Push metadata for application before freeing the application. */
3524 (void) push_metadata(registry, ua_sess->consumer);
3525
3526 /*
3527 * Don't ask to close metadata for global per UID buffers. Close
1b532a60
DG
3528 * metadata only when the trace session is destroyed in this case. Also, the
3529 * previous push metadata could have flagged the metadata registry to
3530 * close, so don't send a close command if it is already closed.
7972aab2 3531 */
ce34fcd0 3532 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
7972aab2
DG
3533 /* And ask to close it for this session registry. */
3534 (void) close_metadata(registry, ua_sess->consumer);
3535 }
3536 }
d42f20df 3537 cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
c4b88406 3538
d88aee68 3539 pthread_mutex_unlock(&ua_sess->lock);
d42f20df
DG
3540 }
3541
c4b88406
MD
3542 /* Remove application from PID hash table */
3543 ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
3544 assert(!ret);
3545
3546 /*
3547 * Remove application from notify hash table. The thread handling the
3548 * notify socket could have already deleted the node, so ignore any error because
c48239ca
JG
3549 * either way it's valid. The close of that socket is handled by the
3550 * apps_notify_thread.
c4b88406
MD
3551 */
3552 iter.iter.node = &lta->notify_sock_n.node;
3553 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
3554
3555 /*
3556 * Ignore the return value since the node might have already been removed by an
3557 * add-replace during app registration, because the PID can be reassigned by
3558 * the OS.
3559 */
3560 iter.iter.node = &lta->pid_n.node;
3561 ret = lttng_ht_del(ust_app_ht, &iter);
3562 if (ret) {
3563 DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
3564 lta->pid);
3565 }
3566
852d0037
DG
3567 /* Free memory */
3568 call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
3569
5b4a0ec0
DG
3570 rcu_read_unlock();
3571 return;
284d8f55
DG
3572}
3573
5b4a0ec0
DG
3574/*
3575 * Fill the events array with the names of all events of all registered apps.
3576 */
3577int ust_app_list_events(struct lttng_event **events)
421cb601 3578{
5b4a0ec0
DG
3579 int ret, handle;
3580 size_t nbmem, count = 0;
bec39940 3581 struct lttng_ht_iter iter;
5b4a0ec0 3582 struct ust_app *app;
c617c0c6 3583 struct lttng_event *tmp_event;
421cb601 3584
5b4a0ec0 3585 nbmem = UST_APP_EVENT_LIST_SIZE;
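	/* The array is grown geometrically below (nbmem is doubled) if the listing outgrows it. */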
c617c0c6
MD
3586 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
3587 if (tmp_event == NULL) {
5b4a0ec0
DG
3588 PERROR("zmalloc ust app events");
3589 ret = -ENOMEM;
421cb601
DG
3590 goto error;
3591 }
3592
5b4a0ec0 3593 rcu_read_lock();
421cb601 3594
852d0037 3595 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
90eaa0d2 3596 struct lttng_ust_tracepoint_iter uiter;
ac3bd9c0 3597
840cb59c 3598 health_code_update();
86acf0da 3599
e0c7ec2b
DG
3600 if (!app->compatible) {
3601 /*
3602 * TODO: In time, we should notify the caller of this error by
3603 * telling them that this is a version error.
3604 */
3605 continue;
3606 }
fb45065e 3607 pthread_mutex_lock(&app->sock_lock);
852d0037 3608 handle = ustctl_tracepoint_list(app->sock);
5b4a0ec0 3609 if (handle < 0) {
ffe60014
DG
3610 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3611 ERR("UST app list events getting handle failed for app pid %d",
3612 app->pid);
3613 }
fb45065e 3614 pthread_mutex_unlock(&app->sock_lock);
5b4a0ec0
DG
3615 continue;
3616 }
421cb601 3617
852d0037 3618 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
fb54cdbf 3619 &uiter)) != -LTTNG_UST_ERR_NOENT) {
ffe60014
DG
3620 /* Handle ustctl error. */
3621 if (ret < 0) {
fb45065e
MD
3622 int release_ret;
3623
a2ba1ab0 3624 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
ffe60014
DG
3625 ERR("UST app tp list get failed for app %d with ret %d",
3626 app->sock, ret);
3627 } else {
3628 DBG3("UST app tp list get failed. Application is dead");
3757b385
DG
3629 /*
3630 * This is normal behavior, an application can die during the
3631 * creation process. Don't report an error so the execution can
3632 * continue normally.
3633 */
3634 break;
ffe60014 3635 }
98f595d4 3636 free(tmp_event);
fb45065e 3637 release_ret = ustctl_release_handle(app->sock, handle);
68313703
JG
3638 if (release_ret < 0 &&
3639 release_ret != -LTTNG_UST_ERR_EXITING &&
3640 release_ret != -EPIPE) {
fb45065e
MD
3641 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3642 }
3643 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
3644 goto rcu_error;
3645 }
3646
840cb59c 3647 health_code_update();
815564d8 3648 if (count >= nbmem) {
d7b3776f 3649 /* In case the realloc fails, we free the memory */
53efb85a
MD
3650 struct lttng_event *new_tmp_event;
3651 size_t new_nbmem;
3652
3653 new_nbmem = nbmem << 1;
3654 DBG2("Reallocating event list from %zu to %zu entries",
3655 nbmem, new_nbmem);
3656 new_tmp_event = realloc(tmp_event,
3657 new_nbmem * sizeof(struct lttng_event));
3658 if (new_tmp_event == NULL) {
fb45065e
MD
3659 int release_ret;
3660
5b4a0ec0 3661 PERROR("realloc ust app events");
c617c0c6 3662 free(tmp_event);
5b4a0ec0 3663 ret = -ENOMEM;
fb45065e 3664 release_ret = ustctl_release_handle(app->sock, handle);
68313703
JG
3665 if (release_ret < 0 &&
3666 release_ret != -LTTNG_UST_ERR_EXITING &&
3667 release_ret != -EPIPE) {
fb45065e
MD
3668 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3669 }
3670 pthread_mutex_unlock(&app->sock_lock);
5b4a0ec0
DG
3671 goto rcu_error;
3672 }
53efb85a
MD
3673 /* Zero the new memory */
3674 memset(new_tmp_event + nbmem, 0,
3675 (new_nbmem - nbmem) * sizeof(struct lttng_event));
3676 nbmem = new_nbmem;
3677 tmp_event = new_tmp_event;
5b4a0ec0 3678 }
c617c0c6
MD
3679 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
3680 tmp_event[count].loglevel = uiter.loglevel;
3681 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
3682 tmp_event[count].pid = app->pid;
3683 tmp_event[count].enabled = -1;
5b4a0ec0 3684 count++;
421cb601 3685 }
fb45065e
MD
3686 ret = ustctl_release_handle(app->sock, handle);
3687 pthread_mutex_unlock(&app->sock_lock);
68313703 3688 if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
fb45065e
MD
3689 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
3690 }
421cb601
DG
3691 }
3692
5b4a0ec0 3693 ret = count;
c617c0c6 3694 *events = tmp_event;
421cb601 3695
5b4a0ec0 3696 DBG2("UST app list events done (%zu events)", count);
421cb601 3697
5b4a0ec0
DG
3698rcu_error:
3699 rcu_read_unlock();
421cb601 3700error:
840cb59c 3701 health_code_update();
5b4a0ec0 3702 return ret;
421cb601
DG
3703}
3704
f37d259d
MD
3705/*
3706 * Fill the fields array with all event fields of all registered apps.
3707 */
3708int ust_app_list_event_fields(struct lttng_event_field **fields)
3709{
3710 int ret, handle;
3711 size_t nbmem, count = 0;
3712 struct lttng_ht_iter iter;
3713 struct ust_app *app;
c617c0c6 3714 struct lttng_event_field *tmp_event;
f37d259d
MD
3715
3716 nbmem = UST_APP_EVENT_LIST_SIZE;
c617c0c6
MD
3717 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3718 if (tmp_event == NULL) {
f37d259d
MD
3719 PERROR("zmalloc ust app event fields");
3720 ret = -ENOMEM;
3721 goto error;
3722 }
3723
3724 rcu_read_lock();
3725
3726 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3727 struct lttng_ust_field_iter uiter;
3728
840cb59c 3729 health_code_update();
86acf0da 3730
f37d259d
MD
3731 if (!app->compatible) {
3732 /*
3733 * TODO: In time, we should notify the caller of this error by
3734 * telling them that this is a version error.
3735 */
3736 continue;
3737 }
fb45065e 3738 pthread_mutex_lock(&app->sock_lock);
f37d259d
MD
3739 handle = ustctl_tracepoint_field_list(app->sock);
3740 if (handle < 0) {
ffe60014
DG
3741 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3742 ERR("UST app list field getting handle failed for app pid %d",
3743 app->pid);
3744 }
fb45065e 3745 pthread_mutex_unlock(&app->sock_lock);
f37d259d
MD
3746 continue;
3747 }
3748
3749 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
fb54cdbf 3750 &uiter)) != -LTTNG_UST_ERR_NOENT) {
ffe60014
DG
3751 /* Handle ustctl error. */
3752 if (ret < 0) {
fb45065e
MD
3753 int release_ret;
3754
a2ba1ab0 3755 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
ffe60014
DG
3756 ERR("UST app tp list field failed for app %d with ret %d",
3757 app->sock, ret);
3758 } else {
3759 DBG3("UST app tp list field failed. Application is dead");
3757b385
DG
3760 /*
3761 * This is normal behavior, an application can die during the
3762 * creation process. Don't report an error so the execution can
98f595d4 3763 * continue normally. Reset list and count for next app.
3757b385
DG
3764 */
3765 break;
ffe60014 3766 }
98f595d4 3767 free(tmp_event);
fb45065e
MD
3768 release_ret = ustctl_release_handle(app->sock, handle);
3769 pthread_mutex_unlock(&app->sock_lock);
68313703
JG
3770 if (release_ret < 0 &&
3771 release_ret != -LTTNG_UST_ERR_EXITING &&
3772 release_ret != -EPIPE) {
fb45065e
MD
3773 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3774 }
ffe60014
DG
3775 goto rcu_error;
3776 }
3777
840cb59c 3778 health_code_update();
f37d259d 3779 if (count >= nbmem) {
d7b3776f 3780 /* In case the realloc fails, we free the memory */
53efb85a
MD
3781 struct lttng_event_field *new_tmp_event;
3782 size_t new_nbmem;
3783
3784 new_nbmem = nbmem << 1;
3785 DBG2("Reallocating event field list from %zu to %zu entries",
3786 nbmem, new_nbmem);
3787 new_tmp_event = realloc(tmp_event,
3788 new_nbmem * sizeof(struct lttng_event_field));
3789 if (new_tmp_event == NULL) {
fb45065e
MD
3790 int release_ret;
3791
f37d259d 3792 PERROR("realloc ust app event fields");
c617c0c6 3793 free(tmp_event);
f37d259d 3794 ret = -ENOMEM;
fb45065e
MD
3795 release_ret = ustctl_release_handle(app->sock, handle);
3796 pthread_mutex_unlock(&app->sock_lock);
68313703
JG
3797 if (release_ret &&
3798 release_ret != -LTTNG_UST_ERR_EXITING &&
3799 release_ret != -EPIPE) {
fb45065e
MD
3800 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3801 }
f37d259d
MD
3802 goto rcu_error;
3803 }
53efb85a
MD
3804 /* Zero the new memory */
3805 memset(new_tmp_event + nbmem, 0,
3806 (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
3807 nbmem = new_nbmem;
3808 tmp_event = new_tmp_event;
f37d259d 3809 }
f37d259d 3810
c617c0c6 3811 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
2e84128e
DG
3812 /* Mapping between these enums matches 1 to 1. */
3813 tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
c617c0c6 3814 tmp_event[count].nowrite = uiter.nowrite;
f37d259d 3815
c617c0c6
MD
3816 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
3817 tmp_event[count].event.loglevel = uiter.loglevel;
2e84128e 3818 tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
c617c0c6
MD
3819 tmp_event[count].event.pid = app->pid;
3820 tmp_event[count].event.enabled = -1;
f37d259d
MD
3821 count++;
3822 }
fb45065e
MD
3823 ret = ustctl_release_handle(app->sock, handle);
3824 pthread_mutex_unlock(&app->sock_lock);
68313703
JG
3825 if (ret < 0 &&
3826 ret != -LTTNG_UST_ERR_EXITING &&
3827 ret != -EPIPE) {
fb45065e
MD
3828 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
3829 }
f37d259d
MD
3830 }
3831
3832 ret = count;
c617c0c6 3833 *fields = tmp_event;
f37d259d
MD
3834
3835 DBG2("UST app list event fields done (%zu events)", count);
3836
3837rcu_error:
3838 rcu_read_unlock();
3839error:
840cb59c 3840 health_code_update();
f37d259d
MD
3841 return ret;
3842}
3843
5b4a0ec0
DG
3844/*
3845 * Free and clean all traceable apps of the global list.
36b588ed
MD
3846 *
3847 * Should _NOT_ be called with RCU read-side lock held.
5b4a0ec0
DG
3848 */
3849void ust_app_clean_list(void)
421cb601 3850{
5b4a0ec0 3851 int ret;
659ed79f 3852 struct ust_app *app;
bec39940 3853 struct lttng_ht_iter iter;
421cb601 3854
5b4a0ec0 3855 DBG2("UST app cleaning registered apps hash table");
421cb601 3856
5b4a0ec0 3857 rcu_read_lock();
421cb601 3858
faadaa3a
JG
3859 /* Cleanup notify socket hash table */
3860 if (ust_app_ht_by_notify_sock) {
3861 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
3862 notify_sock_n.node) {
3863 struct cds_lfht_node *node;
3864 struct ust_app *app;
3865
3866 node = cds_lfht_iter_get_node(&iter.iter);
3867 if (!node) {
3868 continue;
3869 }
3870
3871 app = container_of(node, struct ust_app,
3872 notify_sock_n.node);
3873 ust_app_notify_sock_unregister(app->notify_sock);
3874 }
3875 }
3876
f1b711c4
MD
3877 if (ust_app_ht) {
3878 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3879 ret = lttng_ht_del(ust_app_ht, &iter);
3880 assert(!ret);
3881 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
3882 }
421cb601
DG
3883 }
3884
852d0037 3885 /* Cleanup socket hash table */
f1b711c4
MD
3886 if (ust_app_ht_by_sock) {
3887 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
3888 sock_n.node) {
3889 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
3890 assert(!ret);
3891 }
bec39940 3892 }
852d0037 3893
36b588ed 3894 rcu_read_unlock();
d88aee68 3895
bec39940 3896 /* Destroy is done only when the ht is empty */
f1b711c4
MD
3897 if (ust_app_ht) {
3898 ht_cleanup_push(ust_app_ht);
3899 }
3900 if (ust_app_ht_by_sock) {
3901 ht_cleanup_push(ust_app_ht_by_sock);
3902 }
3903 if (ust_app_ht_by_notify_sock) {
3904 ht_cleanup_push(ust_app_ht_by_notify_sock);
3905 }
5b4a0ec0
DG
3906}
3907
3908/*
3909 * Init UST app hash table.
3910 */
57703f6e 3911int ust_app_ht_alloc(void)
5b4a0ec0 3912{
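	/*
	 * Applications are indexed three ways: by PID (ust_app_ht), by command
	 * socket (ust_app_ht_by_sock) and by notify socket
	 * (ust_app_ht_by_notify_sock).
	 */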
bec39940 3913 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
57703f6e
MD
3914 if (!ust_app_ht) {
3915 return -1;
3916 }
852d0037 3917 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
57703f6e
MD
3918 if (!ust_app_ht_by_sock) {
3919 return -1;
3920 }
d0b96690 3921 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
57703f6e
MD
3922 if (!ust_app_ht_by_notify_sock) {
3923 return -1;
3924 }
3925 return 0;
421cb601
DG
3926}
3927
78f0bacd
DG
3928/*
3929 * For a specific UST session, disable the channel for all registered apps.
3930 */
35a9059d 3931int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
78f0bacd
DG
3932 struct ltt_ust_channel *uchan)
3933{
3934 int ret = 0;
bec39940
DG
3935 struct lttng_ht_iter iter;
3936 struct lttng_ht_node_str *ua_chan_node;
78f0bacd
DG
3937 struct ust_app *app;
3938 struct ust_app_session *ua_sess;
8535a6d9 3939 struct ust_app_channel *ua_chan;
78f0bacd 3940
88e3c2f5 3941 assert(usess->active);
d9bf3ca4 3942 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
a991f516 3943 uchan->name, usess->id);
78f0bacd
DG
3944
3945 rcu_read_lock();
3946
3947 /* For every registered applications */
852d0037 3948 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
bec39940 3949 struct lttng_ht_iter uiter;
e0c7ec2b
DG
3950 if (!app->compatible) {
3951 /*
3952 * TODO: In time, we should notify the caller of this error by
3953 * telling them that this is a version error.
3954 */
3955 continue;
3956 }
78f0bacd
DG
3957 ua_sess = lookup_session_by_app(usess, app);
3958 if (ua_sess == NULL) {
3959 continue;
3960 }
3961
8535a6d9 3962 /* Get channel */
bec39940
DG
3963 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3964 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
8535a6d9
DG
3965 /* If the session is found for the app, the channel must be there */
3966 assert(ua_chan_node);
3967
3968 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3969 /* The channel must not be already disabled */
3970 assert(ua_chan->enabled == 1);
3971
3972 /* Disable channel onto application */
3973 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
78f0bacd
DG
3974 if (ret < 0) {
3975 /* XXX: We might want to report this error at some point... */
3976 continue;
3977 }
3978 }
3979
3980 rcu_read_unlock();
78f0bacd
DG
3981 return ret;
3982}
3983
3984/*
3985 * For a specific UST session, enable the channel for all registered apps.
3986 */
35a9059d 3987int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
78f0bacd
DG
3988 struct ltt_ust_channel *uchan)
3989{
3990 int ret = 0;
bec39940 3991 struct lttng_ht_iter iter;
78f0bacd
DG
3992 struct ust_app *app;
3993 struct ust_app_session *ua_sess;
3994
88e3c2f5 3995 assert(usess->active);
d9bf3ca4 3996 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
a991f516 3997 uchan->name, usess->id);
78f0bacd
DG
3998
3999 rcu_read_lock();
4000
4001 /* For every registered applications */
852d0037 4002 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
4003 if (!app->compatible) {
4004 /*
4005 * TODO: In time, we should notify the caller of this error by
4006 * telling them that this is a version error.
4007 */
4008 continue;
4009 }
78f0bacd
DG
4010 ua_sess = lookup_session_by_app(usess, app);
4011 if (ua_sess == NULL) {
4012 continue;
4013 }
4014
4015 /* Enable channel onto application */
4016 ret = enable_ust_app_channel(ua_sess, uchan, app);
4017 if (ret < 0) {
4018 /* XXX: We might want to report this error at some point... */
4019 continue;
4020 }
4021 }
4022
4023 rcu_read_unlock();
78f0bacd
DG
4024 return ret;
4025}
4026
b0a40d28
DG
4027/*
4028 * Disable an event in a channel and for a specific session.
4029 */
35a9059d
DG
4030int ust_app_disable_event_glb(struct ltt_ust_session *usess,
4031 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
b0a40d28
DG
4032{
4033 int ret = 0;
bec39940 4034 struct lttng_ht_iter iter, uiter;
700c5a9d 4035 struct lttng_ht_node_str *ua_chan_node;
b0a40d28
DG
4036 struct ust_app *app;
4037 struct ust_app_session *ua_sess;
4038 struct ust_app_channel *ua_chan;
4039 struct ust_app_event *ua_event;
4040
88e3c2f5 4041 assert(usess->active);
b0a40d28 4042 DBG("UST app disabling event %s for all apps in channel "
d9bf3ca4
MD
4043 "%s for session id %" PRIu64,
4044 uevent->attr.name, uchan->name, usess->id);
b0a40d28
DG
4045
4046 rcu_read_lock();
4047
4048 /* For all registered applications */
852d0037 4049 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
4050 if (!app->compatible) {
4051 /*
4052 * TODO: In time, we should notify the caller of this error by
4053 * telling them that this is a version error.
4054 */
4055 continue;
4056 }
b0a40d28
DG
4057 ua_sess = lookup_session_by_app(usess, app);
4058 if (ua_sess == NULL) {
4059 /* Next app */
4060 continue;
4061 }
4062
4063 /* Lookup channel in the ust app session */
bec39940
DG
4064 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4065 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
b0a40d28 4066 if (ua_chan_node == NULL) {
d9bf3ca4 4067 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d. "
852d0037 4068 "Skipping", uchan->name, usess->id, app->pid);
b0a40d28
DG
4069 continue;
4070 }
4071 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4072
700c5a9d
JR
4073 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4074 uevent->filter, uevent->attr.loglevel,
4075 uevent->exclusion);
4076 if (ua_event == NULL) {
b0a40d28 4077 DBG2("Event %s not found in channel %s for app pid %d. "
852d0037 4078 "Skipping", uevent->attr.name, uchan->name, app->pid);
b0a40d28
DG
4079 continue;
4080 }
b0a40d28 4081
7f79d3a1 4082 ret = disable_ust_app_event(ua_sess, ua_event, app);
b0a40d28
DG
4083 if (ret < 0) {
4084 /* XXX: Report error someday... */
4085 continue;
4086 }
4087 }
4088
4089 rcu_read_unlock();
88e3c2f5
JG
4090 return ret;
4091}
4092
4093/* The ua_sess lock must be held by the caller. */
4094static
4095int ust_app_channel_create(struct ltt_ust_session *usess,
4096 struct ust_app_session *ua_sess,
4097 struct ltt_ust_channel *uchan, struct ust_app *app,
4098 struct ust_app_channel **_ua_chan)
4099{
4100 int ret = 0;
4101 struct ust_app_channel *ua_chan = NULL;
4102
4103 assert(ua_sess);
4104 ASSERT_LOCKED(ua_sess->lock);
4105
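	/*
	 * The metadata channel is special-cased: only its attributes are
	 * recorded in the session here; the actual metadata channel is created
	 * later, when tracing starts (see create_ust_app_metadata()).
	 */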
4106 if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
4107 sizeof(uchan->name))) {
4108 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
4109 &uchan->attr);
4110 ret = 0;
4111 } else {
4112 struct ltt_ust_context *uctx = NULL;
4113
4114 /*
4115 * Create channel onto application and synchronize its
4116 * configuration.
4117 */
4118 ret = ust_app_channel_allocate(ua_sess, uchan,
4119 LTTNG_UST_CHAN_PER_CPU, usess,
4120 &ua_chan);
4121 if (ret == 0) {
4122 ret = ust_app_channel_send(app, usess,
4123 ua_sess, ua_chan);
4124 } else {
4125 goto end;
4126 }
4127
4128 /* Add contexts. */
4129 cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
4130 ret = create_ust_app_channel_context(ua_chan,
4131 &uctx->ctx, app);
4132 if (ret) {
4133 goto end;
4134 }
4135 }
4136 }
4137 if (ret < 0) {
4138 switch (ret) {
4139 case -ENOTCONN:
4140 /*
4141 * The application's socket is not valid. Either a bad socket
4142 * or a timeout on it. We can't inform the caller that for a
4143 * specific app, the session failed, so let's continue here.
4144 */
4145 ret = 0; /* Not an error. */
4146 break;
4147 case -ENOMEM:
4148 default:
4149 break;
4150 }
4151 }
0498a00c 4152end:
88e3c2f5
JG
4153 if (ret == 0 && _ua_chan) {
4154 /*
4155 * Only return the application's channel on success. Note
4156 * that the channel can still be part of the application's
4157 * channel hashtable on error.
4158 */
4159 *_ua_chan = ua_chan;
4160 }
b0a40d28
DG
4161 return ret;
4162}
4163
5b4a0ec0 4164/*
edb67388 4165 * Enable event for a specific session and channel on the tracer.
5b4a0ec0 4166 */
35a9059d 4167int ust_app_enable_event_glb(struct ltt_ust_session *usess,
48842b30
DG
4168 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4169{
4170 int ret = 0;
bec39940 4171 struct lttng_ht_iter iter, uiter;
18eace3b 4172 struct lttng_ht_node_str *ua_chan_node;
48842b30
DG
4173 struct ust_app *app;
4174 struct ust_app_session *ua_sess;
4175 struct ust_app_channel *ua_chan;
4176 struct ust_app_event *ua_event;
48842b30 4177
88e3c2f5 4178 assert(usess->active);
d9bf3ca4 4179 DBG("UST app enabling event %s for all apps for session id %" PRIu64,
a991f516 4180 uevent->attr.name, usess->id);
48842b30 4181
edb67388
DG
4182 /*
4183 * NOTE: At this point, this function is called only if the session and
4184 * channel passed are already created for all apps and are also enabled on
4185 * the tracer.
4186 */
4187
48842b30 4188 rcu_read_lock();
421cb601
DG
4189
4190 /* For all registered applications */
852d0037 4191 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
4192 if (!app->compatible) {
4193 /*
4194 * TODO: In time, we should notify the caller of this error by
4195 * telling them that this is a version error.
4196 */
4197 continue;
4198 }
edb67388 4199 ua_sess = lookup_session_by_app(usess, app);
c4a1715b
DG
4200 if (!ua_sess) {
4201 /* The application has problem or is probably dead. */
4202 continue;
4203 }
ba767faf 4204
d0b96690
DG
4205 pthread_mutex_lock(&ua_sess->lock);
4206
b161602a
MD
4207 if (ua_sess->deleted) {
4208 pthread_mutex_unlock(&ua_sess->lock);
4209 continue;
4210 }
4211
edb67388 4212 /* Lookup channel in the ust app session */
bec39940
DG
4213 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4214 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
a7169585
MD
4215 /*
4216 * It is possible that the channel cannot be found if
4217 * the channel/event creation occurs concurrently with
4218 * an application exit.
4219 */
4220 if (!ua_chan_node) {
4221 pthread_mutex_unlock(&ua_sess->lock);
4222 continue;
4223 }
edb67388
DG
4224
4225 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4226
18eace3b
DG
4227 /* Get event node */
4228 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
39c5a3a7 4229 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
18eace3b 4230 if (ua_event == NULL) {
7f79d3a1 4231 DBG3("UST app enable event %s not found for app PID %d. "
852d0037 4232 "Skipping app", uevent->attr.name, app->pid);
d0b96690 4233 goto next_app;
35a9059d 4234 }
35a9059d
DG
4235
4236 ret = enable_ust_app_event(ua_sess, ua_event, app);
4237 if (ret < 0) {
d0b96690 4238 pthread_mutex_unlock(&ua_sess->lock);
7f79d3a1 4239 goto error;
48842b30 4240 }
d0b96690
DG
4241 next_app:
4242 pthread_mutex_unlock(&ua_sess->lock);
edb67388
DG
4243 }
4244
7f79d3a1 4245error:
edb67388 4246 rcu_read_unlock();
edb67388
DG
4247 return ret;
4248}
4249
4250/*
4251 * For a specific existing UST session and UST channel, creates the event for
4252 * all registered apps.
4253 */
35a9059d 4254int ust_app_create_event_glb(struct ltt_ust_session *usess,
edb67388
DG
4255 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4256{
4257 int ret = 0;
bec39940
DG
4258 struct lttng_ht_iter iter, uiter;
4259 struct lttng_ht_node_str *ua_chan_node;
edb67388
DG
4260 struct ust_app *app;
4261 struct ust_app_session *ua_sess;
4262 struct ust_app_channel *ua_chan;
4263
88e3c2f5 4264 assert(usess->active);
d9bf3ca4 4265 DBG("UST app creating event %s for all apps for session id %" PRIu64,
a991f516 4266 uevent->attr.name, usess->id);
edb67388 4267
edb67388
DG
4268 rcu_read_lock();
4269
4270 /* For all registered applications */
852d0037 4271 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
4272 if (!app->compatible) {
4273 /*
4274 * TODO: In time, we should notice the caller of this error by
4275 * telling him that this is a version error.
4276 */
4277 continue;
4278 }
edb67388 4279 ua_sess = lookup_session_by_app(usess, app);
c4a1715b
DG
4280 if (!ua_sess) {
4281 /* The application has problem or is probably dead. */
4282 continue;
4283 }
48842b30 4284
d0b96690 4285 pthread_mutex_lock(&ua_sess->lock);
b161602a
MD
4286
4287 if (ua_sess->deleted) {
4288 pthread_mutex_unlock(&ua_sess->lock);
4289 continue;
4290 }
4291
48842b30 4292 /* Lookup channel in the ust app session */
bec39940
DG
4293 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4294 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
edb67388
DG
4295 /* If the channel is not found, there is a code flow error */
4296 assert(ua_chan_node);
4297
48842b30
DG
4298 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4299
edb67388 4300 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
d0b96690 4301 pthread_mutex_unlock(&ua_sess->lock);
edb67388 4302 if (ret < 0) {
49c336c1 4303 if (ret != -LTTNG_UST_ERR_EXIST) {
fc34caaa
DG
4304 /* Possible value at this point: -ENOMEM. If so, we stop! */
4305 break;
4306 }
4307 DBG2("UST app event %s already exist on app PID %d",
852d0037 4308 uevent->attr.name, app->pid);
5b4a0ec0 4309 continue;
48842b30 4310 }
48842b30 4311 }
5b4a0ec0 4312
48842b30 4313 rcu_read_unlock();
48842b30
DG
4314 return ret;
4315}
4316
5b4a0ec0
DG
4317/*
4318 * Start tracing for a specific UST session and app.
fad1ed2f
JR
4319 *
4320 * Called with UST app session lock held.
4321 *
5b4a0ec0 4322 */
b34cbebf 4323static
421cb601 4324int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
48842b30
DG
4325{
4326 int ret = 0;
48842b30 4327 struct ust_app_session *ua_sess;
48842b30 4328
852d0037 4329 DBG("Starting tracing for ust app pid %d", app->pid);
5cf5d0e7 4330
509cbaf8
MD
4331 rcu_read_lock();
4332
e0c7ec2b
DG
4333 if (!app->compatible) {
4334 goto end;
4335 }
4336
421cb601
DG
4337 ua_sess = lookup_session_by_app(usess, app);
4338 if (ua_sess == NULL) {
d42f20df
DG
4339 /* The session is in teardown process. Ignore and continue. */
4340 goto end;
421cb601 4341 }
48842b30 4342
d0b96690
DG
4343 pthread_mutex_lock(&ua_sess->lock);
4344
b161602a
MD
4345 if (ua_sess->deleted) {
4346 pthread_mutex_unlock(&ua_sess->lock);
4347 goto end;
4348 }
4349
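	/* The app is already started (enabled) for this session; nothing more to do. */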
b0a1c741
JR
4350 if (ua_sess->enabled) {
4351 pthread_mutex_unlock(&ua_sess->lock);
4352 goto end;
4353 }
4354
aea829b3
DG
4355 /* Upon restart, we skip the setup, already done */
4356 if (ua_sess->started) {
8be98f9a 4357 goto skip_setup;
aea829b3 4358 }
8be98f9a 4359
d65d2de8
DG
4360 /*
4361 * Create the metadata for the application. This returns gracefully if
4362 * metadata was already set for the session.
4363 */
ad7a9107 4364 ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
421cb601 4365 if (ret < 0) {
d0b96690 4366 goto error_unlock;
421cb601 4367 }
48842b30 4368
840cb59c 4369 health_code_update();
86acf0da 4370
8be98f9a 4371skip_setup:
a945cdc7 4372 /* This starts the UST tracing */
fb45065e 4373 pthread_mutex_lock(&app->sock_lock);
852d0037 4374 ret = ustctl_start_session(app->sock, ua_sess->handle);
fb45065e 4375 pthread_mutex_unlock(&app->sock_lock);
421cb601 4376 if (ret < 0) {
ffe60014
DG
4377 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4378 ERR("Error starting tracing for app pid: %d (ret: %d)",
4379 app->pid, ret);
4380 } else {
4381 DBG("UST app start session failed. Application is dead.");
3757b385
DG
4382 /*
4383 * This is normal behavior, an application can die during the
4384 * creation process. Don't report an error so the execution can
4385 * continue normally.
4386 */
4387 pthread_mutex_unlock(&ua_sess->lock);
4388 goto end;
ffe60014 4389 }
d0b96690 4390 goto error_unlock;
421cb601 4391 }
5b4a0ec0 4392
55c3953d
DG
4393 /* Indicate that the session has been started once */
4394 ua_sess->started = 1;
b0a1c741 4395 ua_sess->enabled = 1;
55c3953d 4396
d0b96690
DG
4397 pthread_mutex_unlock(&ua_sess->lock);
4398
840cb59c 4399 health_code_update();
86acf0da 4400
421cb601 4401 /* Quiescent wait after starting trace */
fb45065e 4402 pthread_mutex_lock(&app->sock_lock);
ffe60014 4403 ret = ustctl_wait_quiescent(app->sock);
fb45065e 4404 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
4405 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4406 ERR("UST app wait quiescent failed for app pid %d ret %d",
4407 app->pid, ret);
4408 }
48842b30 4409
e0c7ec2b
DG
4410end:
4411 rcu_read_unlock();
840cb59c 4412 health_code_update();
421cb601 4413 return 0;
48842b30 4414
d0b96690
DG
4415error_unlock:
4416 pthread_mutex_unlock(&ua_sess->lock);
509cbaf8 4417 rcu_read_unlock();
840cb59c 4418 health_code_update();
421cb601
DG
4419 return -1;
4420}
48842b30 4421
8be98f9a
MD
4422/*
4423 * Stop tracing for a specific UST session and app.
4424 */
b34cbebf 4425static
8be98f9a
MD
4426int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
4427{
4428 int ret = 0;
4429 struct ust_app_session *ua_sess;
7972aab2 4430 struct ust_registry_session *registry;
8be98f9a 4431
852d0037 4432 DBG("Stopping tracing for ust app pid %d", app->pid);
8be98f9a
MD
4433
4434 rcu_read_lock();
4435
e0c7ec2b 4436 if (!app->compatible) {
d88aee68 4437 goto end_no_session;
e0c7ec2b
DG
4438 }
4439
8be98f9a
MD
4440 ua_sess = lookup_session_by_app(usess, app);
4441 if (ua_sess == NULL) {
d88aee68 4442 goto end_no_session;
8be98f9a
MD
4443 }
4444
d88aee68
DG
4445 pthread_mutex_lock(&ua_sess->lock);
4446
b161602a
MD
4447 if (ua_sess->deleted) {
4448 pthread_mutex_unlock(&ua_sess->lock);
4449 goto end_no_session;
4450 }
4451
9bc07046
DG
4452 /*
4453 * If started = 0, it means that stop trace has been called for a session
c45536e1
DG
4454 * that was never started. It's possible since we can have a failed start
4455 * from either the application manager thread or the command thread. Simply
4456 * indicate that this is a stop error.
9bc07046 4457 */
f9dfc3d9 4458 if (!ua_sess->started) {
c45536e1
DG
4459 goto error_rcu_unlock;
4460 }
7db205b5 4461
840cb59c 4462 health_code_update();
86acf0da 4463
9d6c7d3f 4464 /* This inhibits UST tracing */
fb45065e 4465 pthread_mutex_lock(&app->sock_lock);
852d0037 4466 ret = ustctl_stop_session(app->sock, ua_sess->handle);
fb45065e 4467 pthread_mutex_unlock(&app->sock_lock);
9d6c7d3f 4468 if (ret < 0) {
ffe60014
DG
4469 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4470 ERR("Error stopping tracing for app pid: %d (ret: %d)",
4471 app->pid, ret);
4472 } else {
4473 DBG("UST app stop session failed. Application is dead.");
3757b385
DG
4474 /*
4475 * This is normal behavior, an application can die during the
4476 * creation process. Don't report an error so the execution can
4477 * continue normally.
4478 */
4479 goto end_unlock;
ffe60014 4480 }
9d6c7d3f
DG
4481 goto error_rcu_unlock;
4482 }
4483
840cb59c 4484 health_code_update();
b0a1c741 4485 ua_sess->enabled = 0;
86acf0da 4486
9d6c7d3f 4487 /* Quiescent wait after stopping trace */
fb45065e 4488 pthread_mutex_lock(&app->sock_lock);
ffe60014 4489 ret = ustctl_wait_quiescent(app->sock);
fb45065e 4490 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
4491 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4492 ERR("UST app wait quiescent failed for app pid %d ret %d",
4493 app->pid, ret);
4494 }
9d6c7d3f 4495
840cb59c 4496 health_code_update();
86acf0da 4497
b34cbebf 4498 registry = get_session_registry(ua_sess);
fad1ed2f
JR
4499
4500 /* The UST app session is held registry shall not be null. */
b34cbebf 4501 assert(registry);
1b532a60 4502
ce34fcd0
MD
4503 /* Push metadata for application before freeing the application. */
4504 (void) push_metadata(registry, ua_sess->consumer);
b34cbebf 4505
3757b385 4506end_unlock:
b34cbebf
MD
4507 pthread_mutex_unlock(&ua_sess->lock);
4508end_no_session:
4509 rcu_read_unlock();
4510 health_code_update();
4511 return 0;
4512
4513error_rcu_unlock:
4514 pthread_mutex_unlock(&ua_sess->lock);
4515 rcu_read_unlock();
4516 health_code_update();
4517 return -1;
4518}
4519
b34cbebf 4520static
c4b88406
MD
4521int ust_app_flush_app_session(struct ust_app *app,
4522 struct ust_app_session *ua_sess)
b34cbebf 4523{
c4b88406 4524 int ret, retval = 0;
b34cbebf 4525 struct lttng_ht_iter iter;
b34cbebf 4526 struct ust_app_channel *ua_chan;
c4b88406 4527 struct consumer_socket *socket;
b34cbebf 4528
c4b88406 4529 DBG("Flushing app session buffers for ust app pid %d", app->pid);
b34cbebf
MD
4530
4531 rcu_read_lock();
4532
4533 if (!app->compatible) {
c4b88406 4534 goto end_not_compatible;
b34cbebf
MD
4535 }
4536
4537 pthread_mutex_lock(&ua_sess->lock);
4538
b161602a
MD
4539 if (ua_sess->deleted) {
4540 goto end_deleted;
4541 }
4542
b34cbebf
MD
4543 health_code_update();
4544
9d6c7d3f 4545 /* Flushing buffers */
c4b88406
MD
4546 socket = consumer_find_socket_by_bitness(app->bits_per_long,
4547 ua_sess->consumer);
ce34fcd0
MD
4548
4549 /* Flush buffers and push metadata. */
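	/*
	 * Only per-PID buffers are flushed here; per-UID buffers are flushed
	 * once per session by ust_app_flush_session().
	 */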
4550 switch (ua_sess->buffer_type) {
4551 case LTTNG_BUFFER_PER_PID:
4552 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
4553 node.node) {
4554 health_code_update();
ce34fcd0
MD
4555 ret = consumer_flush_channel(socket, ua_chan->key);
4556 if (ret) {
4557 ERR("Error flushing consumer channel");
4558 retval = -1;
4559 continue;
4560 }
8be98f9a 4561 }
ce34fcd0
MD
4562 break;
4563 case LTTNG_BUFFER_PER_UID:
4564 default:
4565 assert(0);
4566 break;
8be98f9a 4567 }
8be98f9a 4568
840cb59c 4569 health_code_update();
86acf0da 4570
b161602a 4571end_deleted:
d88aee68 4572 pthread_mutex_unlock(&ua_sess->lock);
ce34fcd0 4573
c4b88406
MD
4574end_not_compatible:
4575 rcu_read_unlock();
4576 health_code_update();
4577 return retval;
4578}
4579
4580/*
ce34fcd0
MD
4581 * Flush buffers for all applications for a specific UST session.
4582 * Called with UST session lock held.
c4b88406
MD
4583 */
4584static
ce34fcd0 4585int ust_app_flush_session(struct ltt_ust_session *usess)
c4b88406
MD
4586
4587{
99b1411c 4588 int ret = 0;
c4b88406 4589
ce34fcd0 4590 DBG("Flushing session buffers for all ust apps");
c4b88406
MD
4591
4592 rcu_read_lock();
4593
ce34fcd0
MD
4594 /* Flush buffers and push metadata. */
4595 switch (usess->buffer_type) {
4596 case LTTNG_BUFFER_PER_UID:
4597 {
4598 struct buffer_reg_uid *reg;
4599 struct lttng_ht_iter iter;
4600
4601 /* Flush all per UID buffers associated to that session. */
4602 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
4603 struct ust_registry_session *ust_session_reg;
4604 struct buffer_reg_channel *reg_chan;
4605 struct consumer_socket *socket;
4606
4607 /* Get consumer socket to use to push the metadata.*/
4608 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
4609 usess->consumer);
4610 if (!socket) {
4611 /* Ignore request if no consumer is found for the session. */
4612 continue;
4613 }
4614
4615 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
4616 reg_chan, node.node) {
4617 /*
4618 * The following call will print error values so the return
4619 * code is of little importance because whatever happens, we
4620 * have to try them all.
4621 */
4622 (void) consumer_flush_channel(socket, reg_chan->consumer_key);
4623 }
4624
4625 ust_session_reg = reg->registry->reg.ust;
4626 /* Push metadata. */
4627 (void) push_metadata(ust_session_reg, usess->consumer);
4628 }
ce34fcd0
MD
4629 break;
4630 }
4631 case LTTNG_BUFFER_PER_PID:
4632 {
4633 struct ust_app_session *ua_sess;
4634 struct lttng_ht_iter iter;
4635 struct ust_app *app;
4636
4637 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4638 ua_sess = lookup_session_by_app(usess, app);
4639 if (ua_sess == NULL) {
4640 continue;
4641 }
4642 (void) ust_app_flush_app_session(app, ua_sess);
4643 }
4644 break;
4645 }
4646 default:
99b1411c 4647 ret = -1;
ce34fcd0
MD
4648 assert(0);
4649 break;
c4b88406 4650 }
c4b88406 4651
7db205b5 4652 rcu_read_unlock();
840cb59c 4653 health_code_update();
c4b88406 4654 return ret;
8be98f9a
MD
4655}
4656
0dd01979
MD
4657static
4658int ust_app_clear_quiescent_app_session(struct ust_app *app,
4659 struct ust_app_session *ua_sess)
4660{
4661 int ret = 0;
4662 struct lttng_ht_iter iter;
4663 struct ust_app_channel *ua_chan;
4664 struct consumer_socket *socket;
4665
4666 DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
4667
4668 rcu_read_lock();
4669
4670 if (!app->compatible) {
4671 goto end_not_compatible;
4672 }
4673
4674 pthread_mutex_lock(&ua_sess->lock);
4675
4676 if (ua_sess->deleted) {
4677 goto end_unlock;
4678 }
4679
4680 health_code_update();
4681
4682 socket = consumer_find_socket_by_bitness(app->bits_per_long,
4683 ua_sess->consumer);
4684 if (!socket) {
4685 ERR("Failed to find consumer (%" PRIu32 ") socket",
4686 app->bits_per_long);
4687 ret = -1;
4688 goto end_unlock;
4689 }
4690
4691 /* Clear quiescent state. */
4692 switch (ua_sess->buffer_type) {
4693 case LTTNG_BUFFER_PER_PID:
4694 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
4695 ua_chan, node.node) {
4696 health_code_update();
4697 ret = consumer_clear_quiescent_channel(socket,
4698 ua_chan->key);
4699 if (ret) {
4700 ERR("Error clearing quiescent state for consumer channel");
4701 ret = -1;
4702 continue;
4703 }
4704 }
4705 break;
4706 case LTTNG_BUFFER_PER_UID:
4707 default:
4708 assert(0);
4709 ret = -1;
4710 break;
4711 }
4712
4713 health_code_update();
4714
4715end_unlock:
4716 pthread_mutex_unlock(&ua_sess->lock);
4717
4718end_not_compatible:
4719 rcu_read_unlock();
4720 health_code_update();
4721 return ret;
4722}
4723
4724/*
4725 * Clear quiescent state in each stream for all applications for a
4726 * specific UST session.
4727 * Called with UST session lock held.
4728 */
4729static
4730int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
4731
4732{
4733 int ret = 0;
4734
4735 DBG("Clearing stream quiescent state for all ust apps");
4736
4737 rcu_read_lock();
4738
4739 switch (usess->buffer_type) {
4740 case LTTNG_BUFFER_PER_UID:
4741 {
4742 struct lttng_ht_iter iter;
4743 struct buffer_reg_uid *reg;
4744
4745 /*
4746 * Clear quiescent for all per UID buffers associated to
4747 * that session.
4748 */
4749 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
4750 struct consumer_socket *socket;
4751 struct buffer_reg_channel *reg_chan;
4752
4753 /* Get associated consumer socket.*/
4754 socket = consumer_find_socket_by_bitness(
4755 reg->bits_per_long, usess->consumer);
4756 if (!socket) {
4757 /*
4758 * Ignore request if no consumer is found for
4759 * the session.
4760 */
4761 continue;
4762 }
4763
4764 cds_lfht_for_each_entry(reg->registry->channels->ht,
4765 &iter.iter, reg_chan, node.node) {
4766 /*
4767 * The following call will print error values so
4768 * the return code is of little importance
4769 * because whatever happens, we have to try them
4770 * all.
4771 */
4772 (void) consumer_clear_quiescent_channel(socket,
4773 reg_chan->consumer_key);
4774 }
4775 }
4776 break;
4777 }
4778 case LTTNG_BUFFER_PER_PID:
4779 {
4780 struct ust_app_session *ua_sess;
4781 struct lttng_ht_iter iter;
4782 struct ust_app *app;
4783
4784 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
4785 pid_n.node) {
4786 ua_sess = lookup_session_by_app(usess, app);
4787 if (ua_sess == NULL) {
4788 continue;
4789 }
4790 (void) ust_app_clear_quiescent_app_session(app,
4791 ua_sess);
4792 }
4793 break;
4794 }
4795 default:
4796 ret = -1;
4797 assert(0);
4798 break;
4799 }
4800
4801 rcu_read_unlock();
4802 health_code_update();
4803 return ret;
4804}
4805
84cd17c6
MD
4806/*
4807 * Destroy a specific UST session in apps.
4808 */
3353de95 4809static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
84cd17c6 4810{
ffe60014 4811 int ret;
84cd17c6 4812 struct ust_app_session *ua_sess;
bec39940 4813 struct lttng_ht_iter iter;
d9bf3ca4 4814 struct lttng_ht_node_u64 *node;
84cd17c6 4815
852d0037 4816 DBG("Destroy tracing for ust app pid %d", app->pid);
84cd17c6
MD
4817
4818 rcu_read_lock();
4819
e0c7ec2b
DG
4820 if (!app->compatible) {
4821 goto end;
4822 }
4823
84cd17c6 4824 __lookup_session_by_app(usess, app, &iter);
d9bf3ca4 4825 node = lttng_ht_iter_get_node_u64(&iter);
84cd17c6 4826 if (node == NULL) {
d42f20df
DG
4827 /* Session is being or is deleted. */
4828 goto end;
84cd17c6
MD
4829 }
4830 ua_sess = caa_container_of(node, struct ust_app_session, node);
c4a1715b 4831
840cb59c 4832 health_code_update();
d0b96690 4833 destroy_app_session(app, ua_sess);
84cd17c6 4834
840cb59c 4835 health_code_update();
7db205b5 4836
84cd17c6 4837 /* Quiescent wait after stopping trace */
fb45065e 4838 pthread_mutex_lock(&app->sock_lock);
ffe60014 4839 ret = ustctl_wait_quiescent(app->sock);
fb45065e 4840 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
4841 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4842 ERR("UST app wait quiescent failed for app pid %d ret %d",
4843 app->pid, ret);
4844 }
e0c7ec2b
DG
4845end:
4846 rcu_read_unlock();
840cb59c 4847 health_code_update();
84cd17c6 4848 return 0;
84cd17c6
MD
4849}
4850
5b4a0ec0
DG
4851/*
4852 * Start tracing for the UST session.
4853 */
421cb601
DG
4854int ust_app_start_trace_all(struct ltt_ust_session *usess)
4855{
bec39940 4856 struct lttng_ht_iter iter;
421cb601 4857 struct ust_app *app;
48842b30 4858
421cb601
DG
4859 DBG("Starting all UST traces");
4860
bb2452c8
MD
4861 /*
4862 * Even though the start trace might fail, flag this session active so
4863 * other applications coming in are started by default.
4864 */
4865 usess->active = 1;
4866
421cb601 4867 rcu_read_lock();
421cb601 4868
0dd01979
MD
4869 /*
4870 * In a start-stop-start use-case, we need to clear the quiescent state
4871 * of each channel set by the prior stop command, thus ensuring that a
4872 * following stop or destroy is sure to grab a timestamp_end near those
4873 * operations, even if the packet is empty.
4874 */
4875 (void) ust_app_clear_quiescent_session(usess);
4876
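	/*
	 * ust_app_global_update() synchronizes each registered application's
	 * channels/events with the session and starts its trace if the app is
	 * compatible and passes the trackers.
	 */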
0498a00c
MD
4877 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4878 ust_app_global_update(usess, app);
4879 }
4880
48842b30
DG
4881 rcu_read_unlock();
4882
4883 return 0;
4884}
487cf67c 4885
8be98f9a
MD
4886/*
4887 * Stop tracing for the UST session.
ce34fcd0 4888 * Called with UST session lock held.
8be98f9a
MD
4889 */
4890int ust_app_stop_trace_all(struct ltt_ust_session *usess)
4891{
4892 int ret = 0;
bec39940 4893 struct lttng_ht_iter iter;
8be98f9a
MD
4894 struct ust_app *app;
4895
4896 DBG("Stopping all UST traces");
4897
bb2452c8
MD
4898 /*
4899 * Even though the stop trace might fail, flag this session inactive so
4900 * other application coming in are not started by default.
4901 */
4902 usess->active = 0;
4903
8be98f9a
MD
4904 rcu_read_lock();
4905
b34cbebf
MD
4906 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4907 ret = ust_app_stop_trace(usess, app);
4908 if (ret < 0) {
4909 /* Continue to next apps even on error */
4910 continue;
4911 }
4912 }
4913
ce34fcd0 4914 (void) ust_app_flush_session(usess);
8be98f9a
MD
4915
4916 rcu_read_unlock();
4917
4918 return 0;
4919}
4920
84cd17c6
MD
4921/*
4922 * Destroy app UST session.
4923 */
4924int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
4925{
4926 int ret = 0;
bec39940 4927 struct lttng_ht_iter iter;
84cd17c6
MD
4928 struct ust_app *app;
4929
4930 DBG("Destroy all UST traces");
4931
4932 rcu_read_lock();
4933
852d0037 4934 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3353de95 4935 ret = destroy_trace(usess, app);
84cd17c6
MD
4936 if (ret < 0) {
4937 /* Continue to next apps even on error */
4938 continue;
4939 }
4940 }
4941
4942 rcu_read_unlock();
4943
4944 return 0;
4945}
4946
88e3c2f5 4947/* The ua_sess lock must be held by the caller. */
a9ad0c8f 4948static
88e3c2f5
JG
4949int find_or_create_ust_app_channel(
4950 struct ltt_ust_session *usess,
4951 struct ust_app_session *ua_sess,
4952 struct ust_app *app,
4953 struct ltt_ust_channel *uchan,
4954 struct ust_app_channel **ua_chan)
487cf67c 4955{
55c54cce 4956 int ret = 0;
88e3c2f5
JG
4957 struct lttng_ht_iter iter;
4958 struct lttng_ht_node_str *ua_chan_node;
4959
4960 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
4961 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
4962 if (ua_chan_node) {
4963 *ua_chan = caa_container_of(ua_chan_node,
4964 struct ust_app_channel, node);
4965 goto end;
4966 }
4967
4968 ret = ust_app_channel_create(usess, ua_sess, uchan, app, ua_chan);
4969 if (ret) {
4970 goto end;
4971 }
4972end:
4973 return ret;
4974}
4975
4976static
4977int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan,
4978 struct ltt_ust_event *uevent, struct ust_app_session *ua_sess,
4979 struct ust_app *app)
4980{
4981 int ret = 0;
4982 struct ust_app_event *ua_event = NULL;
4983
4984 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4985 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
4986 if (!ua_event) {
4987 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
4988 if (ret < 0) {
4989 goto end;
4990 }
4991 } else {
4992 if (ua_event->enabled != uevent->enabled) {
4993 ret = uevent->enabled ?
4994 enable_ust_app_event(ua_sess, ua_event, app) :
4995 disable_ust_app_event(ua_sess, ua_event, app);
4996 }
4997 }
4998
4999end:
5000 return ret;
5001}
5002
5003/*
5004 * The caller must ensure that the application is compatible and is tracked
5005 * by the PID tracker.
5006 */
5007static
5008void ust_app_synchronize(struct ltt_ust_session *usess,
5009 struct ust_app *app)
5010{
5011 int ret = 0;
5012 struct cds_lfht_iter uchan_iter;
5013 struct ltt_ust_channel *uchan;
3d8ca23b 5014 struct ust_app_session *ua_sess = NULL;
1f3580c7 5015
88e3c2f5
JG
5016 /*
5017 * The application's configuration should only be synchronized for
5018 * active sessions.
5019 */
5020 assert(usess->active);
5021
5022 ret = find_or_create_ust_app_session(usess, app, &ua_sess, NULL);
3d8ca23b
DG
5023 if (ret < 0) {
5024 /* Tracer is probably gone or ENOMEM. */
487cf67c
DG
5025 goto error;
5026 }
3d8ca23b 5027 assert(ua_sess);
487cf67c 5028
d0b96690 5029 pthread_mutex_lock(&ua_sess->lock);
b161602a
MD
5030 if (ua_sess->deleted) {
5031 pthread_mutex_unlock(&ua_sess->lock);
5032 goto end;
5033 }
5034
88e3c2f5
JG
5035 rcu_read_lock();
5036 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &uchan_iter,
5037 uchan, node.node) {
5038 struct ust_app_channel *ua_chan;
5039 struct cds_lfht_iter uevent_iter;
5040 struct ltt_ust_event *uevent;
487cf67c 5041
31746f93 5042 /*
88e3c2f5
JG
5043 * Search for a matching ust_app_channel. If none is found,
5044 * create it. Creating the channel will cause the ua_chan
5045 * structure to be allocated, the channel buffers to be
5046 * allocated (if necessary) and sent to the application, and
5047 * all enabled contexts will be added to the channel.
31746f93 5048 */
88e3c2f5
JG
5049 ret = find_or_create_ust_app_channel(usess, ua_sess,
5050 app, uchan, &ua_chan);
5051 if (ret) {
5052 /* Tracer is probably gone or ENOMEM. */
5053 goto error_unlock;
727d5404
DG
5054 }
5055
88e3c2f5
JG
5056 if (!ua_chan) {
5057 /* ua_chan will be NULL for the metadata channel */
5058 continue;
5059 }
727d5404 5060
88e3c2f5 5061 cds_lfht_for_each_entry(uchan->events->ht, &uevent_iter, uevent,
bec39940 5062 node.node) {
88e3c2f5
JG
5063 ret = ust_app_channel_synchronize_event(ua_chan,
5064 uevent, ua_sess, app);
5065 if (ret) {
d0b96690 5066 goto error_unlock;
487cf67c 5067 }
36dc12cc 5068 }
d0b96690 5069
88e3c2f5
JG
5070 if (ua_chan->enabled != uchan->enabled) {
5071 ret = uchan->enabled ?
5072 enable_ust_app_channel(ua_sess, uchan, app) :
5073 disable_ust_app_channel(ua_sess, ua_chan, app);
5074 if (ret) {
5075 goto error_unlock;
5076 }
5077 }
36dc12cc 5078 }
88e3c2f5 5079 rcu_read_unlock();
0498a00c 5080
a9ad0c8f 5081end:
88e3c2f5 5082 pthread_mutex_unlock(&ua_sess->lock);
ffe60014 5083 /* Everything went well at this point. */
ffe60014
DG
5084 return;
5085
d0b96690 5086error_unlock:
88e3c2f5 5087 rcu_read_unlock();
d0b96690 5088 pthread_mutex_unlock(&ua_sess->lock);
487cf67c 5089error:
ffe60014 5090 if (ua_sess) {
d0b96690 5091 destroy_app_session(app, ua_sess);
ffe60014 5092 }
487cf67c
DG
5093 return;
5094}
55cc08a6 5095
a9ad0c8f
MD
5096static
5097void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
5098{
5099 struct ust_app_session *ua_sess;
5100
5101 ua_sess = lookup_session_by_app(usess, app);
5102 if (ua_sess == NULL) {
5103 return;
5104 }
5105 destroy_app_session(app, ua_sess);
5106}
5107
5108/*
5109 * Add channels/events from UST global domain to registered apps at sock.
5110 *
5111 * Called with session lock held.
5112 * Called with RCU read-side lock held.
5113 */
5114void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
5115{
5116 assert(usess);
88e3c2f5 5117 assert(usess->active);
a9ad0c8f
MD
5118
5119 DBG2("UST app global update for app sock %d for session id %" PRIu64,
5120 app->sock, usess->id);
5121
5122 if (!app->compatible) {
5123 return;
5124 }
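	/*
	 * Trace this application only if its vpid, vuid and vgid all pass the
	 * session's trackers; otherwise tear down any app session it may have.
	 */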
55c9e7ca
JR
5125 if (trace_ust_id_tracker_lookup(LTTNG_TRACKER_VPID, usess, app->pid) &&
5126 trace_ust_id_tracker_lookup(
5127 LTTNG_TRACKER_VUID, usess, app->uid) &&
5128 trace_ust_id_tracker_lookup(
5129 LTTNG_TRACKER_VGID, usess, app->gid)) {
88e3c2f5
JG
5130 /*
5131 * Synchronize the application's internal tracing configuration
5132 * and start tracing.
5133 */
5134 ust_app_synchronize(usess, app);
5135 ust_app_start_trace(usess, app);
a9ad0c8f
MD
5136 } else {
5137 ust_app_global_destroy(usess, app);
5138 }
5139}
5140
5141/*
5142 * Called with session lock held.
5143 */
5144void ust_app_global_update_all(struct ltt_ust_session *usess)
5145{
5146 struct lttng_ht_iter iter;
5147 struct ust_app *app;
5148
5149 rcu_read_lock();
5150 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5151 ust_app_global_update(usess, app);
5152 }
5153 rcu_read_unlock();
5154}
5155
55cc08a6
DG
5156/*
5157 * Add context to a specific channel for global UST domain.
5158 */
5159int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
5160 struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
5161{
5162 int ret = 0;
bec39940
DG
5163 struct lttng_ht_node_str *ua_chan_node;
5164 struct lttng_ht_iter iter, uiter;
55cc08a6
DG
5165 struct ust_app_channel *ua_chan = NULL;
5166 struct ust_app_session *ua_sess;
5167 struct ust_app *app;
5168
88e3c2f5 5169 assert(usess->active);
0498a00c 5170
55cc08a6 5171 rcu_read_lock();
852d0037 5172 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
5173 if (!app->compatible) {
5174 /*
5175 * TODO: In time, we should notice the caller of this error by
5176 * telling him that this is a version error.
5177 */
5178 continue;
5179 }
55cc08a6
DG
5180 ua_sess = lookup_session_by_app(usess, app);
5181 if (ua_sess == NULL) {
5182 continue;
5183 }
5184
d0b96690 5185 pthread_mutex_lock(&ua_sess->lock);
b161602a
MD
5186
5187 if (ua_sess->deleted) {
5188 pthread_mutex_unlock(&ua_sess->lock);
5189 continue;
5190 }
5191
55cc08a6 5192 /* Lookup channel in the ust app session */
bec39940
DG
5193 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
5194 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
55cc08a6 5195 if (ua_chan_node == NULL) {
d0b96690 5196 goto next_app;
55cc08a6
DG
5197 }
5198 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
5199 node);
c9edf082 5200 ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
55cc08a6 5201 if (ret < 0) {
d0b96690 5202 goto next_app;
55cc08a6 5203 }
d0b96690
DG
5204 next_app:
5205 pthread_mutex_unlock(&ua_sess->lock);
55cc08a6
DG
5206 }
5207
55cc08a6 5208 rcu_read_unlock();
76d45b40
DG
5209 return ret;
5210}
7f79d3a1 5211
d0b96690
DG
5212/*
5213 * Receive registration and populate the given msg structure.
5214 *
5215 * On success return 0 else a negative value returned by the ustctl call.
5216 */
5217int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
5218{
5219 int ret;
5220 uint32_t pid, ppid, uid, gid;
5221
5222 assert(msg);
5223
5224 ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
5225 &pid, &ppid, &uid, &gid,
5226 &msg->bits_per_long,
5227 &msg->uint8_t_alignment,
5228 &msg->uint16_t_alignment,
5229 &msg->uint32_t_alignment,
5230 &msg->uint64_t_alignment,
5231 &msg->long_alignment,
5232 &msg->byte_order,
5233 msg->name);
5234 if (ret < 0) {
5235 switch (-ret) {
5236 case EPIPE:
5237 case ECONNRESET:
5238 case LTTNG_UST_ERR_EXITING:
5239 DBG3("UST app recv reg message failed. Application died");
5240 break;
5241 case LTTNG_UST_ERR_UNSUP_MAJOR:
5242 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
5243 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
5244 LTTNG_UST_ABI_MINOR_VERSION);
5245 break;
5246 default:
5247 ERR("UST app recv reg message failed with ret %d", ret);
5248 break;
5249 }
5250 goto error;
5251 }
5252 msg->pid = (pid_t) pid;
5253 msg->ppid = (pid_t) ppid;
5254 msg->uid = (uid_t) uid;
5255 msg->gid = (gid_t) gid;
5256
5257error:
5258 return ret;
5259}
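
/*
 * Illustrative sketch (assumption, not upstream code): minimal use of
 * ust_app_recv_registration() by a registration-handling path. The contract
 * documented above applies: 0 on success, otherwise the negative ustctl
 * error, in which case the caller is expected to close the socket.
 */
static int example_handle_app_registration(int sock)
{
	int ret;
	struct ust_register_msg msg;

	memset(&msg, 0, sizeof(msg));
	ret = ust_app_recv_registration(sock, &msg);
	if (ret < 0) {
		/* Application died or speaks an unsupported version. */
		return ret;
	}
	DBG("Registered app: pid %d ppid %d uid %d gid %d name '%s'",
			(int) msg.pid, (int) msg.ppid, (int) msg.uid,
			(int) msg.gid, msg.name);
	return 0;
}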
5260
10b56aef
MD
5261/*
5262 * Return a ust app session object using the application object and the
5263 * session object descriptor as a key. If not found, NULL is returned.
5264 * An RCU read side lock MUST be acquired when calling this function.
5265*/
5266static struct ust_app_session *find_session_by_objd(struct ust_app *app,
5267 int objd)
5268{
5269 struct lttng_ht_node_ulong *node;
5270 struct lttng_ht_iter iter;
5271 struct ust_app_session *ua_sess = NULL;
5272
5273 assert(app);
5274
5275 lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
5276 node = lttng_ht_iter_get_node_ulong(&iter);
5277 if (node == NULL) {
5278 DBG2("UST app session find by objd %d not found", objd);
5279 goto error;
5280 }
5281
5282 ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
5283
5284error:
5285 return ua_sess;
5286}
5287
d88aee68
DG
5288/*
5289 * Return a ust app channel object using the application object and the channel
5290 * object descriptor as a key. If not found, NULL is returned. An RCU read side
5291 * lock MUST be acquired before calling this function.
5292 */
d0b96690
DG
5293static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
5294 int objd)
5295{
5296 struct lttng_ht_node_ulong *node;
5297 struct lttng_ht_iter iter;
5298 struct ust_app_channel *ua_chan = NULL;
5299
5300 assert(app);
5301
5302 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
5303 node = lttng_ht_iter_get_node_ulong(&iter);
5304 if (node == NULL) {
5305 DBG2("UST app channel find by objd %d not found", objd);
5306 goto error;
5307 }
5308
5309 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
5310
5311error:
5312 return ua_chan;
5313}
5314
d88aee68
DG
5315/*
5316 * Reply to a register channel notification from an application on the notify
5317 * socket. The channel metadata is also created.
5318 *
5319 * The session UST registry lock is acquired in this function.
5320 *
5321 * On success 0 is returned else a negative value.
5322 */
8eede835 5323static int reply_ust_register_channel(int sock, int cobjd,
d0b96690
DG
5324 size_t nr_fields, struct ustctl_field *fields)
5325{
5326 int ret, ret_code = 0;
294e218e 5327 uint32_t chan_id;
7972aab2 5328 uint64_t chan_reg_key;
d0b96690
DG
5329 enum ustctl_channel_header type;
5330 struct ust_app *app;
5331 struct ust_app_channel *ua_chan;
5332 struct ust_app_session *ua_sess;
7972aab2 5333 struct ust_registry_session *registry;
45893984 5334 struct ust_registry_channel *chan_reg;
d0b96690
DG
5335
5336 rcu_read_lock();
5337
5338 /* Lookup application. If not found, there is a code flow error. */
5339 app = find_app_by_notify_sock(sock);
d88aee68 5340 if (!app) {
fad1ed2f 5341 DBG("Application socket %d is being torn down. Abort event notify",
d88aee68
DG
5342 sock);
5343 ret = 0;
5344 goto error_rcu_unlock;
5345 }
d0b96690 5346
4950b860 5347 /* Lookup channel by UST object descriptor. */
d0b96690 5348 ua_chan = find_channel_by_objd(app, cobjd);
4950b860 5349 if (!ua_chan) {
fad1ed2f 5350 DBG("Application channel is being torn down. Abort event notify");
4950b860
MD
5351 ret = 0;
5352 goto error_rcu_unlock;
5353 }
5354
d0b96690
DG
5355 assert(ua_chan->session);
5356 ua_sess = ua_chan->session;
d0b96690 5357
7972aab2
DG
5358 /* Get right session registry depending on the session buffer type. */
5359 registry = get_session_registry(ua_sess);
fad1ed2f
JR
5360 if (!registry) {
5361 DBG("Application session is being torn down. Abort event notify");
5362 ret = 0;
5363 goto error_rcu_unlock;
5364 }
45893984 5365
7972aab2
DG
5366 /* Depending on the buffer type, a different channel key is used. */
5367 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
5368 chan_reg_key = ua_chan->tracing_channel_id;
d0b96690 5369 } else {
7972aab2 5370 chan_reg_key = ua_chan->key;
d0b96690
DG
5371 }
5372
7972aab2
DG
5373 pthread_mutex_lock(&registry->lock);
5374
5375 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
5376 assert(chan_reg);
5377
5378 if (!chan_reg->register_done) {
294e218e
MD
5379 /*
5380 * TODO: eventually use the registry event count for
5381 * this channel to better guess header type for per-pid
5382 * buffers.
5383 */
5384 type = USTCTL_CHANNEL_HEADER_LARGE;
7972aab2
DG
5385 chan_reg->nr_ctx_fields = nr_fields;
5386 chan_reg->ctx_fields = fields;
fad1ed2f 5387 fields = NULL;
7972aab2 5388 chan_reg->header_type = type;
d0b96690 5389 } else {
7972aab2
DG
5390 /* Get current already assigned values. */
5391 type = chan_reg->header_type;
d0b96690 5392 }
7972aab2
DG
5393 /* Channel id is set during the object creation. */
5394 chan_id = chan_reg->chan_id;
d0b96690
DG
5395
5396 /* Append to metadata */
7972aab2
DG
5397 if (!chan_reg->metadata_dumped) {
5398 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
d0b96690
DG
5399 if (ret_code) {
5400 ERR("Error appending channel metadata (errno = %d)", ret_code);
5401 goto reply;
5402 }
5403 }
5404
5405reply:
7972aab2
DG
5406 DBG3("UST app replying to register channel key %" PRIu64
5407 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
5408 ret_code);
d0b96690
DG
5409
5410 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
5411 if (ret < 0) {
5412 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5413 ERR("UST app reply channel failed with ret %d", ret);
5414 } else {
5415 DBG3("UST app reply channel failed. Application died");
5416 }
5417 goto error;
5418 }
5419
7972aab2
DG
5420 /* This channel registry registration is completed. */
5421 chan_reg->register_done = 1;
5422
d0b96690 5423error:
7972aab2 5424 pthread_mutex_unlock(&registry->lock);
d88aee68 5425error_rcu_unlock:
d0b96690 5426 rcu_read_unlock();
fad1ed2f 5427 free(fields);
d0b96690
DG
5428 return ret;
5429}
5430
d88aee68
DG
5431/*
5432 * Add event to the UST channel registry. When the event is added to the
5433 * registry, the metadata is also created. Once done, this replies to the
5434 * application with the appropriate error code.
5435 *
5436 * The session UST registry lock is acquired in the function.
5437 *
5438 * On success 0 is returned else a negative value.
5439 */
d0b96690 5440static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
2106efa0
PP
5441 char *sig, size_t nr_fields, struct ustctl_field *fields,
5442 int loglevel_value, char *model_emf_uri)
d0b96690
DG
5443{
5444 int ret, ret_code;
5445 uint32_t event_id = 0;
7972aab2 5446 uint64_t chan_reg_key;
d0b96690
DG
5447 struct ust_app *app;
5448 struct ust_app_channel *ua_chan;
5449 struct ust_app_session *ua_sess;
7972aab2 5450 struct ust_registry_session *registry;
d0b96690
DG
5451
5452 rcu_read_lock();
5453
5454 /* Lookup application. If not found, there is a code flow error. */
5455 app = find_app_by_notify_sock(sock);
d88aee68 5456 if (!app) {
fad1ed2f 5457 DBG("Application socket %d is being torn down. Abort event notify",
d88aee68
DG
5458 sock);
5459 ret = 0;
5460 goto error_rcu_unlock;
5461 }
d0b96690 5462
4950b860 5463 /* Lookup channel by UST object descriptor. */
d0b96690 5464 ua_chan = find_channel_by_objd(app, cobjd);
4950b860 5465 if (!ua_chan) {
fad1ed2f 5466 DBG("Application channel is being torn down. Abort event notify");
4950b860
MD
5467 ret = 0;
5468 goto error_rcu_unlock;
5469 }
5470
d0b96690
DG
5471 assert(ua_chan->session);
5472 ua_sess = ua_chan->session;
5473
7972aab2 5474 registry = get_session_registry(ua_sess);
fad1ed2f
JR
5475 if (!registry) {
5476 DBG("Application session is being torn down. Abort event notify");
5477 ret = 0;
5478 goto error_rcu_unlock;
5479 }
7972aab2
DG
5480
5481 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
5482 chan_reg_key = ua_chan->tracing_channel_id;
5483 } else {
5484 chan_reg_key = ua_chan->key;
5485 }
5486
5487 pthread_mutex_lock(&registry->lock);
d0b96690 5488
d5d629b5
DG
5489 /*
5490 * From this point on, this call acquires the ownership of the sig, fields
5491 * and model_emf_uri meaning any free are done inside it if needed. These
5492 * three variables MUST NOT be read/write after this.
5493 */
7972aab2 5494 ret_code = ust_registry_create_event(registry, chan_reg_key,
2106efa0
PP
5495 sobjd, cobjd, name, sig, nr_fields, fields,
5496 loglevel_value, model_emf_uri, ua_sess->buffer_type,
5497 &event_id, app);
fad1ed2f
JR
5498 sig = NULL;
5499 fields = NULL;
5500 model_emf_uri = NULL;
d0b96690
DG
5501
5502 /*
5503 * The return code is sent back to ustctl so that, in case of an error,
5504 * the application can be notified. It is important not to return a
5505 * negative error, or else the application will get closed.
5506 */
5507 ret = ustctl_reply_register_event(sock, event_id, ret_code);
5508 if (ret < 0) {
5509 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5510 ERR("UST app reply event failed with ret %d", ret);
5511 } else {
5512 DBG3("UST app reply event failed. Application died");
5513 }
5514 /*
5515 * No need to wipe the created event since the application socket will
5516 * get closed on error, hence cleaning up everything by itself.
5517 */
5518 goto error;
5519 }
5520
7972aab2
DG
5521 DBG3("UST registry event %s with id %" PRId32 " added successfully",
5522 name, event_id);
d88aee68 5523
d0b96690 5524error:
7972aab2 5525 pthread_mutex_unlock(&registry->lock);
d88aee68 5526error_rcu_unlock:
d0b96690 5527 rcu_read_unlock();
fad1ed2f
JR
5528 free(sig);
5529 free(fields);
5530 free(model_emf_uri);
d0b96690
DG
5531 return ret;
5532}
5533
10b56aef
MD
5534/*
5535 * Add enum to the UST session registry. Once done, this replies to the
5536 * application with the appropriate error code.
5537 *
5538 * The session UST registry lock is acquired within this function.
5539 *
5540 * On success 0 is returned else a negative value.
5541 */
5542static int add_enum_ust_registry(int sock, int sobjd, char *name,
5543 struct ustctl_enum_entry *entries, size_t nr_entries)
5544{
5545 int ret = 0, ret_code;
5546 struct ust_app *app;
5547 struct ust_app_session *ua_sess;
5548 struct ust_registry_session *registry;
5549 uint64_t enum_id = -1ULL;
5550
5551 rcu_read_lock();
5552
5553 /* Lookup application. If not found, there is a code flow error. */
5554 app = find_app_by_notify_sock(sock);
5555 if (!app) {
5556 /* Return without an error since this is not an error condition. */
5557 DBG("Application socket %d is being torn down. Aborting enum registration",
5558 sock);
5559 free(entries);
5560 goto error_rcu_unlock;
5561 }
5562
5563 /* Lookup session by UST object descriptor. */
5564 ua_sess = find_session_by_objd(app, sobjd);
5565 if (!ua_sess) {
5566 /* Return without an error since this is not an error condition. */
fad1ed2f 5567 DBG("Application session is being torn down (session not found). Aborting enum registration.");
10b56aef
MD
5568 free(entries);
5569 goto error_rcu_unlock;
5570 }
5571
5572 registry = get_session_registry(ua_sess);
fad1ed2f
JR
5573 if (!registry) {
5574 DBG("Application session is being torn down (registry not found). Aborting enum registration.");
5575 free(entries);
5576 goto error_rcu_unlock;
5577 }
10b56aef
MD
5578
5579 pthread_mutex_lock(&registry->lock);
5580
5581 /*
5582 * From this point on, the callee acquires the ownership of
5583 * entries. The variable entries MUST NOT be read/written after
5584 * call.
5585 */
5586 ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
5587 entries, nr_entries, &enum_id);
5588 entries = NULL;
5589
5590 /*
5591 * The return code is sent back to ustctl so that, in case of an error,
5592 * the application can be notified. It is important not to return a
5593 * negative error, or else the application will get closed.
5594 */
5595 ret = ustctl_reply_register_enum(sock, enum_id, ret_code);
5596 if (ret < 0) {
5597 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5598 ERR("UST app reply enum failed with ret %d", ret);
5599 } else {
5600 DBG3("UST app reply enum failed. Application died");
5601 }
5602 /*
5603 * No need to wipe the created enum since the application socket will
5604 * get closed on error, hence cleaning up everything by itself.
5605 */
5606 goto error;
5607 }
5608
5609 DBG3("UST registry enum %s added successfully or already found", name);
5610
5611error:
5612 pthread_mutex_unlock(&registry->lock);
5613error_rcu_unlock:
5614 rcu_read_unlock();
5615 return ret;
5616}
5617
d88aee68
DG
5618/*
5619 * Handle application notification through the given notify socket.
5620 *
5621 * Return 0 on success or else a negative value.
5622 */
d0b96690
DG
5623int ust_app_recv_notify(int sock)
5624{
5625 int ret;
5626 enum ustctl_notify_cmd cmd;
5627
5628 DBG3("UST app receiving notify from sock %d", sock);
5629
5630 ret = ustctl_recv_notify(sock, &cmd);
5631 if (ret < 0) {
5632 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5633 ERR("UST app recv notify failed with ret %d", ret);
5634 } else {
5635 DBG3("UST app recv notify failed. Application died");
5636 }
5637 goto error;
5638 }
5639
5640 switch (cmd) {
5641 case USTCTL_NOTIFY_CMD_EVENT:
5642 {
2106efa0 5643 int sobjd, cobjd, loglevel_value;
d0b96690
DG
5644 char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
5645 size_t nr_fields;
5646 struct ustctl_field *fields;
5647
5648 DBG2("UST app ustctl register event received");
5649
2106efa0
PP
5650 ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name,
5651 &loglevel_value, &sig, &nr_fields, &fields,
5652 &model_emf_uri);
d0b96690
DG
5653 if (ret < 0) {
5654 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5655 ERR("UST app recv event failed with ret %d", ret);
5656 } else {
5657 DBG3("UST app recv event failed. Application died");
5658 }
5659 goto error;
5660 }
5661
d5d629b5
DG
5662 /*
5663 * Add event to the UST registry coming from the notify socket. This
5664 * call will free the sig, fields and model_emf_uri if needed. This
5665 * code path loses the ownership of these variables and transfers
5666 * them to that function.
5667 */
d0b96690 5668 ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
2106efa0 5669 fields, loglevel_value, model_emf_uri);
d0b96690
DG
5670 if (ret < 0) {
5671 goto error;
5672 }
5673
5674 break;
5675 }
5676 case USTCTL_NOTIFY_CMD_CHANNEL:
5677 {
5678 int sobjd, cobjd;
5679 size_t nr_fields;
5680 struct ustctl_field *fields;
5681
5682 DBG2("UST app ustctl register channel received");
5683
5684 ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
5685 &fields);
5686 if (ret < 0) {
5687 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5688 ERR("UST app recv channel failed with ret %d", ret);
5689 } else {
5690 DBG3("UST app recv channel failed. Application died");
5691 }
5692 goto error;
5693 }
5694
d5d629b5
DG
5695 /*
5696 * The ownership of the fields is transferred to this function call,
5697 * meaning that they will be freed if needed. After this, it is invalid
5698 * to access or clean up the fields.
5699 */
8eede835 5700 ret = reply_ust_register_channel(sock, cobjd, nr_fields,
d0b96690
DG
5701 fields);
5702 if (ret < 0) {
5703 goto error;
5704 }
5705
5706 break;
5707 }
10b56aef
MD
5708 case USTCTL_NOTIFY_CMD_ENUM:
5709 {
5710 int sobjd;
5711 char name[LTTNG_UST_SYM_NAME_LEN];
5712 size_t nr_entries;
5713 struct ustctl_enum_entry *entries;
5714
5715 DBG2("UST app ustctl register enum received");
5716
5717 ret = ustctl_recv_register_enum(sock, &sobjd, name,
5718 &entries, &nr_entries);
5719 if (ret < 0) {
5720 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5721 ERR("UST app recv enum failed with ret %d", ret);
5722 } else {
5723 DBG3("UST app recv enum failed. Application died");
5724 }
5725 goto error;
5726 }
5727
5728 /* Callee assumes ownership of entries */
5729 ret = add_enum_ust_registry(sock, sobjd, name,
5730 entries, nr_entries);
5731 if (ret < 0) {
5732 goto error;
5733 }
5734
5735 break;
5736 }
d0b96690
DG
5737 default:
5738 /* Should NEVER happen. */
5739 assert(0);
5740 }
5741
5742error:
5743 return ret;
5744}
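
/*
 * Illustrative sketch (assumption): how a notify-thread poll loop could
 * dispatch a readable notify socket to ust_app_recv_notify() and tear the
 * socket down when the command fails (application died or protocol error).
 */
static void example_on_notify_socket_ready(int sock)
{
	if (ust_app_recv_notify(sock) < 0) {
		/* Unregister and close the socket after an RCU grace period. */
		ust_app_notify_sock_unregister(sock);
	}
}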
d88aee68
DG
5745
5746/*
5747 * Once the notify socket hangs up, this is called. First, it tries to find the
5748 * corresponding application. On failure, the call_rcu to close the socket is
5749 * executed. If an application is found, it tries to delete it from the notify
5750 * socket hash table. Whatever the result, it proceeds to the call_rcu.
5751 *
5752 * Note that an object needs to be allocated here; on ENOMEM failure, the
5753 * call_rcu is not done but the rest of the cleanup is.
5754 */
5755void ust_app_notify_sock_unregister(int sock)
5756{
5757 int err_enomem = 0;
5758 struct lttng_ht_iter iter;
5759 struct ust_app *app;
5760 struct ust_app_notify_sock_obj *obj;
5761
5762 assert(sock >= 0);
5763
5764 rcu_read_lock();
5765
5766 obj = zmalloc(sizeof(*obj));
5767 if (!obj) {
5768 /*
5769 * An ENOMEM is kind of uncool. If this strikes we continue the
5770 * procedure but the call_rcu will not be called. In this case, we
5771 * accept the fd leak rather than possibly creating an unsynchronized
5772 * state between threads.
5773 *
5774 * TODO: The notify object should be created once the notify socket is
5775 * registered and stored independently from the ust app object. The
5776 * tricky part is to synchronize the teardown of the application and
5777 * this notify object. Let's keep that in mind so we can avoid this
5778 * kind of shenanigans with ENOMEM in the teardown path.
5779 */
5780 err_enomem = 1;
5781 } else {
5782 obj->fd = sock;
5783 }
5784
5785 DBG("UST app notify socket unregister %d", sock);
5786
5787 /*
5788 * Lookup application by notify socket. If this fails, this means that the
5789 * hash table delete has already been done by the application
5790 * unregistration process so we can safely close the notify socket in a
5791 * call RCU.
5792 */
5793 app = find_app_by_notify_sock(sock);
5794 if (!app) {
5795 goto close_socket;
5796 }
5797
5798 iter.iter.node = &app->notify_sock_n.node;
5799
5800 /*
5801 * Whether the deletion fails or succeeds, we have to close the socket
5802 * after a grace period, hence the call_rcu below. If the deletion is
5803 * successful, the application is no longer visible to other threads; if
5804 * it fails, it means it was already deleted from the hash table, so
5805 * either way we just have to close the socket.
5807 */
5808 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
5809
5810close_socket:
5811 rcu_read_unlock();
5812
5813 /*
5814 * Close the socket after a grace period to avoid the socket being reused
5815 * before the application object is freed, which could create a race
5816 * between threads trying to add a unique entry to the global hash table.
5817 */
5818 if (!err_enomem) {
5819 call_rcu(&obj->head, close_notify_sock_rcu);
5820 }
5821}
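
/*
 * Illustrative sketch (assumption): the general shape of the call_rcu
 * callback scheduled above. The real close_notify_sock_rcu() is defined
 * earlier in this file and also releases the fd-limit accounting; this only
 * shows the close-after-grace-period idea.
 */
static void example_close_notify_sock_rcu(struct rcu_head *head)
{
	struct ust_app_notify_sock_obj *obj =
			caa_container_of(head, struct ust_app_notify_sock_obj, head);

	/* No reader can still observe the fd at this point. */
	if (close(obj->fd)) {
		PERROR("close notify sock");
	}
	free(obj);
}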
f45e313d
DG
5822
5823/*
5824 * Destroy a ust app data structure and free its memory.
5825 */
5826void ust_app_destroy(struct ust_app *app)
5827{
5828 if (!app) {
5829 return;
5830 }
5831
5832 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
5833}
6dc3064a
DG
5834
5835/*
5836 * Take a snapshot for a given UST session. The snapshot is sent to the given
5837 * output.
5838 *
9a654598 5839 * Returns LTTNG_OK on success or a LTTNG_ERR error code.
6dc3064a 5840 */
fb9a95c4
JG
5841enum lttng_error_code ust_app_snapshot_record(
5842 const struct ltt_ust_session *usess,
348a81dc 5843 const struct consumer_output *output, int wait,
d07ceecd 5844 uint64_t nb_packets_per_stream)
6dc3064a
DG
5845{
5846 int ret = 0;
9a654598 5847 enum lttng_error_code status = LTTNG_OK;
6dc3064a
DG
5848 struct lttng_ht_iter iter;
5849 struct ust_app *app;
affce97e 5850 char *trace_path = NULL;
6dc3064a
DG
5851
5852 assert(usess);
5853 assert(output);
5854
5855 rcu_read_lock();
5856
8c924c7b
MD
5857 switch (usess->buffer_type) {
5858 case LTTNG_BUFFER_PER_UID:
5859 {
5860 struct buffer_reg_uid *reg;
6dc3064a 5861
8c924c7b
MD
5862 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5863 struct buffer_reg_channel *reg_chan;
5864 struct consumer_socket *socket;
3b967712 5865 char pathname[PATH_MAX];
5da88b0f 5866 size_t consumer_path_offset = 0;
6dc3064a 5867
2b269489
JR
5868 if (!reg->registry->reg.ust->metadata_key) {
5869 /* Skip since no metadata is present */
5870 continue;
5871 }
5872
8c924c7b
MD
5873 /* Get consumer socket to use to push the metadata.*/
5874 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
5875 usess->consumer);
5876 if (!socket) {
9a654598 5877 status = LTTNG_ERR_INVALID;
8c924c7b
MD
5878 goto error;
5879 }
6dc3064a 5880
8c924c7b
MD
5881 memset(pathname, 0, sizeof(pathname));
5882 ret = snprintf(pathname, sizeof(pathname),
5da88b0f 5883 DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
8c924c7b
MD
5884 reg->uid, reg->bits_per_long);
5885 if (ret < 0) {
5886 PERROR("snprintf snapshot path");
9a654598 5887 status = LTTNG_ERR_INVALID;
8c924c7b
MD
5888 goto error;
5889 }
affce97e
JG
5890 /* Free path allowed on previous iteration. */
5891 free(trace_path);
5da88b0f
MD
5892 trace_path = setup_channel_trace_path(usess->consumer, pathname,
5893 &consumer_path_offset);
3b967712
MD
5894 if (!trace_path) {
5895 status = LTTNG_ERR_INVALID;
5896 goto error;
5897 }
d2956687 5898 /* Add the UST default trace dir to path. */
8c924c7b
MD
5899 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5900 reg_chan, node.node) {
9a654598 5901 status = consumer_snapshot_channel(socket,
e098433c
JG
5902 reg_chan->consumer_key,
5903 output, 0, usess->uid,
5da88b0f 5904 usess->gid, &trace_path[consumer_path_offset], wait,
d2956687 5905 nb_packets_per_stream);
9a654598 5906 if (status != LTTNG_OK) {
8c924c7b
MD
5907 goto error;
5908 }
5909 }
9a654598 5910 status = consumer_snapshot_channel(socket,
68808f4e 5911 reg->registry->reg.ust->metadata_key, output, 1,
5da88b0f
MD
5912 usess->uid, usess->gid, &trace_path[consumer_path_offset],
5913 wait, 0);
9a654598 5914 if (status != LTTNG_OK) {
8c924c7b
MD
5915 goto error;
5916 }
af706bb7 5917 }
8c924c7b
MD
5918 break;
5919 }
5920 case LTTNG_BUFFER_PER_PID:
5921 {
5922 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5923 struct consumer_socket *socket;
5924 struct lttng_ht_iter chan_iter;
5925 struct ust_app_channel *ua_chan;
5926 struct ust_app_session *ua_sess;
5927 struct ust_registry_session *registry;
3b967712 5928 char pathname[PATH_MAX];
5da88b0f 5929 size_t consumer_path_offset = 0;
8c924c7b
MD
5930
5931 ua_sess = lookup_session_by_app(usess, app);
5932 if (!ua_sess) {
5933 /* Session not associated with this app. */
5934 continue;
5935 }
af706bb7 5936
8c924c7b
MD
5937 /* Get the right consumer socket for the application. */
5938 socket = consumer_find_socket_by_bitness(app->bits_per_long,
348a81dc 5939 output);
8c924c7b 5940 if (!socket) {
9a654598 5941 status = LTTNG_ERR_INVALID;
5c786ded
JD
5942 goto error;
5943 }
5944
8c924c7b
MD
5945 /* Add the UST default trace dir to path. */
5946 memset(pathname, 0, sizeof(pathname));
5da88b0f 5947 ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "/%s",
8c924c7b 5948 ua_sess->path);
6dc3064a 5949 if (ret < 0) {
9a654598 5950 status = LTTNG_ERR_INVALID;
8c924c7b 5951 PERROR("snprintf snapshot path");
6dc3064a
DG
5952 goto error;
5953 }
affce97e
JG
5954 /* Free path allowed on previous iteration. */
5955 free(trace_path);
5da88b0f
MD
5956 trace_path = setup_channel_trace_path(usess->consumer, pathname,
5957 &consumer_path_offset);
3b967712
MD
5958 if (!trace_path) {
5959 status = LTTNG_ERR_INVALID;
5960 goto error;
5961 }
d2956687 5962 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
8c924c7b 5963 ua_chan, node.node) {
9a654598 5964 status = consumer_snapshot_channel(socket,
470cc211
JG
5965 ua_chan->key, output, 0,
5966 ua_sess->effective_credentials
5967 .uid,
5968 ua_sess->effective_credentials
5969 .gid,
5da88b0f 5970 &trace_path[consumer_path_offset], wait,
d2956687 5971 nb_packets_per_stream);
9a654598
JG
5972 switch (status) {
5973 case LTTNG_OK:
5974 break;
5975 case LTTNG_ERR_CHAN_NOT_FOUND:
5976 continue;
5977 default:
8c924c7b
MD
5978 goto error;
5979 }
5980 }
5981
5982 registry = get_session_registry(ua_sess);
fad1ed2f 5983 if (!registry) {
9bbfb88c
MD
5984 DBG("Application session is being torn down. Skip application.");
5985 continue;
fad1ed2f 5986 }
9a654598 5987 status = consumer_snapshot_channel(socket,
470cc211
JG
5988 registry->metadata_key, output, 1,
5989 ua_sess->effective_credentials.uid,
5990 ua_sess->effective_credentials.gid,
5da88b0f 5991 &trace_path[consumer_path_offset], wait, 0);
9a654598
JG
5992 switch (status) {
5993 case LTTNG_OK:
5994 break;
5995 case LTTNG_ERR_CHAN_NOT_FOUND:
5996 continue;
5997 default:
8c924c7b
MD
5998 goto error;
5999 }
6000 }
6001 break;
6002 }
6003 default:
6004 assert(0);
6005 break;
6dc3064a
DG
6006 }
6007
6008error:
affce97e 6009 free(trace_path);
6dc3064a 6010 rcu_read_unlock();
9a654598 6011 return status;
6dc3064a 6012}
5c786ded
JD
6013
6014/*
d07ceecd 6015 * Return the size taken by one more packet per stream.
5c786ded 6016 */
fb9a95c4
JG
6017uint64_t ust_app_get_size_one_more_packet_per_stream(
6018 const struct ltt_ust_session *usess, uint64_t cur_nr_packets)
5c786ded 6019{
d07ceecd 6020 uint64_t tot_size = 0;
5c786ded
JD
6021 struct ust_app *app;
6022 struct lttng_ht_iter iter;
6023
6024 assert(usess);
6025
6026 switch (usess->buffer_type) {
6027 case LTTNG_BUFFER_PER_UID:
6028 {
6029 struct buffer_reg_uid *reg;
6030
6031 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6032 struct buffer_reg_channel *reg_chan;
6033
b7064eaa 6034 rcu_read_lock();
5c786ded
JD
6035 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
6036 reg_chan, node.node) {
d07ceecd
MD
6037 if (cur_nr_packets >= reg_chan->num_subbuf) {
6038 /*
6039 * Don't take the channel into account if we
6040 * already grabbed all its packets.
6041 */
6042 continue;
6043 }
6044 tot_size += reg_chan->subbuf_size * reg_chan->stream_count;
5c786ded 6045 }
b7064eaa 6046 rcu_read_unlock();
5c786ded
JD
6047 }
6048 break;
6049 }
6050 case LTTNG_BUFFER_PER_PID:
6051 {
6052 rcu_read_lock();
6053 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6054 struct ust_app_channel *ua_chan;
6055 struct ust_app_session *ua_sess;
6056 struct lttng_ht_iter chan_iter;
6057
6058 ua_sess = lookup_session_by_app(usess, app);
6059 if (!ua_sess) {
6060 /* Session not associated with this app. */
6061 continue;
6062 }
6063
6064 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
6065 ua_chan, node.node) {
d07ceecd
MD
6066 if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
6067 /*
6068 * Don't take the channel into account if we
6069 * already grabbed all its packets.
6070 */
6071 continue;
6072 }
6073 tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
5c786ded
JD
6074 }
6075 }
6076 rcu_read_unlock();
6077 break;
6078 }
6079 default:
6080 assert(0);
6081 break;
6082 }
6083
d07ceecd 6084 return tot_size;
5c786ded 6085}
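
/*
 * Worked example (illustrative values only, not taken from the code above):
 * for a channel with 4 streams and 4 sub-buffers of 512 kiB each, taking one
 * more packet per stream while cur_nr_packets < num_subbuf contributes
 * subbuf_size * stream_count = 512 kiB * 4 = 2 MiB to the returned total.
 */
static uint64_t example_one_more_packet_cost(void)
{
	const uint64_t subbuf_size = 512 * 1024;	/* 512 kiB */
	const uint64_t stream_count = 4;		/* e.g. one stream per CPU */

	return subbuf_size * stream_count;		/* 2 MiB */
}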
fb83fe64
JD
6086
6087int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
6088 struct cds_list_head *buffer_reg_uid_list,
6089 struct consumer_output *consumer, uint64_t uchan_id,
6090 int overwrite, uint64_t *discarded, uint64_t *lost)
6091{
6092 int ret;
6093 uint64_t consumer_chan_key;
6094
70dd8162
MD
6095 *discarded = 0;
6096 *lost = 0;
6097
fb83fe64 6098 ret = buffer_reg_uid_consumer_channel_key(
76604852 6099 buffer_reg_uid_list, uchan_id, &consumer_chan_key);
fb83fe64 6100 if (ret < 0) {
70dd8162
MD
6101 /* Not found */
6102 ret = 0;
fb83fe64
JD
6103 goto end;
6104 }
6105
6106 if (overwrite) {
6107 ret = consumer_get_lost_packets(ust_session_id,
6108 consumer_chan_key, consumer, lost);
6109 } else {
6110 ret = consumer_get_discarded_events(ust_session_id,
6111 consumer_chan_key, consumer, discarded);
6112 }
6113
6114end:
6115 return ret;
6116}
6117
6118int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
6119 struct ltt_ust_channel *uchan,
6120 struct consumer_output *consumer, int overwrite,
6121 uint64_t *discarded, uint64_t *lost)
6122{
6123 int ret = 0;
6124 struct lttng_ht_iter iter;
6125 struct lttng_ht_node_str *ua_chan_node;
6126 struct ust_app *app;
6127 struct ust_app_session *ua_sess;
6128 struct ust_app_channel *ua_chan;
6129
70dd8162
MD
6130 *discarded = 0;
6131 *lost = 0;
6132
fb83fe64
JD
6133 rcu_read_lock();
6134 /*
70dd8162
MD
6135 * Iterate over every registered application. Sum the counters for
6136 * all applications containing the requested session and channel.
fb83fe64
JD
6137 */
6138 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6139 struct lttng_ht_iter uiter;
6140
6141 ua_sess = lookup_session_by_app(usess, app);
6142 if (ua_sess == NULL) {
6143 continue;
6144 }
6145
6146 /* Get channel */
ee022399 6147 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
fb83fe64
JD
6148 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
6149 /* If the session is found for the app, the channel must be there */
6150 assert(ua_chan_node);
6151
6152 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
6153
6154 if (overwrite) {
70dd8162
MD
6155 uint64_t _lost;
6156
fb83fe64 6157 ret = consumer_get_lost_packets(usess->id, ua_chan->key,
70dd8162
MD
6158 consumer, &_lost);
6159 if (ret < 0) {
6160 break;
6161 }
6162 (*lost) += _lost;
fb83fe64 6163 } else {
70dd8162
MD
6164 uint64_t _discarded;
6165
fb83fe64 6166 ret = consumer_get_discarded_events(usess->id,
70dd8162
MD
6167 ua_chan->key, consumer, &_discarded);
6168 if (ret < 0) {
6169 break;
6170 }
6171 (*discarded) += _discarded;
fb83fe64 6172 }
fb83fe64
JD
6173 }
6174
fb83fe64
JD
6175 rcu_read_unlock();
6176 return ret;
6177}
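
/*
 * Illustrative sketch (assumption): how a caller might consume the per-PID
 * runtime statistics reported above. The relevant counter depends on the
 * channel mode: overwrite reports lost packets, discard mode reports
 * discarded events.
 */
static int example_get_channel_stats(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, struct consumer_output *consumer,
		int overwrite)
{
	uint64_t discarded = 0, lost = 0;
	int ret;

	ret = ust_app_pid_get_channel_runtime_stats(usess, uchan, consumer,
			overwrite, &discarded, &lost);
	if (ret < 0) {
		return ret;
	}
	DBG("Channel stats: %" PRIu64 " discarded events, %" PRIu64 " lost packets",
			discarded, lost);
	return 0;
}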
c2561365
JD
6178
6179static
6180int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
6181 struct ust_app *app)
6182{
6183 int ret = 0;
6184 struct ust_app_session *ua_sess;
6185
6186 DBG("Regenerating the metadata for ust app pid %d", app->pid);
6187
6188 rcu_read_lock();
6189
6190 ua_sess = lookup_session_by_app(usess, app);
6191 if (ua_sess == NULL) {
6192 /* The session is in teardown process. Ignore and continue. */
6193 goto end;
6194 }
6195
6196 pthread_mutex_lock(&ua_sess->lock);
6197
6198 if (ua_sess->deleted) {
6199 goto end_unlock;
6200 }
6201
6202 pthread_mutex_lock(&app->sock_lock);
6203 ret = ustctl_regenerate_statedump(app->sock, ua_sess->handle);
6204 pthread_mutex_unlock(&app->sock_lock);
6205
6206end_unlock:
6207 pthread_mutex_unlock(&ua_sess->lock);
6208
6209end:
6210 rcu_read_unlock();
6211 health_code_update();
6212 return ret;
6213}
6214
6215/*
6216 * Regenerate the statedump for each app in the session.
6217 */
6218int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
6219{
6220 int ret = 0;
6221 struct lttng_ht_iter iter;
6222 struct ust_app *app;
6223
6224 DBG("Regenerating the metadata for all UST apps");
6225
6226 rcu_read_lock();
6227
6228 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6229 if (!app->compatible) {
6230 continue;
6231 }
6232
6233 ret = ust_app_regenerate_statedump(usess, app);
6234 if (ret < 0) {
6235 /* Continue to the next app even on error */
6236 continue;
6237 }
6238 }
6239
6240 rcu_read_unlock();
6241
6242 return 0;
6243}
5c408ad8
JD
6244
6245/*
6246 * Rotate all the channels of a session.
6247 *
6f6d3b69 6248 * Return LTTNG_OK on success or else an LTTng error code.
5c408ad8 6249 */
6f6d3b69 6250enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
5c408ad8 6251{
6f6d3b69
MD
6252 int ret;
6253 enum lttng_error_code cmd_ret = LTTNG_OK;
5c408ad8
JD
6254 struct lttng_ht_iter iter;
6255 struct ust_app *app;
6256 struct ltt_ust_session *usess = session->ust_session;
5c408ad8
JD
6257
6258 assert(usess);
6259
6260 rcu_read_lock();
6261
6262 switch (usess->buffer_type) {
6263 case LTTNG_BUFFER_PER_UID:
6264 {
6265 struct buffer_reg_uid *reg;
6266
6267 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6268 struct buffer_reg_channel *reg_chan;
6269 struct consumer_socket *socket;
6270
14d3fca9
JR
6271 if (!reg->registry->reg.ust->metadata_key) {
6272 /* Skip since no metadata is present */
6273 continue;
6274 }
6275
5c408ad8
JD
6276 /* Get consumer socket to use to push the metadata.*/
6277 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
6278 usess->consumer);
6279 if (!socket) {
6f6d3b69 6280 cmd_ret = LTTNG_ERR_INVALID;
5c408ad8
JD
6281 goto error;
6282 }
6283
5c408ad8
JD
6284 /* Rotate the data channels. */
6285 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
6286 reg_chan, node.node) {
5c408ad8
JD
6287 ret = consumer_rotate_channel(socket,
6288 reg_chan->consumer_key,
6289 usess->uid, usess->gid,
d2956687
JG
6290 usess->consumer,
6291 /* is_metadata_channel */ false);
5c408ad8 6292 if (ret < 0) {
6f6d3b69 6293 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
5c408ad8
JD
6294 goto error;
6295 }
6296 }
6297
6298 (void) push_metadata(reg->registry->reg.ust, usess->consumer);
6299
6300 ret = consumer_rotate_channel(socket,
6301 reg->registry->reg.ust->metadata_key,
6302 usess->uid, usess->gid,
d2956687
JG
6303 usess->consumer,
6304 /* is_metadata_channel */ true);
5c408ad8 6305 if (ret < 0) {
6f6d3b69 6306 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
5c408ad8
JD
6307 goto error;
6308 }
5c408ad8
JD
6309 }
6310 break;
6311 }
6312 case LTTNG_BUFFER_PER_PID:
6313 {
6314 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6315 struct consumer_socket *socket;
6316 struct lttng_ht_iter chan_iter;
6317 struct ust_app_channel *ua_chan;
6318 struct ust_app_session *ua_sess;
6319 struct ust_registry_session *registry;
6320
6321 ua_sess = lookup_session_by_app(usess, app);
6322 if (!ua_sess) {
6323 /* Session not associated with this app. */
6324 continue;
6325 }
5c408ad8
JD
6326
6327 /* Get the right consumer socket for the application. */
6328 socket = consumer_find_socket_by_bitness(app->bits_per_long,
6329 usess->consumer);
6330 if (!socket) {
6f6d3b69 6331 cmd_ret = LTTNG_ERR_INVALID;
5c408ad8
JD
6332 goto error;
6333 }
6334
6335 registry = get_session_registry(ua_sess);
6336 if (!registry) {
6f6d3b69
MD
6337 DBG("Application session is being torn down. Skip application.");
6338 continue;
5c408ad8
JD
6339 }
6340
5c408ad8
JD
6341 /* Rotate the data channels. */
6342 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
6343 ua_chan, node.node) {
470cc211
JG
6344 ret = consumer_rotate_channel(socket,
6345 ua_chan->key,
6346 ua_sess->effective_credentials
6347 .uid,
6348 ua_sess->effective_credentials
6349 .gid,
d2956687
JG
6350 ua_sess->consumer,
6351 /* is_metadata_channel */ false);
5c408ad8 6352 if (ret < 0) {
6f6d3b69
MD
6353 /* Per-PID buffer and application going away. */
6354 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
6355 continue;
6356 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
5c408ad8
JD
6357 goto error;
6358 }
6359 }
6360
6361 /* Rotate the metadata channel. */
6362 (void) push_metadata(registry, usess->consumer);
470cc211
JG
6363 ret = consumer_rotate_channel(socket,
6364 registry->metadata_key,
6365 ua_sess->effective_credentials.uid,
6366 ua_sess->effective_credentials.gid,
d2956687
JG
6367 ua_sess->consumer,
6368 /* is_metadata_channel */ true);
5c408ad8 6369 if (ret < 0) {
6f6d3b69
MD
6370 /* Per-PID buffer and application going away. */
6371 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
6372 continue;
6373 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
5c408ad8
JD
6374 goto error;
6375 }
5c408ad8
JD
6376 }
6377 break;
6378 }
6379 default:
6380 assert(0);
6381 break;
6382 }
6383
6f6d3b69 6384 cmd_ret = LTTNG_OK;
5c408ad8
JD
6385
6386error:
6387 rcu_read_unlock();
6f6d3b69 6388 return cmd_ret;
5c408ad8 6389}
d2956687
JG
6390
6391enum lttng_error_code ust_app_create_channel_subdirectories(
6392 const struct ltt_ust_session *usess)
6393{
6394 enum lttng_error_code ret = LTTNG_OK;
6395 struct lttng_ht_iter iter;
6396 enum lttng_trace_chunk_status chunk_status;
6397 char *pathname_index;
6398 int fmt_ret;
6399
6400 assert(usess->current_trace_chunk);
6401 rcu_read_lock();
6402
6403 switch (usess->buffer_type) {
6404 case LTTNG_BUFFER_PER_UID:
6405 {
6406 struct buffer_reg_uid *reg;
6407
6408 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6409 fmt_ret = asprintf(&pathname_index,
5da88b0f 6410 DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH "/" DEFAULT_INDEX_DIR,
d2956687
JG
6411 reg->uid, reg->bits_per_long);
6412 if (fmt_ret < 0) {
6413 ERR("Failed to format channel index directory");
6414 ret = LTTNG_ERR_CREATE_DIR_FAIL;
6415 goto error;
6416 }
6417
6418 /*
6419 * Create the index subdirectory which will take care
6420 * of implicitly creating the channel's path.
6421 */
6422 chunk_status = lttng_trace_chunk_create_subdirectory(
6423 usess->current_trace_chunk,
6424 pathname_index);
6425 free(pathname_index);
6426 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
6427 ret = LTTNG_ERR_CREATE_DIR_FAIL;
6428 goto error;
6429 }
6430 }
6431 break;
6432 }
6433 case LTTNG_BUFFER_PER_PID:
6434 {
6435 struct ust_app *app;
6436
495dece5
MD
6437 /*
6438 * Create the toplevel ust/ directory in case no apps are running.
6439 */
6440 chunk_status = lttng_trace_chunk_create_subdirectory(
6441 usess->current_trace_chunk,
6442 DEFAULT_UST_TRACE_DIR);
6443 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
6444 ret = LTTNG_ERR_CREATE_DIR_FAIL;
6445 goto error;
6446 }
6447
d2956687
JG
6448 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
6449 pid_n.node) {
6450 struct ust_app_session *ua_sess;
6451 struct ust_registry_session *registry;
6452
6453 ua_sess = lookup_session_by_app(usess, app);
6454 if (!ua_sess) {
6455 /* Session not associated with this app. */
6456 continue;
6457 }
6458
6459 registry = get_session_registry(ua_sess);
6460 if (!registry) {
6461 DBG("Application session is being torn down. Skip application.");
6462 continue;
6463 }
6464
6465 fmt_ret = asprintf(&pathname_index,
5da88b0f 6466 DEFAULT_UST_TRACE_DIR "/%s/" DEFAULT_INDEX_DIR,
d2956687
JG
6467 ua_sess->path);
6468 if (fmt_ret < 0) {
6469 ERR("Failed to format channel index directory");
6470 ret = LTTNG_ERR_CREATE_DIR_FAIL;
6471 goto error;
6472 }
6473 /*
6474 * Create the index subdirectory which will take care
6475 * of implicitly creating the channel's path.
6476 */
6477 chunk_status = lttng_trace_chunk_create_subdirectory(
6478 usess->current_trace_chunk,
6479 pathname_index);
6480 free(pathname_index);
6481 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
6482 ret = LTTNG_ERR_CREATE_DIR_FAIL;
6483 goto error;
6484 }
6485 }
6486 break;
6487 }
6488 default:
6489 abort();
6490 }
6491
6492 ret = LTTNG_OK;
6493error:
6494 rcu_read_unlock();
6495 return ret;
6496}
4a9b9759
MD
6497
6498/*
6499 * Clear all the channels of a session.
6500 *
6501 * Return LTTNG_OK on success or else an LTTng error code.
6502 */
6503enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
6504{
6505 int ret;
6506 enum lttng_error_code cmd_ret = LTTNG_OK;
6507 struct lttng_ht_iter iter;
6508 struct ust_app *app;
6509 struct ltt_ust_session *usess = session->ust_session;
6510
6511 assert(usess);
6512
6513 rcu_read_lock();
6514
6515 if (usess->active) {
6516 ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
6517 cmd_ret = LTTNG_ERR_FATAL;
6518 goto end;
6519 }
6520
6521 switch (usess->buffer_type) {
6522 case LTTNG_BUFFER_PER_UID:
6523 {
6524 struct buffer_reg_uid *reg;
6525
6526 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6527 struct buffer_reg_channel *reg_chan;
6528 struct consumer_socket *socket;
6529
6530 /* Get consumer socket to use to push the metadata.*/
6531 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
6532 usess->consumer);
6533 if (!socket) {
6534 cmd_ret = LTTNG_ERR_INVALID;
6535 goto error_socket;
6536 }
6537
6538 /* Clear the data channels. */
6539 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
6540 reg_chan, node.node) {
6541 ret = consumer_clear_channel(socket,
6542 reg_chan->consumer_key);
6543 if (ret < 0) {
6544 goto error;
6545 }
6546 }
6547
6548 (void) push_metadata(reg->registry->reg.ust, usess->consumer);
6549
6550 /*
6551 * Clear the metadata channel.
6552 * Metadata channel is not cleared per se but we still need to
6553 * perform a rotation operation on it behind the scenes.
6554 */
6555 ret = consumer_clear_channel(socket,
6556 reg->registry->reg.ust->metadata_key);
6557 if (ret < 0) {
6558 goto error;
6559 }
6560 }
6561 break;
6562 }
6563 case LTTNG_BUFFER_PER_PID:
6564 {
6565 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6566 struct consumer_socket *socket;
6567 struct lttng_ht_iter chan_iter;
6568 struct ust_app_channel *ua_chan;
6569 struct ust_app_session *ua_sess;
6570 struct ust_registry_session *registry;
6571
6572 ua_sess = lookup_session_by_app(usess, app);
6573 if (!ua_sess) {
6574 /* Session not associated with this app. */
6575 continue;
6576 }
6577
6578 /* Get the right consumer socket for the application. */
6579 socket = consumer_find_socket_by_bitness(app->bits_per_long,
6580 usess->consumer);
6581 if (!socket) {
6582 cmd_ret = LTTNG_ERR_INVALID;
6583 goto error_socket;
6584 }
6585
6586 registry = get_session_registry(ua_sess);
6587 if (!registry) {
6588 DBG("Application session is being torn down. Skip application.");
6589 continue;
6590 }
6591
6592 /* Clear the data channels. */
6593 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
6594 ua_chan, node.node) {
6595 ret = consumer_clear_channel(socket, ua_chan->key);
6596 if (ret < 0) {
6597 /* Per-PID buffer and application going away. */
6598 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
6599 continue;
6600 }
6601 goto error;
6602 }
6603 }
6604
6605 (void) push_metadata(registry, usess->consumer);
6606
6607 /*
6608 * Clear the metadata channel.
6609 * Metadata channel is not cleared per se but we still need to
6610 * perform a rotation operation on it behind the scenes.
6611 */
6612 ret = consumer_clear_channel(socket, registry->metadata_key);
6613 if (ret < 0) {
6614 /* Per-PID buffer and application going away. */
6615 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND) {
6616 continue;
6617 }
6618 goto error;
6619 }
6620 }
6621 break;
6622 }
6623 default:
6624 assert(0);
6625 break;
6626 }
6627
6628 cmd_ret = LTTNG_OK;
6629 goto end;
6630
6631error:
6632 switch (-ret) {
6633 case LTTCOMM_CONSUMERD_RELAYD_CLEAR_DISALLOWED:
6634 cmd_ret = LTTNG_ERR_CLEAR_RELAY_DISALLOWED;
6635 break;
6636 default:
6637 cmd_ret = LTTNG_ERR_CLEAR_FAIL_CONSUMER;
6638 }
6639
6640error_socket:
6641end:
6642 rcu_read_unlock();
6643 return cmd_ret;
6644}