Fix: sessiond: session destroy hang in per-uid when context cannot be added
[lttng-tools.git] / src / bin / lttng-sessiond / ust-app.c
CommitLineData
91d76f53
DG
1/*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
bdf64013 3 * Copyright (C) 2016 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
91d76f53 4 *
d14d33bf
AM
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
91d76f53
DG
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
d14d33bf
AM
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
91d76f53
DG
17 */
18
6c1c0768 19#define _LGPL_SOURCE
91d76f53 20#include <errno.h>
7972aab2 21#include <inttypes.h>
91d76f53
DG
22#include <pthread.h>
23#include <stdio.h>
24#include <stdlib.h>
099e26bd 25#include <string.h>
aba8e916
DG
26#include <sys/stat.h>
27#include <sys/types.h>
099e26bd 28#include <unistd.h>
0df502fd 29#include <urcu/compiler.h>
331744e3 30#include <signal.h>
bec39940 31
990570ed 32#include <common/common.h>
86acf0da 33#include <common/sessiond-comm/sessiond-comm.h>
1e307fab 34
7972aab2 35#include "buffer-registry.h"
86acf0da 36#include "fd-limit.h"
8782cc74 37#include "health-sessiond.h"
56fff090 38#include "ust-app.h"
48842b30 39#include "ust-consumer.h"
b7340b1d
JG
40#include "lttng-ust-ctl.h"
41#include "lttng-ust-error.h"
0b2dc8df 42#include "utils.h"
fb83fe64 43#include "session.h"
e9404c27
JG
44#include "lttng-sessiond.h"
45#include "notification-thread-commands.h"
5c408ad8 46#include "rotate.h"
d80a6244 47
e1d64b5c
MJ
48struct lttng_ht *ust_app_ht;
49struct lttng_ht *ust_app_ht_by_sock;
50struct lttng_ht *ust_app_ht_by_notify_sock;
51
c4b88406
MD
52static
53int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
54
d9bf3ca4
MD
55/* Next available channel key. Access under next_channel_key_lock. */
56static uint64_t _next_channel_key;
57static pthread_mutex_t next_channel_key_lock = PTHREAD_MUTEX_INITIALIZER;
58
59/* Next available session ID. Access under next_session_id_lock. */
60static uint64_t _next_session_id;
61static pthread_mutex_t next_session_id_lock = PTHREAD_MUTEX_INITIALIZER;
ffe60014
DG
62
63/*
d9bf3ca4 64 * Return the incremented value of next_channel_key.
ffe60014 65 */
d9bf3ca4 66static uint64_t get_next_channel_key(void)
ffe60014 67{
d9bf3ca4
MD
68 uint64_t ret;
69
70 pthread_mutex_lock(&next_channel_key_lock);
71 ret = ++_next_channel_key;
72 pthread_mutex_unlock(&next_channel_key_lock);
73 return ret;
ffe60014
DG
74}
75
76/*
7972aab2 77 * Return the atomically incremented value of next_session_id.
ffe60014 78 */
d9bf3ca4 79static uint64_t get_next_session_id(void)
ffe60014 80{
d9bf3ca4
MD
81 uint64_t ret;
82
83 pthread_mutex_lock(&next_session_id_lock);
84 ret = ++_next_session_id;
85 pthread_mutex_unlock(&next_session_id_lock);
86 return ret;
ffe60014
DG
87}
88
d65d2de8
DG
89static void copy_channel_attr_to_ustctl(
90 struct ustctl_consumer_channel_attr *attr,
91 struct lttng_ust_channel_attr *uattr)
92{
93 /* Copy event attributes since the layout is different. */
94 attr->subbuf_size = uattr->subbuf_size;
95 attr->num_subbuf = uattr->num_subbuf;
96 attr->overwrite = uattr->overwrite;
97 attr->switch_timer_interval = uattr->switch_timer_interval;
98 attr->read_timer_interval = uattr->read_timer_interval;
99 attr->output = uattr->output;
491d1539 100 attr->blocking_timeout = uattr->u.s.blocking_timeout;
d65d2de8
DG
101}
102
025faf73
DG
103/*
104 * Match function for the hash table lookup.
105 *
106 * It matches an ust app event based on three attributes which are the event
107 * name, the filter bytecode and the loglevel.
108 */
18eace3b
DG
109static int ht_match_ust_app_event(struct cds_lfht_node *node, const void *_key)
110{
111 struct ust_app_event *event;
112 const struct ust_app_ht_key *key;
2106efa0 113 int ev_loglevel_value;
18eace3b
DG
114
115 assert(node);
116 assert(_key);
117
118 event = caa_container_of(node, struct ust_app_event, node.node);
119 key = _key;
2106efa0 120 ev_loglevel_value = event->attr.loglevel;
18eace3b 121
1af53eb5 122 /* Match the 4 elements of the key: name, filter, loglevel, exclusions */
18eace3b
DG
123
124 /* Event name */
125 if (strncmp(event->attr.name, key->name, sizeof(event->attr.name)) != 0) {
126 goto no_match;
127 }
128
129 /* Event loglevel. */
2106efa0 130 if (ev_loglevel_value != key->loglevel_type) {
025faf73 131 if (event->attr.loglevel_type == LTTNG_UST_LOGLEVEL_ALL
2106efa0
PP
132 && key->loglevel_type == 0 &&
133 ev_loglevel_value == -1) {
025faf73
DG
134 /*
135 * Match is accepted. This is because on event creation, the
136 * loglevel is set to -1 if the event loglevel type is ALL so 0 and
137 * -1 are accepted for this loglevel type since 0 is the one set by
138 * the API when receiving an enable event.
139 */
140 } else {
141 goto no_match;
142 }
18eace3b
DG
143 }
144
145 /* One of the filters is NULL, fail. */
146 if ((key->filter && !event->filter) || (!key->filter && event->filter)) {
147 goto no_match;
148 }
149
025faf73
DG
150 if (key->filter && event->filter) {
151 /* Both filters exists, check length followed by the bytecode. */
152 if (event->filter->len != key->filter->len ||
153 memcmp(event->filter->data, key->filter->data,
154 event->filter->len) != 0) {
155 goto no_match;
156 }
18eace3b
DG
157 }
158
1af53eb5
JI
159 /* One of the exclusions is NULL, fail. */
160 if ((key->exclusion && !event->exclusion) || (!key->exclusion && event->exclusion)) {
161 goto no_match;
162 }
163
164 if (key->exclusion && event->exclusion) {
165 /* Both exclusions exists, check count followed by the names. */
166 if (event->exclusion->count != key->exclusion->count ||
167 memcmp(event->exclusion->names, key->exclusion->names,
168 event->exclusion->count * LTTNG_UST_SYM_NAME_LEN) != 0) {
169 goto no_match;
170 }
171 }
172
173
025faf73 174 /* Match. */
18eace3b
DG
175 return 1;
176
177no_match:
178 return 0;
18eace3b
DG
179}
180
025faf73
DG
181/*
182 * Unique add of an ust app event in the given ht. This uses the custom
183 * ht_match_ust_app_event match function and the event name as hash.
184 */
d0b96690 185static void add_unique_ust_app_event(struct ust_app_channel *ua_chan,
18eace3b
DG
186 struct ust_app_event *event)
187{
188 struct cds_lfht_node *node_ptr;
189 struct ust_app_ht_key key;
d0b96690 190 struct lttng_ht *ht;
18eace3b 191
d0b96690
DG
192 assert(ua_chan);
193 assert(ua_chan->events);
18eace3b
DG
194 assert(event);
195
d0b96690 196 ht = ua_chan->events;
18eace3b
DG
197 key.name = event->attr.name;
198 key.filter = event->filter;
2106efa0 199 key.loglevel_type = event->attr.loglevel;
91c89f23 200 key.exclusion = event->exclusion;
18eace3b
DG
201
202 node_ptr = cds_lfht_add_unique(ht->ht,
203 ht->hash_fct(event->node.key, lttng_ht_seed),
204 ht_match_ust_app_event, &key, &event->node.node);
205 assert(node_ptr == &event->node.node);
206}
207
d88aee68
DG
208/*
209 * Close the notify socket from the given RCU head object. This MUST be called
210 * through a call_rcu().
211 */
212static void close_notify_sock_rcu(struct rcu_head *head)
213{
214 int ret;
215 struct ust_app_notify_sock_obj *obj =
216 caa_container_of(head, struct ust_app_notify_sock_obj, head);
217
218 /* Must have a valid fd here. */
219 assert(obj->fd >= 0);
220
221 ret = close(obj->fd);
222 if (ret) {
223 ERR("close notify sock %d RCU", obj->fd);
224 }
225 lttng_fd_put(LTTNG_FD_APPS, 1);
226
227 free(obj);
228}
229
7972aab2
DG
230/*
231 * Return the session registry according to the buffer type of the given
232 * session.
233 *
234 * A registry per UID object MUST exists before calling this function or else
235 * it assert() if not found. RCU read side lock must be acquired.
236 */
237static struct ust_registry_session *get_session_registry(
238 struct ust_app_session *ua_sess)
239{
240 struct ust_registry_session *registry = NULL;
241
242 assert(ua_sess);
243
244 switch (ua_sess->buffer_type) {
245 case LTTNG_BUFFER_PER_PID:
246 {
247 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
248 if (!reg_pid) {
249 goto error;
250 }
251 registry = reg_pid->registry->reg.ust;
252 break;
253 }
254 case LTTNG_BUFFER_PER_UID:
255 {
256 struct buffer_reg_uid *reg_uid = buffer_reg_uid_find(
c51311d6
JG
257 ua_sess->tracing_id, ua_sess->bits_per_long,
258 ua_sess->real_credentials.uid);
7972aab2
DG
259 if (!reg_uid) {
260 goto error;
261 }
262 registry = reg_uid->registry->reg.ust;
263 break;
264 }
265 default:
266 assert(0);
267 };
268
269error:
270 return registry;
271}
272
55cc08a6
DG
273/*
274 * Delete ust context safely. RCU read lock must be held before calling
275 * this function.
276 */
277static
fb45065e
MD
278void delete_ust_app_ctx(int sock, struct ust_app_ctx *ua_ctx,
279 struct ust_app *app)
55cc08a6 280{
ffe60014
DG
281 int ret;
282
283 assert(ua_ctx);
284
55cc08a6 285 if (ua_ctx->obj) {
fb45065e 286 pthread_mutex_lock(&app->sock_lock);
ffe60014 287 ret = ustctl_release_object(sock, ua_ctx->obj);
fb45065e 288 pthread_mutex_unlock(&app->sock_lock);
d0b96690
DG
289 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
290 ERR("UST app sock %d release ctx obj handle %d failed with ret %d",
291 sock, ua_ctx->obj->handle, ret);
ffe60014 292 }
55cc08a6
DG
293 free(ua_ctx->obj);
294 }
295 free(ua_ctx);
296}
297
d80a6244
DG
298/*
299 * Delete ust app event safely. RCU read lock must be held before calling
300 * this function.
301 */
8b366481 302static
fb45065e
MD
303void delete_ust_app_event(int sock, struct ust_app_event *ua_event,
304 struct ust_app *app)
d80a6244 305{
ffe60014
DG
306 int ret;
307
308 assert(ua_event);
309
53a80697 310 free(ua_event->filter);
951f0b71
JI
311 if (ua_event->exclusion != NULL)
312 free(ua_event->exclusion);
edb67388 313 if (ua_event->obj != NULL) {
fb45065e 314 pthread_mutex_lock(&app->sock_lock);
ffe60014 315 ret = ustctl_release_object(sock, ua_event->obj);
fb45065e 316 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
317 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
318 ERR("UST app sock %d release event obj failed with ret %d",
319 sock, ret);
320 }
edb67388
DG
321 free(ua_event->obj);
322 }
d80a6244
DG
323 free(ua_event);
324}
325
326/*
7972aab2
DG
327 * Release ust data object of the given stream.
328 *
329 * Return 0 on success or else a negative value.
d80a6244 330 */
fb45065e
MD
331static int release_ust_app_stream(int sock, struct ust_app_stream *stream,
332 struct ust_app *app)
d80a6244 333{
7972aab2 334 int ret = 0;
ffe60014
DG
335
336 assert(stream);
337
8b366481 338 if (stream->obj) {
fb45065e 339 pthread_mutex_lock(&app->sock_lock);
ffe60014 340 ret = ustctl_release_object(sock, stream->obj);
fb45065e 341 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
342 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
343 ERR("UST app sock %d release stream obj failed with ret %d",
344 sock, ret);
345 }
4063050c 346 lttng_fd_put(LTTNG_FD_APPS, 2);
8b366481
DG
347 free(stream->obj);
348 }
7972aab2
DG
349
350 return ret;
351}
352
353/*
354 * Delete ust app stream safely. RCU read lock must be held before calling
355 * this function.
356 */
357static
fb45065e
MD
358void delete_ust_app_stream(int sock, struct ust_app_stream *stream,
359 struct ust_app *app)
7972aab2
DG
360{
361 assert(stream);
362
fb45065e 363 (void) release_ust_app_stream(sock, stream, app);
84cd17c6 364 free(stream);
d80a6244
DG
365}
366
36b588ed
MD
367/*
368 * We need to execute ht_destroy outside of RCU read-side critical
0b2dc8df
MD
369 * section and outside of call_rcu thread, so we postpone its execution
370 * using ht_cleanup_push. It is simpler than to change the semantic of
371 * the many callers of delete_ust_app_session().
36b588ed
MD
372 */
373static
374void delete_ust_app_channel_rcu(struct rcu_head *head)
375{
376 struct ust_app_channel *ua_chan =
377 caa_container_of(head, struct ust_app_channel, rcu_head);
378
0b2dc8df
MD
379 ht_cleanup_push(ua_chan->ctx);
380 ht_cleanup_push(ua_chan->events);
36b588ed
MD
381 free(ua_chan);
382}
383
fb83fe64
JD
384/*
385 * Extract the lost packet or discarded events counter when the channel is
386 * being deleted and store the value in the parent channel so we can
387 * access it from lttng list and at stop/destroy.
82cac6d2
JG
388 *
389 * The session list lock must be held by the caller.
fb83fe64
JD
390 */
391static
392void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan)
393{
394 uint64_t discarded = 0, lost = 0;
395 struct ltt_session *session;
396 struct ltt_ust_channel *uchan;
397
398 if (ua_chan->attr.type != LTTNG_UST_CHAN_PER_CPU) {
399 return;
400 }
401
402 rcu_read_lock();
403 session = session_find_by_id(ua_chan->session->tracing_id);
d68ec974
JG
404 if (!session || !session->ust_session) {
405 /*
406 * Not finding the session is not an error because there are
407 * multiple ways the channels can be torn down.
408 *
409 * 1) The session daemon can initiate the destruction of the
410 * ust app session after receiving a destroy command or
411 * during its shutdown/teardown.
412 * 2) The application, since we are in per-pid tracing, is
413 * unregistering and tearing down its ust app session.
414 *
415 * Both paths are protected by the session list lock which
416 * ensures that the accounting of lost packets and discarded
417 * events is done exactly once. The session is then unpublished
418 * from the session list, resulting in this condition.
419 */
fb83fe64
JD
420 goto end;
421 }
422
423 if (ua_chan->attr.overwrite) {
424 consumer_get_lost_packets(ua_chan->session->tracing_id,
425 ua_chan->key, session->ust_session->consumer,
426 &lost);
427 } else {
428 consumer_get_discarded_events(ua_chan->session->tracing_id,
429 ua_chan->key, session->ust_session->consumer,
430 &discarded);
431 }
432 uchan = trace_ust_find_channel_by_name(
433 session->ust_session->domain_global.channels,
434 ua_chan->name);
435 if (!uchan) {
436 ERR("Missing UST channel to store discarded counters");
437 goto end;
438 }
439
440 uchan->per_pid_closed_app_discarded += discarded;
441 uchan->per_pid_closed_app_lost += lost;
442
443end:
444 rcu_read_unlock();
48a86f68
JG
445 if (session) {
446 session_put(session);
447 }
fb83fe64
JD
448}
449
d80a6244
DG
450/*
451 * Delete ust app channel safely. RCU read lock must be held before calling
452 * this function.
82cac6d2
JG
453 *
454 * The session list lock must be held by the caller.
d80a6244 455 */
8b366481 456static
d0b96690
DG
457void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
458 struct ust_app *app)
d80a6244
DG
459{
460 int ret;
bec39940 461 struct lttng_ht_iter iter;
d80a6244 462 struct ust_app_event *ua_event;
55cc08a6 463 struct ust_app_ctx *ua_ctx;
030a66fa 464 struct ust_app_stream *stream, *stmp;
7972aab2 465 struct ust_registry_session *registry;
d80a6244 466
ffe60014
DG
467 assert(ua_chan);
468
469 DBG3("UST app deleting channel %s", ua_chan->name);
470
55cc08a6 471 /* Wipe stream */
d80a6244 472 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
84cd17c6 473 cds_list_del(&stream->list);
fb45065e 474 delete_ust_app_stream(sock, stream, app);
d80a6244
DG
475 }
476
55cc08a6 477 /* Wipe context */
bec39940 478 cds_lfht_for_each_entry(ua_chan->ctx->ht, &iter.iter, ua_ctx, node.node) {
31746f93 479 cds_list_del(&ua_ctx->list);
bec39940 480 ret = lttng_ht_del(ua_chan->ctx, &iter);
55cc08a6 481 assert(!ret);
fb45065e 482 delete_ust_app_ctx(sock, ua_ctx, app);
55cc08a6 483 }
d80a6244 484
55cc08a6 485 /* Wipe events */
bec39940
DG
486 cds_lfht_for_each_entry(ua_chan->events->ht, &iter.iter, ua_event,
487 node.node) {
488 ret = lttng_ht_del(ua_chan->events, &iter);
525b0740 489 assert(!ret);
fb45065e 490 delete_ust_app_event(sock, ua_event, app);
d80a6244 491 }
edb67388 492
c8335706
MD
493 if (ua_chan->session->buffer_type == LTTNG_BUFFER_PER_PID) {
494 /* Wipe and free registry from session registry. */
495 registry = get_session_registry(ua_chan->session);
496 if (registry) {
e9404c27 497 ust_registry_channel_del_free(registry, ua_chan->key,
8a5340bc
MD
498 sock >= 0);
499 }
c1cd9f61
JG
500 /*
501 * A negative socket can be used by the caller when
502 * cleaning-up a ua_chan in an error path. Skip the
503 * accounting in this case.
504 */
8a5340bc
MD
505 if (sock >= 0) {
506 save_per_pid_lost_discarded_counters(ua_chan);
c8335706 507 }
7972aab2 508 }
d0b96690 509
edb67388 510 if (ua_chan->obj != NULL) {
d0b96690
DG
511 /* Remove channel from application UST object descriptor. */
512 iter.iter.node = &ua_chan->ust_objd_node.node;
c6e62271
DG
513 ret = lttng_ht_del(app->ust_objd, &iter);
514 assert(!ret);
fb45065e 515 pthread_mutex_lock(&app->sock_lock);
ffe60014 516 ret = ustctl_release_object(sock, ua_chan->obj);
fb45065e 517 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
518 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
519 ERR("UST app sock %d release channel obj failed with ret %d",
520 sock, ret);
521 }
7972aab2 522 lttng_fd_put(LTTNG_FD_APPS, 1);
edb67388
DG
523 free(ua_chan->obj);
524 }
36b588ed 525 call_rcu(&ua_chan->rcu_head, delete_ust_app_channel_rcu);
d80a6244
DG
526}
527
fb45065e
MD
528int ust_app_register_done(struct ust_app *app)
529{
530 int ret;
531
532 pthread_mutex_lock(&app->sock_lock);
533 ret = ustctl_register_done(app->sock);
534 pthread_mutex_unlock(&app->sock_lock);
535 return ret;
536}
537
538int ust_app_release_object(struct ust_app *app, struct lttng_ust_object_data *data)
539{
540 int ret, sock;
541
542 if (app) {
543 pthread_mutex_lock(&app->sock_lock);
544 sock = app->sock;
545 } else {
546 sock = -1;
547 }
548 ret = ustctl_release_object(sock, data);
549 if (app) {
550 pthread_mutex_unlock(&app->sock_lock);
551 }
552 return ret;
553}
554
331744e3 555/*
1b532a60
DG
556 * Push metadata to consumer socket.
557 *
dc2bbdae
MD
558 * RCU read-side lock must be held to guarantee existance of socket.
559 * Must be called with the ust app session lock held.
560 * Must be called with the registry lock held.
331744e3
JD
561 *
562 * On success, return the len of metadata pushed or else a negative value.
2c57e06d
MD
563 * Returning a -EPIPE return value means we could not send the metadata,
564 * but it can be caused by recoverable errors (e.g. the application has
565 * terminated concurrently).
331744e3
JD
566 */
567ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
568 struct consumer_socket *socket, int send_zero_data)
569{
570 int ret;
571 char *metadata_str = NULL;
c585821b 572 size_t len, offset, new_metadata_len_sent;
331744e3 573 ssize_t ret_val;
93ec662e 574 uint64_t metadata_key, metadata_version;
331744e3
JD
575
576 assert(registry);
577 assert(socket);
1b532a60 578
c585821b
MD
579 metadata_key = registry->metadata_key;
580
ce34fcd0 581 /*
dc2bbdae
MD
582 * Means that no metadata was assigned to the session. This can
583 * happens if no start has been done previously.
ce34fcd0 584 */
c585821b 585 if (!metadata_key) {
ce34fcd0
MD
586 return 0;
587 }
588
331744e3
JD
589 offset = registry->metadata_len_sent;
590 len = registry->metadata_len - registry->metadata_len_sent;
c585821b 591 new_metadata_len_sent = registry->metadata_len;
93ec662e 592 metadata_version = registry->metadata_version;
331744e3
JD
593 if (len == 0) {
594 DBG3("No metadata to push for metadata key %" PRIu64,
595 registry->metadata_key);
596 ret_val = len;
597 if (send_zero_data) {
598 DBG("No metadata to push");
599 goto push_data;
600 }
601 goto end;
602 }
603
604 /* Allocate only what we have to send. */
605 metadata_str = zmalloc(len);
606 if (!metadata_str) {
607 PERROR("zmalloc ust app metadata string");
608 ret_val = -ENOMEM;
609 goto error;
610 }
c585821b 611 /* Copy what we haven't sent out. */
331744e3 612 memcpy(metadata_str, registry->metadata + offset, len);
331744e3
JD
613
614push_data:
c585821b
MD
615 pthread_mutex_unlock(&registry->lock);
616 /*
617 * We need to unlock the registry while we push metadata to
618 * break a circular dependency between the consumerd metadata
619 * lock and the sessiond registry lock. Indeed, pushing metadata
620 * to the consumerd awaits that it gets pushed all the way to
621 * relayd, but doing so requires grabbing the metadata lock. If
622 * a concurrent metadata request is being performed by
623 * consumerd, this can try to grab the registry lock on the
624 * sessiond while holding the metadata lock on the consumer
625 * daemon. Those push and pull schemes are performed on two
626 * different bidirectionnal communication sockets.
627 */
628 ret = consumer_push_metadata(socket, metadata_key,
93ec662e 629 metadata_str, len, offset, metadata_version);
c585821b 630 pthread_mutex_lock(&registry->lock);
331744e3 631 if (ret < 0) {
000baf6a 632 /*
dc2bbdae
MD
633 * There is an acceptable race here between the registry
634 * metadata key assignment and the creation on the
635 * consumer. The session daemon can concurrently push
636 * metadata for this registry while being created on the
637 * consumer since the metadata key of the registry is
638 * assigned *before* it is setup to avoid the consumer
639 * to ask for metadata that could possibly be not found
640 * in the session daemon.
000baf6a 641 *
dc2bbdae
MD
642 * The metadata will get pushed either by the session
643 * being stopped or the consumer requesting metadata if
644 * that race is triggered.
000baf6a
DG
645 */
646 if (ret == -LTTCOMM_CONSUMERD_CHANNEL_FAIL) {
647 ret = 0;
c585821b
MD
648 } else {
649 ERR("Error pushing metadata to consumer");
000baf6a 650 }
331744e3
JD
651 ret_val = ret;
652 goto error_push;
c585821b
MD
653 } else {
654 /*
655 * Metadata may have been concurrently pushed, since
656 * we're not holding the registry lock while pushing to
657 * consumer. This is handled by the fact that we send
658 * the metadata content, size, and the offset at which
659 * that metadata belongs. This may arrive out of order
660 * on the consumer side, and the consumer is able to
661 * deal with overlapping fragments. The consumer
662 * supports overlapping fragments, which must be
663 * contiguous starting from offset 0. We keep the
664 * largest metadata_len_sent value of the concurrent
665 * send.
666 */
667 registry->metadata_len_sent =
668 max_t(size_t, registry->metadata_len_sent,
669 new_metadata_len_sent);
331744e3 670 }
331744e3
JD
671 free(metadata_str);
672 return len;
673
674end:
675error:
ce34fcd0
MD
676 if (ret_val) {
677 /*
dc2bbdae
MD
678 * On error, flag the registry that the metadata is
679 * closed. We were unable to push anything and this
680 * means that either the consumer is not responding or
681 * the metadata cache has been destroyed on the
682 * consumer.
ce34fcd0
MD
683 */
684 registry->metadata_closed = 1;
685 }
331744e3
JD
686error_push:
687 free(metadata_str);
688 return ret_val;
689}
690
d88aee68 691/*
ce34fcd0 692 * For a given application and session, push metadata to consumer.
331744e3
JD
693 * Either sock or consumer is required : if sock is NULL, the default
694 * socket to send the metadata is retrieved from consumer, if sock
695 * is not NULL we use it to send the metadata.
ce34fcd0 696 * RCU read-side lock must be held while calling this function,
dc2bbdae
MD
697 * therefore ensuring existance of registry. It also ensures existance
698 * of socket throughout this function.
d88aee68
DG
699 *
700 * Return 0 on success else a negative error.
2c57e06d
MD
701 * Returning a -EPIPE return value means we could not send the metadata,
702 * but it can be caused by recoverable errors (e.g. the application has
703 * terminated concurrently).
d88aee68 704 */
7972aab2
DG
705static int push_metadata(struct ust_registry_session *registry,
706 struct consumer_output *consumer)
d88aee68 707{
331744e3
JD
708 int ret_val;
709 ssize_t ret;
d88aee68
DG
710 struct consumer_socket *socket;
711
7972aab2
DG
712 assert(registry);
713 assert(consumer);
714
ce34fcd0 715 pthread_mutex_lock(&registry->lock);
ce34fcd0 716 if (registry->metadata_closed) {
dc2bbdae
MD
717 ret_val = -EPIPE;
718 goto error;
d88aee68
DG
719 }
720
d88aee68 721 /* Get consumer socket to use to push the metadata.*/
7972aab2
DG
722 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
723 consumer);
d88aee68 724 if (!socket) {
331744e3 725 ret_val = -1;
ce34fcd0 726 goto error;
d88aee68
DG
727 }
728
331744e3 729 ret = ust_app_push_metadata(registry, socket, 0);
d88aee68 730 if (ret < 0) {
331744e3 731 ret_val = ret;
ce34fcd0 732 goto error;
d88aee68 733 }
dc2bbdae 734 pthread_mutex_unlock(&registry->lock);
d88aee68
DG
735 return 0;
736
ce34fcd0 737error:
dc2bbdae 738 pthread_mutex_unlock(&registry->lock);
331744e3 739 return ret_val;
d88aee68
DG
740}
741
742/*
743 * Send to the consumer a close metadata command for the given session. Once
744 * done, the metadata channel is deleted and the session metadata pointer is
dc2bbdae 745 * nullified. The session lock MUST be held unless the application is
d88aee68
DG
746 * in the destroy path.
747 *
7ca90d1f
MD
748 * Do not hold the registry lock while communicating with the consumerd, because
749 * doing so causes inter-process deadlocks between consumerd and sessiond with
750 * the metadata request notification.
751 *
d88aee68
DG
752 * Return 0 on success else a negative value.
753 */
7972aab2
DG
754static int close_metadata(struct ust_registry_session *registry,
755 struct consumer_output *consumer)
d88aee68
DG
756{
757 int ret;
758 struct consumer_socket *socket;
7ca90d1f
MD
759 uint64_t metadata_key;
760 bool registry_was_already_closed;
d88aee68 761
7972aab2
DG
762 assert(registry);
763 assert(consumer);
d88aee68 764
7972aab2
DG
765 rcu_read_lock();
766
ce34fcd0 767 pthread_mutex_lock(&registry->lock);
7ca90d1f
MD
768 metadata_key = registry->metadata_key;
769 registry_was_already_closed = registry->metadata_closed;
770 if (metadata_key != 0) {
771 /*
772 * Metadata closed. Even on error this means that the consumer
773 * is not responding or not found so either way a second close
774 * should NOT be emit for this registry.
775 */
776 registry->metadata_closed = 1;
777 }
778 pthread_mutex_unlock(&registry->lock);
ce34fcd0 779
7ca90d1f 780 if (metadata_key == 0 || registry_was_already_closed) {
d88aee68 781 ret = 0;
1b532a60 782 goto end;
d88aee68
DG
783 }
784
d88aee68 785 /* Get consumer socket to use to push the metadata.*/
7972aab2
DG
786 socket = consumer_find_socket_by_bitness(registry->bits_per_long,
787 consumer);
d88aee68
DG
788 if (!socket) {
789 ret = -1;
7ca90d1f 790 goto end;
d88aee68
DG
791 }
792
7ca90d1f 793 ret = consumer_close_metadata(socket, metadata_key);
d88aee68 794 if (ret < 0) {
7ca90d1f 795 goto end;
d88aee68
DG
796 }
797
1b532a60 798end:
7972aab2 799 rcu_read_unlock();
d88aee68
DG
800 return ret;
801}
802
36b588ed
MD
803/*
804 * We need to execute ht_destroy outside of RCU read-side critical
0b2dc8df
MD
805 * section and outside of call_rcu thread, so we postpone its execution
806 * using ht_cleanup_push. It is simpler than to change the semantic of
807 * the many callers of delete_ust_app_session().
36b588ed
MD
808 */
809static
810void delete_ust_app_session_rcu(struct rcu_head *head)
811{
812 struct ust_app_session *ua_sess =
813 caa_container_of(head, struct ust_app_session, rcu_head);
814
0b2dc8df 815 ht_cleanup_push(ua_sess->channels);
36b588ed
MD
816 free(ua_sess);
817}
818
d80a6244
DG
819/*
820 * Delete ust app session safely. RCU read lock must be held before calling
821 * this function.
82cac6d2
JG
822 *
823 * The session list lock must be held by the caller.
d80a6244 824 */
8b366481 825static
d0b96690
DG
826void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
827 struct ust_app *app)
d80a6244
DG
828{
829 int ret;
bec39940 830 struct lttng_ht_iter iter;
d80a6244 831 struct ust_app_channel *ua_chan;
7972aab2 832 struct ust_registry_session *registry;
d80a6244 833
d88aee68
DG
834 assert(ua_sess);
835
1b532a60
DG
836 pthread_mutex_lock(&ua_sess->lock);
837
b161602a
MD
838 assert(!ua_sess->deleted);
839 ua_sess->deleted = true;
840
7972aab2 841 registry = get_session_registry(ua_sess);
fad1ed2f 842 /* Registry can be null on error path during initialization. */
ce34fcd0 843 if (registry) {
d88aee68 844 /* Push metadata for application before freeing the application. */
7972aab2 845 (void) push_metadata(registry, ua_sess->consumer);
d88aee68 846
7972aab2
DG
847 /*
848 * Don't ask to close metadata for global per UID buffers. Close
1b532a60
DG
849 * metadata only on destroy trace session in this case. Also, the
850 * previous push metadata could have flag the metadata registry to
851 * close so don't send a close command if closed.
7972aab2 852 */
ce34fcd0 853 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
7972aab2
DG
854 /* And ask to close it for this session registry. */
855 (void) close_metadata(registry, ua_sess->consumer);
856 }
d80a6244
DG
857 }
858
bec39940
DG
859 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
860 node.node) {
861 ret = lttng_ht_del(ua_sess->channels, &iter);
525b0740 862 assert(!ret);
d0b96690 863 delete_ust_app_channel(sock, ua_chan, app);
d80a6244 864 }
d80a6244 865
7972aab2
DG
866 /* In case of per PID, the registry is kept in the session. */
867 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
868 struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
869 if (reg_pid) {
fad1ed2f
JR
870 /*
871 * Registry can be null on error path during
872 * initialization.
873 */
7972aab2
DG
874 buffer_reg_pid_remove(reg_pid);
875 buffer_reg_pid_destroy(reg_pid);
876 }
877 }
d0b96690 878
aee6bafd 879 if (ua_sess->handle != -1) {
fb45065e 880 pthread_mutex_lock(&app->sock_lock);
ffe60014 881 ret = ustctl_release_handle(sock, ua_sess->handle);
fb45065e 882 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
883 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
884 ERR("UST app sock %d release session handle failed with ret %d",
885 sock, ret);
886 }
10b56aef
MD
887 /* Remove session from application UST object descriptor. */
888 iter.iter.node = &ua_sess->ust_objd_node.node;
889 ret = lttng_ht_del(app->ust_sessions_objd, &iter);
890 assert(!ret);
aee6bafd 891 }
10b56aef 892
1b532a60
DG
893 pthread_mutex_unlock(&ua_sess->lock);
894
6addfa37
MD
895 consumer_output_put(ua_sess->consumer);
896
36b588ed 897 call_rcu(&ua_sess->rcu_head, delete_ust_app_session_rcu);
d80a6244 898}
91d76f53
DG
899
900/*
284d8f55
DG
901 * Delete a traceable application structure from the global list. Never call
902 * this function outside of a call_rcu call.
36b588ed
MD
903 *
904 * RCU read side lock should _NOT_ be held when calling this function.
91d76f53 905 */
8b366481
DG
906static
907void delete_ust_app(struct ust_app *app)
91d76f53 908{
8b366481 909 int ret, sock;
d42f20df 910 struct ust_app_session *ua_sess, *tmp_ua_sess;
44d3bd01 911
82cac6d2
JG
912 /*
913 * The session list lock must be held during this function to guarantee
914 * the existence of ua_sess.
915 */
916 session_lock_list();
d80a6244 917 /* Delete ust app sessions info */
852d0037
DG
918 sock = app->sock;
919 app->sock = -1;
d80a6244 920
8b366481 921 /* Wipe sessions */
d42f20df
DG
922 cds_list_for_each_entry_safe(ua_sess, tmp_ua_sess, &app->teardown_head,
923 teardown_node) {
924 /* Free every object in the session and the session. */
36b588ed 925 rcu_read_lock();
d0b96690 926 delete_ust_app_session(sock, ua_sess, app);
36b588ed 927 rcu_read_unlock();
d80a6244 928 }
36b588ed 929
0b2dc8df 930 ht_cleanup_push(app->sessions);
10b56aef 931 ht_cleanup_push(app->ust_sessions_objd);
0b2dc8df 932 ht_cleanup_push(app->ust_objd);
d80a6244 933
6414a713 934 /*
852d0037
DG
935 * Wait until we have deleted the application from the sock hash table
936 * before closing this socket, otherwise an application could re-use the
937 * socket ID and race with the teardown, using the same hash table entry.
938 *
939 * It's OK to leave the close in call_rcu. We want it to stay unique for
940 * all RCU readers that could run concurrently with unregister app,
941 * therefore we _need_ to only close that socket after a grace period. So
942 * it should stay in this RCU callback.
943 *
944 * This close() is a very important step of the synchronization model so
945 * every modification to this function must be carefully reviewed.
6414a713 946 */
799e2c4f
MD
947 ret = close(sock);
948 if (ret) {
949 PERROR("close");
950 }
4063050c 951 lttng_fd_put(LTTNG_FD_APPS, 1);
d80a6244 952
852d0037 953 DBG2("UST app pid %d deleted", app->pid);
284d8f55 954 free(app);
82cac6d2 955 session_unlock_list();
099e26bd
DG
956}
957
958/*
f6a9efaa 959 * URCU intermediate call to delete an UST app.
099e26bd 960 */
8b366481
DG
961static
962void delete_ust_app_rcu(struct rcu_head *head)
099e26bd 963{
bec39940
DG
964 struct lttng_ht_node_ulong *node =
965 caa_container_of(head, struct lttng_ht_node_ulong, head);
f6a9efaa 966 struct ust_app *app =
852d0037 967 caa_container_of(node, struct ust_app, pid_n);
f6a9efaa 968
852d0037 969 DBG3("Call RCU deleting app PID %d", app->pid);
f6a9efaa 970 delete_ust_app(app);
099e26bd
DG
971}
972
ffe60014
DG
973/*
974 * Delete the session from the application ht and delete the data structure by
975 * freeing every object inside and releasing them.
82cac6d2
JG
976 *
977 * The session list lock must be held by the caller.
ffe60014 978 */
d0b96690 979static void destroy_app_session(struct ust_app *app,
ffe60014
DG
980 struct ust_app_session *ua_sess)
981{
982 int ret;
983 struct lttng_ht_iter iter;
984
985 assert(app);
986 assert(ua_sess);
987
988 iter.iter.node = &ua_sess->node.node;
989 ret = lttng_ht_del(app->sessions, &iter);
990 if (ret) {
991 /* Already scheduled for teardown. */
992 goto end;
993 }
994
995 /* Once deleted, free the data structure. */
d0b96690 996 delete_ust_app_session(app->sock, ua_sess, app);
ffe60014
DG
997
998end:
999 return;
1000}
1001
8b366481
DG
1002/*
1003 * Alloc new UST app session.
1004 */
1005static
40bbd087 1006struct ust_app_session *alloc_ust_app_session(void)
8b366481
DG
1007{
1008 struct ust_app_session *ua_sess;
1009
1010 /* Init most of the default value by allocating and zeroing */
1011 ua_sess = zmalloc(sizeof(struct ust_app_session));
1012 if (ua_sess == NULL) {
1013 PERROR("malloc");
ffe60014 1014 goto error_free;
8b366481
DG
1015 }
1016
1017 ua_sess->handle = -1;
bec39940 1018 ua_sess->channels = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
ad7a9107 1019 ua_sess->metadata_attr.type = LTTNG_UST_CHAN_METADATA;
84ad93e8 1020 pthread_mutex_init(&ua_sess->lock, NULL);
ad7a9107 1021
8b366481
DG
1022 return ua_sess;
1023
ffe60014 1024error_free:
8b366481
DG
1025 return NULL;
1026}
1027
1028/*
1029 * Alloc new UST app channel.
1030 */
1031static
1032struct ust_app_channel *alloc_ust_app_channel(char *name,
d0b96690 1033 struct ust_app_session *ua_sess,
ffe60014 1034 struct lttng_ust_channel_attr *attr)
8b366481
DG
1035{
1036 struct ust_app_channel *ua_chan;
1037
1038 /* Init most of the default value by allocating and zeroing */
1039 ua_chan = zmalloc(sizeof(struct ust_app_channel));
1040 if (ua_chan == NULL) {
1041 PERROR("malloc");
1042 goto error;
1043 }
1044
1045 /* Setup channel name */
1046 strncpy(ua_chan->name, name, sizeof(ua_chan->name));
1047 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
1048
1049 ua_chan->enabled = 1;
1050 ua_chan->handle = -1;
45893984 1051 ua_chan->session = ua_sess;
ffe60014 1052 ua_chan->key = get_next_channel_key();
bec39940
DG
1053 ua_chan->ctx = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
1054 ua_chan->events = lttng_ht_new(0, LTTNG_HT_TYPE_STRING);
1055 lttng_ht_node_init_str(&ua_chan->node, ua_chan->name);
8b366481
DG
1056
1057 CDS_INIT_LIST_HEAD(&ua_chan->streams.head);
31746f93 1058 CDS_INIT_LIST_HEAD(&ua_chan->ctx_list);
8b366481
DG
1059
1060 /* Copy attributes */
1061 if (attr) {
ffe60014 1062 /* Translate from lttng_ust_channel to ustctl_consumer_channel_attr. */
2fe6e7f5
DG
1063 ua_chan->attr.subbuf_size = attr->subbuf_size;
1064 ua_chan->attr.num_subbuf = attr->num_subbuf;
1065 ua_chan->attr.overwrite = attr->overwrite;
1066 ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
1067 ua_chan->attr.read_timer_interval = attr->read_timer_interval;
1068 ua_chan->attr.output = attr->output;
491d1539 1069 ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
8b366481 1070 }
ffe60014
DG
1071 /* By default, the channel is a per cpu channel. */
1072 ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
8b366481
DG
1073
1074 DBG3("UST app channel %s allocated", ua_chan->name);
1075
1076 return ua_chan;
1077
1078error:
1079 return NULL;
1080}
1081
37f1c236
DG
1082/*
1083 * Allocate and initialize a UST app stream.
1084 *
1085 * Return newly allocated stream pointer or NULL on error.
1086 */
ffe60014 1087struct ust_app_stream *ust_app_alloc_stream(void)
37f1c236
DG
1088{
1089 struct ust_app_stream *stream = NULL;
1090
1091 stream = zmalloc(sizeof(*stream));
1092 if (stream == NULL) {
1093 PERROR("zmalloc ust app stream");
1094 goto error;
1095 }
1096
1097 /* Zero could be a valid value for a handle so flag it to -1. */
1098 stream->handle = -1;
1099
1100error:
1101 return stream;
1102}
1103
8b366481
DG
1104/*
1105 * Alloc new UST app event.
1106 */
1107static
1108struct ust_app_event *alloc_ust_app_event(char *name,
1109 struct lttng_ust_event *attr)
1110{
1111 struct ust_app_event *ua_event;
1112
1113 /* Init most of the default value by allocating and zeroing */
1114 ua_event = zmalloc(sizeof(struct ust_app_event));
1115 if (ua_event == NULL) {
48a0b866 1116 PERROR("Failed to allocate ust_app_event structure");
8b366481
DG
1117 goto error;
1118 }
1119
1120 ua_event->enabled = 1;
1121 strncpy(ua_event->name, name, sizeof(ua_event->name));
1122 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
bec39940 1123 lttng_ht_node_init_str(&ua_event->node, ua_event->name);
8b366481
DG
1124
1125 /* Copy attributes */
1126 if (attr) {
1127 memcpy(&ua_event->attr, attr, sizeof(ua_event->attr));
1128 }
1129
1130 DBG3("UST app event %s allocated", ua_event->name);
1131
1132 return ua_event;
1133
1134error:
1135 return NULL;
1136}
1137
1138/*
1139 * Alloc new UST app context.
1140 */
1141static
bdf64013 1142struct ust_app_ctx *alloc_ust_app_ctx(struct lttng_ust_context_attr *uctx)
8b366481
DG
1143{
1144 struct ust_app_ctx *ua_ctx;
1145
1146 ua_ctx = zmalloc(sizeof(struct ust_app_ctx));
1147 if (ua_ctx == NULL) {
1148 goto error;
1149 }
1150
31746f93
DG
1151 CDS_INIT_LIST_HEAD(&ua_ctx->list);
1152
8b366481
DG
1153 if (uctx) {
1154 memcpy(&ua_ctx->ctx, uctx, sizeof(ua_ctx->ctx));
bdf64013
JG
1155 if (uctx->ctx == LTTNG_UST_CONTEXT_APP_CONTEXT) {
1156 char *provider_name = NULL, *ctx_name = NULL;
1157
1158 provider_name = strdup(uctx->u.app_ctx.provider_name);
1159 ctx_name = strdup(uctx->u.app_ctx.ctx_name);
1160 if (!provider_name || !ctx_name) {
1161 free(provider_name);
1162 free(ctx_name);
1163 goto error;
1164 }
1165
1166 ua_ctx->ctx.u.app_ctx.provider_name = provider_name;
1167 ua_ctx->ctx.u.app_ctx.ctx_name = ctx_name;
1168 }
8b366481
DG
1169 }
1170
1171 DBG3("UST app context %d allocated", ua_ctx->ctx.ctx);
8b366481 1172 return ua_ctx;
bdf64013
JG
1173error:
1174 free(ua_ctx);
1175 return NULL;
8b366481
DG
1176}
1177
025faf73
DG
1178/*
1179 * Allocate a filter and copy the given original filter.
1180 *
1181 * Return allocated filter or NULL on error.
1182 */
51755dc8
JG
1183static struct lttng_filter_bytecode *copy_filter_bytecode(
1184 struct lttng_filter_bytecode *orig_f)
025faf73 1185{
51755dc8 1186 struct lttng_filter_bytecode *filter = NULL;
025faf73
DG
1187
1188 /* Copy filter bytecode */
1189 filter = zmalloc(sizeof(*filter) + orig_f->len);
1190 if (!filter) {
51755dc8 1191 PERROR("zmalloc alloc filter bytecode");
025faf73
DG
1192 goto error;
1193 }
1194
1195 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
1196
1197error:
1198 return filter;
1199}
1200
51755dc8
JG
1201/*
1202 * Create a liblttng-ust filter bytecode from given bytecode.
1203 *
1204 * Return allocated filter or NULL on error.
1205 */
1206static struct lttng_ust_filter_bytecode *create_ust_bytecode_from_bytecode(
1207 struct lttng_filter_bytecode *orig_f)
1208{
1209 struct lttng_ust_filter_bytecode *filter = NULL;
1210
1211 /* Copy filter bytecode */
1212 filter = zmalloc(sizeof(*filter) + orig_f->len);
1213 if (!filter) {
1214 PERROR("zmalloc alloc ust filter bytecode");
1215 goto error;
1216 }
1217
1218 assert(sizeof(struct lttng_filter_bytecode) ==
1219 sizeof(struct lttng_ust_filter_bytecode));
1220 memcpy(filter, orig_f, sizeof(*filter) + orig_f->len);
1221error:
1222 return filter;
1223}
1224
099e26bd 1225/*
421cb601
DG
1226 * Find an ust_app using the sock and return it. RCU read side lock must be
1227 * held before calling this helper function.
099e26bd 1228 */
f20baf8e 1229struct ust_app *ust_app_find_by_sock(int sock)
099e26bd 1230{
bec39940 1231 struct lttng_ht_node_ulong *node;
bec39940 1232 struct lttng_ht_iter iter;
f6a9efaa 1233
852d0037 1234 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &iter);
bec39940 1235 node = lttng_ht_iter_get_node_ulong(&iter);
f6a9efaa
DG
1236 if (node == NULL) {
1237 DBG2("UST app find by sock %d not found", sock);
f6a9efaa
DG
1238 goto error;
1239 }
852d0037
DG
1240
1241 return caa_container_of(node, struct ust_app, sock_n);
f6a9efaa
DG
1242
1243error:
1244 return NULL;
099e26bd
DG
1245}
1246
d0b96690
DG
1247/*
1248 * Find an ust_app using the notify sock and return it. RCU read side lock must
1249 * be held before calling this helper function.
1250 */
1251static struct ust_app *find_app_by_notify_sock(int sock)
1252{
1253 struct lttng_ht_node_ulong *node;
1254 struct lttng_ht_iter iter;
1255
1256 lttng_ht_lookup(ust_app_ht_by_notify_sock, (void *)((unsigned long) sock),
1257 &iter);
1258 node = lttng_ht_iter_get_node_ulong(&iter);
1259 if (node == NULL) {
1260 DBG2("UST app find by notify sock %d not found", sock);
1261 goto error;
1262 }
1263
1264 return caa_container_of(node, struct ust_app, notify_sock_n);
1265
1266error:
1267 return NULL;
1268}
1269
025faf73
DG
1270/*
1271 * Lookup for an ust app event based on event name, filter bytecode and the
1272 * event loglevel.
1273 *
1274 * Return an ust_app_event object or NULL on error.
1275 */
18eace3b 1276static struct ust_app_event *find_ust_app_event(struct lttng_ht *ht,
0ce9aa93 1277 const char *name, const struct lttng_filter_bytecode *filter,
2106efa0 1278 int loglevel_value,
39c5a3a7 1279 const struct lttng_event_exclusion *exclusion)
18eace3b
DG
1280{
1281 struct lttng_ht_iter iter;
1282 struct lttng_ht_node_str *node;
1283 struct ust_app_event *event = NULL;
1284 struct ust_app_ht_key key;
18eace3b
DG
1285
1286 assert(name);
1287 assert(ht);
1288
1289 /* Setup key for event lookup. */
1290 key.name = name;
1291 key.filter = filter;
2106efa0 1292 key.loglevel_type = loglevel_value;
39c5a3a7 1293 /* lttng_event_exclusion and lttng_ust_event_exclusion structures are similar */
51755dc8 1294 key.exclusion = exclusion;
18eace3b 1295
025faf73
DG
1296 /* Lookup using the event name as hash and a custom match fct. */
1297 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) name, lttng_ht_seed),
1298 ht_match_ust_app_event, &key, &iter.iter);
18eace3b
DG
1299 node = lttng_ht_iter_get_node_str(&iter);
1300 if (node == NULL) {
1301 goto end;
1302 }
1303
1304 event = caa_container_of(node, struct ust_app_event, node);
1305
1306end:
18eace3b
DG
1307 return event;
1308}
1309
55cc08a6
DG
1310/*
1311 * Create the channel context on the tracer.
d0b96690
DG
1312 *
1313 * Called with UST app session lock held.
55cc08a6
DG
1314 */
1315static
1316int create_ust_channel_context(struct ust_app_channel *ua_chan,
1317 struct ust_app_ctx *ua_ctx, struct ust_app *app)
1318{
1319 int ret;
1320
840cb59c 1321 health_code_update();
86acf0da 1322
fb45065e 1323 pthread_mutex_lock(&app->sock_lock);
852d0037 1324 ret = ustctl_add_context(app->sock, &ua_ctx->ctx,
55cc08a6 1325 ua_chan->obj, &ua_ctx->obj);
fb45065e 1326 pthread_mutex_unlock(&app->sock_lock);
55cc08a6 1327 if (ret < 0) {
ffe60014
DG
1328 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1329 ERR("UST app create channel context failed for app (pid: %d) "
1330 "with ret %d", app->pid, ret);
1331 } else {
3757b385
DG
1332 /*
1333 * This is normal behavior, an application can die during the
1334 * creation process. Don't report an error so the execution can
1335 * continue normally.
1336 */
1337 ret = 0;
0ce9aa93 1338 DBG3("UST app add context failed. Application is dead.");
ffe60014 1339 }
55cc08a6
DG
1340 goto error;
1341 }
1342
1343 ua_ctx->handle = ua_ctx->obj->handle;
1344
d0b96690
DG
1345 DBG2("UST app context handle %d created successfully for channel %s",
1346 ua_ctx->handle, ua_chan->name);
55cc08a6
DG
1347
1348error:
840cb59c 1349 health_code_update();
55cc08a6
DG
1350 return ret;
1351}
1352
53a80697
MD
1353/*
1354 * Set the filter on the tracer.
1355 */
1356static
1357int set_ust_event_filter(struct ust_app_event *ua_event,
1358 struct ust_app *app)
1359{
1360 int ret;
51755dc8 1361 struct lttng_ust_filter_bytecode *ust_bytecode = NULL;
53a80697 1362
840cb59c 1363 health_code_update();
86acf0da 1364
53a80697 1365 if (!ua_event->filter) {
86acf0da
DG
1366 ret = 0;
1367 goto error;
53a80697
MD
1368 }
1369
51755dc8
JG
1370 ust_bytecode = create_ust_bytecode_from_bytecode(ua_event->filter);
1371 if (!ust_bytecode) {
1372 ret = -LTTNG_ERR_NOMEM;
1373 goto error;
1374 }
fb45065e 1375 pthread_mutex_lock(&app->sock_lock);
51755dc8 1376 ret = ustctl_set_filter(app->sock, ust_bytecode,
53a80697 1377 ua_event->obj);
fb45065e 1378 pthread_mutex_unlock(&app->sock_lock);
53a80697 1379 if (ret < 0) {
ffe60014
DG
1380 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1381 ERR("UST app event %s filter failed for app (pid: %d) "
1382 "with ret %d", ua_event->attr.name, app->pid, ret);
1383 } else {
3757b385
DG
1384 /*
1385 * This is normal behavior, an application can die during the
1386 * creation process. Don't report an error so the execution can
1387 * continue normally.
1388 */
1389 ret = 0;
ffe60014
DG
1390 DBG3("UST app filter event failed. Application is dead.");
1391 }
53a80697
MD
1392 goto error;
1393 }
1394
1395 DBG2("UST filter set successfully for event %s", ua_event->name);
1396
1397error:
840cb59c 1398 health_code_update();
51755dc8 1399 free(ust_bytecode);
53a80697
MD
1400 return ret;
1401}
1402
51755dc8
JG
1403static
1404struct lttng_ust_event_exclusion *create_ust_exclusion_from_exclusion(
1405 struct lttng_event_exclusion *exclusion)
1406{
1407 struct lttng_ust_event_exclusion *ust_exclusion = NULL;
1408 size_t exclusion_alloc_size = sizeof(struct lttng_ust_event_exclusion) +
1409 LTTNG_UST_SYM_NAME_LEN * exclusion->count;
1410
1411 ust_exclusion = zmalloc(exclusion_alloc_size);
1412 if (!ust_exclusion) {
1413 PERROR("malloc");
1414 goto end;
1415 }
1416
1417 assert(sizeof(struct lttng_event_exclusion) ==
1418 sizeof(struct lttng_ust_event_exclusion));
1419 memcpy(ust_exclusion, exclusion, exclusion_alloc_size);
1420end:
1421 return ust_exclusion;
1422}
1423
7cc9a73c
JI
1424/*
1425 * Set event exclusions on the tracer.
1426 */
1427static
1428int set_ust_event_exclusion(struct ust_app_event *ua_event,
1429 struct ust_app *app)
1430{
1431 int ret;
51755dc8 1432 struct lttng_ust_event_exclusion *ust_exclusion = NULL;
7cc9a73c
JI
1433
1434 health_code_update();
1435
1436 if (!ua_event->exclusion || !ua_event->exclusion->count) {
1437 ret = 0;
1438 goto error;
1439 }
1440
51755dc8
JG
1441 ust_exclusion = create_ust_exclusion_from_exclusion(
1442 ua_event->exclusion);
1443 if (!ust_exclusion) {
1444 ret = -LTTNG_ERR_NOMEM;
1445 goto error;
1446 }
fb45065e 1447 pthread_mutex_lock(&app->sock_lock);
51755dc8 1448 ret = ustctl_set_exclusion(app->sock, ust_exclusion, ua_event->obj);
fb45065e 1449 pthread_mutex_unlock(&app->sock_lock);
7cc9a73c
JI
1450 if (ret < 0) {
1451 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1452 ERR("UST app event %s exclusions failed for app (pid: %d) "
1453 "with ret %d", ua_event->attr.name, app->pid, ret);
1454 } else {
1455 /*
1456 * This is normal behavior, an application can die during the
1457 * creation process. Don't report an error so the execution can
1458 * continue normally.
1459 */
1460 ret = 0;
1461 DBG3("UST app event exclusion failed. Application is dead.");
1462 }
1463 goto error;
1464 }
1465
1466 DBG2("UST exclusion set successfully for event %s", ua_event->name);
1467
1468error:
1469 health_code_update();
51755dc8 1470 free(ust_exclusion);
7cc9a73c
JI
1471 return ret;
1472}
1473
9730260e
DG
1474/*
1475 * Disable the specified event on to UST tracer for the UST session.
1476 */
1477static int disable_ust_event(struct ust_app *app,
1478 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1479{
1480 int ret;
1481
840cb59c 1482 health_code_update();
86acf0da 1483
fb45065e 1484 pthread_mutex_lock(&app->sock_lock);
852d0037 1485 ret = ustctl_disable(app->sock, ua_event->obj);
fb45065e 1486 pthread_mutex_unlock(&app->sock_lock);
9730260e 1487 if (ret < 0) {
ffe60014
DG
1488 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1489 ERR("UST app event %s disable failed for app (pid: %d) "
1490 "and session handle %d with ret %d",
1491 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1492 } else {
3757b385
DG
1493 /*
1494 * This is normal behavior, an application can die during the
1495 * creation process. Don't report an error so the execution can
1496 * continue normally.
1497 */
1498 ret = 0;
ffe60014
DG
1499 DBG3("UST app disable event failed. Application is dead.");
1500 }
9730260e
DG
1501 goto error;
1502 }
1503
1504 DBG2("UST app event %s disabled successfully for app (pid: %d)",
852d0037 1505 ua_event->attr.name, app->pid);
9730260e
DG
1506
1507error:
840cb59c 1508 health_code_update();
9730260e
DG
1509 return ret;
1510}
1511
78f0bacd
DG
1512/*
1513 * Disable the specified channel on to UST tracer for the UST session.
1514 */
1515static int disable_ust_channel(struct ust_app *app,
1516 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1517{
1518 int ret;
1519
840cb59c 1520 health_code_update();
86acf0da 1521
fb45065e 1522 pthread_mutex_lock(&app->sock_lock);
852d0037 1523 ret = ustctl_disable(app->sock, ua_chan->obj);
fb45065e 1524 pthread_mutex_unlock(&app->sock_lock);
78f0bacd 1525 if (ret < 0) {
ffe60014
DG
1526 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1527 ERR("UST app channel %s disable failed for app (pid: %d) "
1528 "and session handle %d with ret %d",
1529 ua_chan->name, app->pid, ua_sess->handle, ret);
1530 } else {
3757b385
DG
1531 /*
1532 * This is normal behavior, an application can die during the
1533 * creation process. Don't report an error so the execution can
1534 * continue normally.
1535 */
1536 ret = 0;
ffe60014
DG
1537 DBG3("UST app disable channel failed. Application is dead.");
1538 }
78f0bacd
DG
1539 goto error;
1540 }
1541
78f0bacd 1542 DBG2("UST app channel %s disabled successfully for app (pid: %d)",
852d0037 1543 ua_chan->name, app->pid);
78f0bacd
DG
1544
1545error:
840cb59c 1546 health_code_update();
78f0bacd
DG
1547 return ret;
1548}
1549
1550/*
1551 * Enable the specified channel on to UST tracer for the UST session.
1552 */
1553static int enable_ust_channel(struct ust_app *app,
1554 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
1555{
1556 int ret;
1557
840cb59c 1558 health_code_update();
86acf0da 1559
fb45065e 1560 pthread_mutex_lock(&app->sock_lock);
852d0037 1561 ret = ustctl_enable(app->sock, ua_chan->obj);
fb45065e 1562 pthread_mutex_unlock(&app->sock_lock);
78f0bacd 1563 if (ret < 0) {
ffe60014
DG
1564 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1565 ERR("UST app channel %s enable failed for app (pid: %d) "
1566 "and session handle %d with ret %d",
1567 ua_chan->name, app->pid, ua_sess->handle, ret);
1568 } else {
3757b385
DG
1569 /*
1570 * This is normal behavior, an application can die during the
1571 * creation process. Don't report an error so the execution can
1572 * continue normally.
1573 */
1574 ret = 0;
ffe60014
DG
1575 DBG3("UST app enable channel failed. Application is dead.");
1576 }
78f0bacd
DG
1577 goto error;
1578 }
1579
1580 ua_chan->enabled = 1;
1581
1582 DBG2("UST app channel %s enabled successfully for app (pid: %d)",
852d0037 1583 ua_chan->name, app->pid);
78f0bacd
DG
1584
1585error:
840cb59c 1586 health_code_update();
78f0bacd
DG
1587 return ret;
1588}
1589
edb67388
DG
1590/*
1591 * Enable the specified event on to UST tracer for the UST session.
1592 */
1593static int enable_ust_event(struct ust_app *app,
1594 struct ust_app_session *ua_sess, struct ust_app_event *ua_event)
1595{
1596 int ret;
1597
840cb59c 1598 health_code_update();
86acf0da 1599
fb45065e 1600 pthread_mutex_lock(&app->sock_lock);
852d0037 1601 ret = ustctl_enable(app->sock, ua_event->obj);
fb45065e 1602 pthread_mutex_unlock(&app->sock_lock);
edb67388 1603 if (ret < 0) {
ffe60014
DG
1604 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
1605 ERR("UST app event %s enable failed for app (pid: %d) "
1606 "and session handle %d with ret %d",
1607 ua_event->attr.name, app->pid, ua_sess->handle, ret);
1608 } else {
3757b385
DG
1609 /*
1610 * This is normal behavior, an application can die during the
1611 * creation process. Don't report an error so the execution can
1612 * continue normally.
1613 */
1614 ret = 0;
ffe60014
DG
1615 DBG3("UST app enable event failed. Application is dead.");
1616 }
edb67388
DG
1617 goto error;
1618 }
1619
1620 DBG2("UST app event %s enabled successfully for app (pid: %d)",
852d0037 1621 ua_event->attr.name, app->pid);
edb67388
DG
1622
1623error:
840cb59c 1624 health_code_update();
edb67388
DG
1625 return ret;
1626}
1627
099e26bd 1628/*
7972aab2 1629 * Send channel and stream buffer to application.
4f3ab6ee 1630 *
ffe60014 1631 * Return 0 on success. On error, a negative value is returned.
4f3ab6ee 1632 */
7972aab2
DG
1633static int send_channel_pid_to_ust(struct ust_app *app,
1634 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan)
4f3ab6ee
DG
1635{
1636 int ret;
ffe60014 1637 struct ust_app_stream *stream, *stmp;
4f3ab6ee
DG
1638
1639 assert(app);
ffe60014 1640 assert(ua_sess);
4f3ab6ee 1641 assert(ua_chan);
4f3ab6ee 1642
840cb59c 1643 health_code_update();
4f3ab6ee 1644
7972aab2
DG
1645 DBG("UST app sending channel %s to UST app sock %d", ua_chan->name,
1646 app->sock);
86acf0da 1647
ffe60014
DG
1648 /* Send channel to the application. */
1649 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
a7169585
MD
1650 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1651 ret = -ENOTCONN; /* Caused by app exiting. */
1652 goto error;
1653 } else if (ret < 0) {
b551a063
DG
1654 goto error;
1655 }
1656
d88aee68
DG
1657 health_code_update();
1658
ffe60014
DG
1659 /* Send all streams to application. */
1660 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
1661 ret = ust_consumer_send_stream_to_ust(app, ua_chan, stream);
a7169585
MD
1662 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
1663 ret = -ENOTCONN; /* Caused by app exiting. */
1664 goto error;
1665 } else if (ret < 0) {
ffe60014
DG
1666 goto error;
1667 }
1668 /* We don't need the stream anymore once sent to the tracer. */
1669 cds_list_del(&stream->list);
fb45065e 1670 delete_ust_app_stream(-1, stream, app);
ffe60014 1671 }
ffe60014
DG
1672 /* Flag the channel that it is sent to the application. */
1673 ua_chan->is_sent = 1;
ffe60014 1674
b551a063 1675error:
840cb59c 1676 health_code_update();
b551a063
DG
1677 return ret;
1678}
1679
91d76f53 1680/*
5b4a0ec0 1681 * Create the specified event onto the UST tracer for a UST session.
d0b96690
DG
1682 *
1683 * Should be called with session mutex held.
91d76f53 1684 */
edb67388
DG
1685static
1686int create_ust_event(struct ust_app *app, struct ust_app_session *ua_sess,
1687 struct ust_app_channel *ua_chan, struct ust_app_event *ua_event)
91d76f53 1688{
5b4a0ec0 1689 int ret = 0;
284d8f55 1690
840cb59c 1691 health_code_update();
86acf0da 1692
5b4a0ec0 1693 /* Create UST event on tracer */
fb45065e 1694 pthread_mutex_lock(&app->sock_lock);
852d0037 1695 ret = ustctl_create_event(app->sock, &ua_event->attr, ua_chan->obj,
5b4a0ec0 1696 &ua_event->obj);
fb45065e 1697 pthread_mutex_unlock(&app->sock_lock);
5b4a0ec0 1698 if (ret < 0) {
ffe60014 1699 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
0ce9aa93 1700 abort();
ffe60014
DG
1701 ERR("Error ustctl create event %s for app pid: %d with ret %d",
1702 ua_event->attr.name, app->pid, ret);
1703 } else {
3757b385
DG
1704 /*
1705 * This is normal behavior, an application can die during the
1706 * creation process. Don't report an error so the execution can
1707 * continue normally.
1708 */
1709 ret = 0;
ffe60014
DG
1710 DBG3("UST app create event failed. Application is dead.");
1711 }
5b4a0ec0 1712 goto error;
91d76f53 1713 }
f6a9efaa 1714
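/* Keep the tracer-assigned object handle for this event. */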
5b4a0ec0 1715 ua_event->handle = ua_event->obj->handle;
284d8f55 1716
5b4a0ec0 1717 DBG2("UST app event %s created successfully for pid:%d",
852d0037 1718 ua_event->attr.name, app->pid);
f6a9efaa 1719
840cb59c 1720 health_code_update();
86acf0da 1721
025faf73
DG
1722 /* Set filter if one is present. */
1723 if (ua_event->filter) {
1724 ret = set_ust_event_filter(ua_event, app);
1725 if (ret < 0) {
1726 goto error;
1727 }
1728 }
1729
7cc9a73c
JI
1730 /* Set exclusions for the event */
1731 if (ua_event->exclusion) {
1732 ret = set_ust_event_exclusion(ua_event, app);
1733 if (ret < 0) {
1734 goto error;
1735 }
1736 }
1737
8535a6d9 1738 /* Events are created disabled on the tracer; enable the event now if it should be enabled. */
40113787
MD
1739 if (ua_event->enabled) {
1740 /*
1741 * We now need to explicitly enable the event, since it
1742 * is now disabled at creation.
1743 */
1744 ret = enable_ust_event(app, ua_sess, ua_event);
1745 if (ret < 0) {
1746 /*
1747 * If we hit an EPERM, something is wrong with our enable call. If
1748 * we get an EEXIST, there is a problem on the tracer side since we
1749 * just created it.
1750 */
1751 switch (ret) {
1752 case -LTTNG_UST_ERR_PERM:
1753 /* Code flow problem */
1754 assert(0);
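/* Deliberate fall-through: with assertions compiled out, this case is handled like -LTTNG_UST_ERR_EXIST. */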
1755 case -LTTNG_UST_ERR_EXIST:
1756 /* It's OK for our use case. */
1757 ret = 0;
1758 break;
1759 default:
1760 break;
1761 }
1762 goto error;
1763 }
8535a6d9
DG
1764 }
1765
5b4a0ec0 1766error:
840cb59c 1767 health_code_update();
5b4a0ec0 1768 return ret;
91d76f53 1769}
48842b30 1770
5b4a0ec0
DG
1771/*
1772 * Copy data between an UST app event and a LTT event.
1773 */
421cb601 1774static void shadow_copy_event(struct ust_app_event *ua_event,
48842b30
DG
1775 struct ltt_ust_event *uevent)
1776{
b4ffad32
JI
1777 size_t exclusion_alloc_size;
1778
48842b30
DG
1779 strncpy(ua_event->name, uevent->attr.name, sizeof(ua_event->name));
1780 ua_event->name[sizeof(ua_event->name) - 1] = '\0';
1781
fc34caaa
DG
1782 ua_event->enabled = uevent->enabled;
1783
5b4a0ec0
DG
1784 /* Copy event attributes */
1785 memcpy(&ua_event->attr, &uevent->attr, sizeof(ua_event->attr));
1786
53a80697
MD
1787 /* Copy filter bytecode */
1788 if (uevent->filter) {
51755dc8 1789 ua_event->filter = copy_filter_bytecode(uevent->filter);
025faf73 1790 /* Filter might be NULL here in case of ENOMEM. */
53a80697 1791 }
b4ffad32
JI
1792
1793 /* Copy exclusion data */
1794 if (uevent->exclusion) {
51755dc8 1795 exclusion_alloc_size = sizeof(struct lttng_event_exclusion) +
b4ffad32
JI
1796 LTTNG_UST_SYM_NAME_LEN * uevent->exclusion->count;
1797 ua_event->exclusion = zmalloc(exclusion_alloc_size);
5f8df26c
JI
1798 if (ua_event->exclusion == NULL) {
1799 PERROR("malloc");
1800 } else {
1801 memcpy(ua_event->exclusion, uevent->exclusion,
1802 exclusion_alloc_size);
b4ffad32
JI
1803 }
1804 }
48842b30
DG
1805}
1806
5b4a0ec0
DG
1807/*
1808 * Copy data between an UST app channel and a LTT channel.
1809 */
421cb601 1810static void shadow_copy_channel(struct ust_app_channel *ua_chan,
48842b30
DG
1811 struct ltt_ust_channel *uchan)
1812{
fc34caaa 1813 DBG2("UST app shadow copy of channel %s started", ua_chan->name);
48842b30
DG
1814
1815 strncpy(ua_chan->name, uchan->name, sizeof(ua_chan->name));
1816 ua_chan->name[sizeof(ua_chan->name) - 1] = '\0';
ffe60014 1817
1624d5b7
JD
1818 ua_chan->tracefile_size = uchan->tracefile_size;
1819 ua_chan->tracefile_count = uchan->tracefile_count;
1820
ffe60014
DG
1821 /* Copy event attributes since the layout is different. */
1822 ua_chan->attr.subbuf_size = uchan->attr.subbuf_size;
1823 ua_chan->attr.num_subbuf = uchan->attr.num_subbuf;
1824 ua_chan->attr.overwrite = uchan->attr.overwrite;
1825 ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
1826 ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
e9404c27 1827 ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
ffe60014 1828 ua_chan->attr.output = uchan->attr.output;
491d1539
MD
1829 ua_chan->attr.u.s.blocking_timeout = uchan->attr.u.s.blocking_timeout;
1830
ffe60014
DG
1831 /*
1832 * Note that the attribute channel type is not set since the channel on the
1833 * tracing registry side does not have this information.
1834 */
48842b30 1835
fc34caaa 1836 ua_chan->enabled = uchan->enabled;
7972aab2 1837 ua_chan->tracing_channel_id = uchan->id;
fc34caaa 1838
fc34caaa 1839 DBG3("UST app shadow copy of channel %s done", ua_chan->name);
48842b30
DG
1840}
1841
5b4a0ec0
DG
1842/*
1843 * Copy data between a UST app session and a regular LTT session.
1844 */
421cb601 1845static void shadow_copy_session(struct ust_app_session *ua_sess,
bec39940 1846 struct ltt_ust_session *usess, struct ust_app *app)
48842b30 1847{
477d7741
MD
1848 struct tm *timeinfo;
1849 char datetime[16];
1850 int ret;
d7ba1388 1851 char tmp_shm_path[PATH_MAX];
477d7741 1852
cd82d919 1853 timeinfo = localtime(&app->registration_time);
477d7741 1854 strftime(datetime, sizeof(datetime), "%Y%m%d-%H%M%S", timeinfo);
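/* Illustrative value only: datetime ends up looking like "20240101-120000" and is used below to build the per-PID trace path. */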
48842b30 1855
421cb601 1856 DBG2("Shadow copy of session handle %d", ua_sess->handle);
48842b30 1857
7972aab2
DG
1858 ua_sess->tracing_id = usess->id;
1859 ua_sess->id = get_next_session_id();
c51311d6
JG
1860 ua_sess->real_credentials.uid = app->uid;
1861 ua_sess->real_credentials.gid = app->gid;
1862 ua_sess->effective_credentials.uid = usess->uid;
1863 ua_sess->effective_credentials.gid = usess->gid;
7972aab2
DG
1864 ua_sess->buffer_type = usess->buffer_type;
1865 ua_sess->bits_per_long = app->bits_per_long;
6addfa37 1866
7972aab2 1867 /* There is only one consumer object per session possible. */
6addfa37 1868 consumer_output_get(usess->consumer);
7972aab2 1869 ua_sess->consumer = usess->consumer;
6addfa37 1870
2bba9e53 1871 ua_sess->output_traces = usess->output_traces;
ecc48a90 1872 ua_sess->live_timer_interval = usess->live_timer_interval;
84ad93e8
DG
1873 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
1874 &usess->metadata_attr);
7972aab2
DG
1875
1876 switch (ua_sess->buffer_type) {
1877 case LTTNG_BUFFER_PER_PID:
1878 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
dec56f6c 1879 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s", app->name, app->pid,
7972aab2
DG
1880 datetime);
1881 break;
1882 case LTTNG_BUFFER_PER_UID:
1883 ret = snprintf(ua_sess->path, sizeof(ua_sess->path),
c51311d6
JG
1884 DEFAULT_UST_TRACE_UID_PATH,
1885 ua_sess->real_credentials.uid,
1886 app->bits_per_long);
7972aab2
DG
1887 break;
1888 default:
1889 assert(0);
1890 goto error;
1891 }
477d7741
MD
1892 if (ret < 0) {
1893 PERROR("asprintf UST shadow copy session");
477d7741 1894 assert(0);
7972aab2 1895 goto error;
477d7741
MD
1896 }
1897
3d071855
MD
1898 strncpy(ua_sess->root_shm_path, usess->root_shm_path,
1899 sizeof(ua_sess->root_shm_path));
1900 ua_sess->root_shm_path[sizeof(ua_sess->root_shm_path) - 1] = '\0';
d7ba1388
MD
1901 strncpy(ua_sess->shm_path, usess->shm_path,
1902 sizeof(ua_sess->shm_path));
1903 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
1904 if (ua_sess->shm_path[0]) {
1905 switch (ua_sess->buffer_type) {
1906 case LTTNG_BUFFER_PER_PID:
1907 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
1908 DEFAULT_UST_TRACE_PID_PATH "/%s-%d-%s",
1909 app->name, app->pid, datetime);
1910 break;
1911 case LTTNG_BUFFER_PER_UID:
1912 ret = snprintf(tmp_shm_path, sizeof(tmp_shm_path),
1913 DEFAULT_UST_TRACE_UID_PATH,
1914 app->uid, app->bits_per_long);
1915 break;
1916 default:
1917 assert(0);
1918 goto error;
1919 }
1920 if (ret < 0) {
1921 PERROR("sprintf UST shadow copy session");
1922 assert(0);
1923 goto error;
1924 }
1925 strncat(ua_sess->shm_path, tmp_shm_path,
1926 sizeof(ua_sess->shm_path) - strlen(ua_sess->shm_path) - 1);
1927 ua_sess->shm_path[sizeof(ua_sess->shm_path) - 1] = '\0';
1928 }
6addfa37 1929 return;
7972aab2
DG
1930
1931error:
6addfa37 1932 consumer_output_put(ua_sess->consumer);
48842b30
DG
1933}
1934
78f0bacd
DG
1935/*
1936 * Lookup session wrapper.
1937 */
84cd17c6 1938static
65ff8ea3 1939void __lookup_session_by_app(const struct ltt_ust_session *usess,
bec39940 1940 struct ust_app *app, struct lttng_ht_iter *iter)
84cd17c6
MD
1941{
1942 /* Get right UST app session from app */
d9bf3ca4 1943 lttng_ht_lookup(app->sessions, &usess->id, iter);
84cd17c6
MD
1944}
1945
421cb601
DG
1946/*
1947 * Return ust app session from the app session hashtable using the UST session
a991f516 1948 * id.
421cb601 1949 */
48842b30 1950static struct ust_app_session *lookup_session_by_app(
65ff8ea3 1951 const struct ltt_ust_session *usess, struct ust_app *app)
48842b30 1952{
bec39940 1953 struct lttng_ht_iter iter;
d9bf3ca4 1954 struct lttng_ht_node_u64 *node;
48842b30 1955
84cd17c6 1956 __lookup_session_by_app(usess, app, &iter);
d9bf3ca4 1957 node = lttng_ht_iter_get_node_u64(&iter);
48842b30
DG
1958 if (node == NULL) {
1959 goto error;
1960 }
1961
1962 return caa_container_of(node, struct ust_app_session, node);
1963
1964error:
1965 return NULL;
1966}
1967
7972aab2
DG
1968/*
1969 * Setup buffer registry per PID for the given session and application. If none
1970 * is found, a new one is created, added to the global registry and
1971 * initialized. If regp is valid, it's set with the newly created object.
1972 *
1973 * Return 0 on success or else a negative value.
1974 */
1975static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
1976 struct ust_app *app, struct buffer_reg_pid **regp)
1977{
1978 int ret = 0;
1979 struct buffer_reg_pid *reg_pid;
1980
1981 assert(ua_sess);
1982 assert(app);
1983
1984 rcu_read_lock();
1985
1986 reg_pid = buffer_reg_pid_find(ua_sess->id);
1987 if (!reg_pid) {
1988 /*
1989 * This is the create channel path meaning that if there is NO
1990 * registry available, we have to create one for this session.
1991 */
d7ba1388 1992 ret = buffer_reg_pid_create(ua_sess->id, &reg_pid,
3d071855 1993 ua_sess->root_shm_path, ua_sess->shm_path);
7972aab2
DG
1994 if (ret < 0) {
1995 goto error;
1996 }
7972aab2
DG
1997 } else {
1998 goto end;
1999 }
2000
2001 /* Initialize registry. */
2002 ret = ust_registry_session_init(&reg_pid->registry->reg.ust, app,
2003 app->bits_per_long, app->uint8_t_alignment,
2004 app->uint16_t_alignment, app->uint32_t_alignment,
af6142cf 2005 app->uint64_t_alignment, app->long_alignment,
c51311d6
JG
2006 app->byte_order, app->version.major, app->version.minor,
2007 reg_pid->root_shm_path, reg_pid->shm_path,
2008 ua_sess->effective_credentials.uid,
6e78b10e
JR
2009 ua_sess->effective_credentials.gid, ua_sess->tracing_id,
2010 app->uid);
7972aab2 2011 if (ret < 0) {
286c991a
MD
2012 /*
2013 * reg_pid->registry->reg.ust is NULL upon error, so we need to
2014 * destroy the buffer registry, because it is always expected
2015 * that if the buffer registry can be found, its ust registry is
2016 * non-NULL.
2017 */
2018 buffer_reg_pid_destroy(reg_pid);
7972aab2
DG
2019 goto error;
2020 }
2021
286c991a
MD
2022 buffer_reg_pid_add(reg_pid);
2023
7972aab2
DG
2024 DBG3("UST app buffer registry per PID created successfully");
2025
2026end:
2027 if (regp) {
2028 *regp = reg_pid;
2029 }
2030error:
2031 rcu_read_unlock();
2032 return ret;
2033}
2034
2035/*
2036 * Setup buffer registry per UID for the given session and application. If none
2037 * is found, a new one is created, added to the global registry and
2038 * initialized. If regp is valid, it's set with the newly created object.
2039 *
2040 * Return 0 on success or else a negative value.
2041 */
2042static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
d7ba1388 2043 struct ust_app_session *ua_sess,
7972aab2
DG
2044 struct ust_app *app, struct buffer_reg_uid **regp)
2045{
2046 int ret = 0;
2047 struct buffer_reg_uid *reg_uid;
2048
2049 assert(usess);
2050 assert(app);
2051
2052 rcu_read_lock();
2053
2054 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2055 if (!reg_uid) {
2056 /*
2057 * This is the create channel path meaning that if there is NO
2058 * registry available, we have to create one for this session.
2059 */
2060 ret = buffer_reg_uid_create(usess->id, app->bits_per_long, app->uid,
3d071855
MD
2061 LTTNG_DOMAIN_UST, &reg_uid,
2062 ua_sess->root_shm_path, ua_sess->shm_path);
7972aab2
DG
2063 if (ret < 0) {
2064 goto error;
2065 }
7972aab2
DG
2066 } else {
2067 goto end;
2068 }
2069
2070 /* Initialize registry. */
af6142cf 2071 ret = ust_registry_session_init(&reg_uid->registry->reg.ust, NULL,
7972aab2
DG
2072 app->bits_per_long, app->uint8_t_alignment,
2073 app->uint16_t_alignment, app->uint32_t_alignment,
af6142cf
MD
2074 app->uint64_t_alignment, app->long_alignment,
2075 app->byte_order, app->version.major,
3d071855 2076 app->version.minor, reg_uid->root_shm_path,
6e78b10e
JR
2077 reg_uid->shm_path, usess->uid, usess->gid,
2078 ua_sess->tracing_id, app->uid);
7972aab2 2079 if (ret < 0) {
286c991a
MD
2080 /*
2081 * reg_uid->registry->reg.ust is NULL upon error, so we need to
2082 * destroy the buffer registry, because it is always expected
2083 * that if the buffer registry can be found, its ust registry is
2084 * non-NULL.
2085 */
2086 buffer_reg_uid_destroy(reg_uid, NULL);
7972aab2
DG
2087 goto error;
2088 }
2089 /* Add node to teardown list of the session. */
2090 cds_list_add(&reg_uid->lnode, &usess->buffer_reg_uid_list);
2091
286c991a 2092 buffer_reg_uid_add(reg_uid);
7972aab2 2093
286c991a 2094 DBG3("UST app buffer registry per UID created successfully");
7972aab2
DG
2095end:
2096 if (regp) {
2097 *regp = reg_uid;
2098 }
2099error:
2100 rcu_read_unlock();
2101 return ret;
2102}
2103
421cb601 2104/*
3d8ca23b 2105 * Create a session on the tracer side for the given app.
421cb601 2106 *
3d8ca23b
DG
2107 * On success, ua_sess_ptr is populated with the session pointer or else left
2108 * untouched. If the session was created, is_created is set to 1. On error,
2109 * it's left untouched. Note that ua_sess_ptr is mandatory but is_created can
2110 * be NULL.
2111 *
2112 * Returns 0 on success or else a negative code which is either -ENOMEM or
2113 * -ENOTCONN which is the default code if the ustctl_create_session fails.
421cb601 2114 */
03f91eaa 2115static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
3d8ca23b
DG
2116 struct ust_app *app, struct ust_app_session **ua_sess_ptr,
2117 int *is_created)
421cb601 2118{
3d8ca23b 2119 int ret, created = 0;
421cb601
DG
2120 struct ust_app_session *ua_sess;
2121
3d8ca23b
DG
2122 assert(usess);
2123 assert(app);
2124 assert(ua_sess_ptr);
2125
840cb59c 2126 health_code_update();
86acf0da 2127
421cb601
DG
2128 ua_sess = lookup_session_by_app(usess, app);
2129 if (ua_sess == NULL) {
d9bf3ca4 2130 DBG2("UST app pid: %d session id %" PRIu64 " not found, creating it",
852d0037 2131 app->pid, usess->id);
40bbd087 2132 ua_sess = alloc_ust_app_session();
421cb601
DG
2133 if (ua_sess == NULL) {
2134 /* Only malloc can fail so something is really wrong */
3d8ca23b
DG
2135 ret = -ENOMEM;
2136 goto error;
421cb601 2137 }
477d7741 2138 shadow_copy_session(ua_sess, usess, app);
3d8ca23b 2139 created = 1;
421cb601
DG
2140 }
2141
7972aab2
DG
2142 switch (usess->buffer_type) {
2143 case LTTNG_BUFFER_PER_PID:
2144 /* Init local registry. */
2145 ret = setup_buffer_reg_pid(ua_sess, app, NULL);
421cb601 2146 if (ret < 0) {
e64207cf 2147 delete_ust_app_session(-1, ua_sess, app);
7972aab2
DG
2148 goto error;
2149 }
2150 break;
2151 case LTTNG_BUFFER_PER_UID:
2152 /* Look for a global registry. If none exists, create one. */
d7ba1388 2153 ret = setup_buffer_reg_uid(usess, ua_sess, app, NULL);
7972aab2 2154 if (ret < 0) {
e64207cf 2155 delete_ust_app_session(-1, ua_sess, app);
7972aab2
DG
2156 goto error;
2157 }
2158 break;
2159 default:
2160 assert(0);
2161 ret = -EINVAL;
2162 goto error;
2163 }
2164
2165 health_code_update();
2166
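/* A handle of -1 means this UST app session has not been created on the tracer side yet; create it now through the app's command socket. */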
2167 if (ua_sess->handle == -1) {
fb45065e 2168 pthread_mutex_lock(&app->sock_lock);
7972aab2 2169 ret = ustctl_create_session(app->sock);
fb45065e 2170 pthread_mutex_unlock(&app->sock_lock);
7972aab2
DG
2171 if (ret < 0) {
2172 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
2173 ERR("Creating session for app pid %d with ret %d",
ffe60014
DG
2174 app->pid, ret);
2175 } else {
2176 DBG("UST app creating session failed. Application is dead");
3757b385
DG
2177 /*
2178 * This is normal behavior, an application can die during the
2179 * creation process. Don't report an error so the execution can
2180 * continue normally. This will get flagged ENOTCONN and the
2181 * caller will handle it.
2182 */
2183 ret = 0;
ffe60014 2184 }
d0b96690 2185 delete_ust_app_session(-1, ua_sess, app);
3d8ca23b
DG
2186 if (ret != -ENOMEM) {
2187 /*
2188 * Tracer is probably gone or got an internal error so let's
2189 * behave like it will soon unregister or not usable.
2190 */
2191 ret = -ENOTCONN;
2192 }
2193 goto error;
421cb601
DG
2194 }
2195
7972aab2
DG
2196 ua_sess->handle = ret;
2197
2198 /* Add ust app session to app's HT */
d9bf3ca4
MD
2199 lttng_ht_node_init_u64(&ua_sess->node,
2200 ua_sess->tracing_id);
2201 lttng_ht_add_unique_u64(app->sessions, &ua_sess->node);
10b56aef
MD
2202 lttng_ht_node_init_ulong(&ua_sess->ust_objd_node, ua_sess->handle);
2203 lttng_ht_add_unique_ulong(app->ust_sessions_objd,
2204 &ua_sess->ust_objd_node);
7972aab2
DG
2205
2206 DBG2("UST app session created successfully with handle %d", ret);
2207 }
2208
2209 *ua_sess_ptr = ua_sess;
2210 if (is_created) {
2211 *is_created = created;
2212 }
2213
2214 /* Everything went well. */
2215 ret = 0;
2216
2217error:
2218 health_code_update();
2219 return ret;
2220}
2221
6a6b2068
JG
2222/*
2223 * Match function for a hash table lookup of ust_app_ctx.
2224 *
2225 * It matches an ust app context based on the context type and, in the case
2226 * of perf counters, their name.
2227 */
2228static int ht_match_ust_app_ctx(struct cds_lfht_node *node, const void *_key)
2229{
2230 struct ust_app_ctx *ctx;
bdf64013 2231 const struct lttng_ust_context_attr *key;
6a6b2068
JG
2232
2233 assert(node);
2234 assert(_key);
2235
2236 ctx = caa_container_of(node, struct ust_app_ctx, node.node);
2237 key = _key;
2238
2239 /* Context type */
2240 if (ctx->ctx.ctx != key->ctx) {
2241 goto no_match;
2242 }
2243
bdf64013
JG
2244 switch(key->ctx) {
2245 case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
6a6b2068 2246 if (strncmp(key->u.perf_counter.name,
bdf64013
JG
2247 ctx->ctx.u.perf_counter.name,
2248 sizeof(key->u.perf_counter.name))) {
2249 goto no_match;
2250 }
2251 break;
2252 case LTTNG_UST_CONTEXT_APP_CONTEXT:
2253 if (strcmp(key->u.app_ctx.provider_name,
2254 ctx->ctx.u.app_ctx.provider_name) ||
2255 strcmp(key->u.app_ctx.ctx_name,
2256 ctx->ctx.u.app_ctx.ctx_name)) {
6a6b2068
JG
2257 goto no_match;
2258 }
bdf64013
JG
2259 break;
2260 default:
2261 break;
6a6b2068
JG
2262 }
2263
2264 /* Match. */
2265 return 1;
2266
2267no_match:
2268 return 0;
2269}
2270
2271/*
2272 * Lookup for an ust app context from an lttng_ust_context.
2273 *
be184a0f 2274 * Must be called while holding RCU read side lock.
6a6b2068
JG
2275 * Return an ust_app_ctx object or NULL on error.
2276 */
2277static
2278struct ust_app_ctx *find_ust_app_context(struct lttng_ht *ht,
bdf64013 2279 struct lttng_ust_context_attr *uctx)
6a6b2068
JG
2280{
2281 struct lttng_ht_iter iter;
2282 struct lttng_ht_node_ulong *node;
2283 struct ust_app_ctx *app_ctx = NULL;
2284
2285 assert(uctx);
2286 assert(ht);
2287
2288 /* Lookup using the lttng_ust_context_type and a custom match fct. */
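/* Note: the hash only covers the context type; the match callback compares perf counter / app context names to tell apart contexts sharing a type. */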
2289 cds_lfht_lookup(ht->ht, ht->hash_fct((void *) uctx->ctx, lttng_ht_seed),
2290 ht_match_ust_app_ctx, uctx, &iter.iter);
2291 node = lttng_ht_iter_get_node_ulong(&iter);
2292 if (!node) {
2293 goto end;
2294 }
2295
2296 app_ctx = caa_container_of(node, struct ust_app_ctx, node);
2297
2298end:
2299 return app_ctx;
2300}
2301
7972aab2
DG
2302/*
2303 * Create a context for the channel on the tracer.
2304 *
2305 * Called with UST app session lock held and a RCU read side lock.
2306 */
2307static
c9edf082 2308int create_ust_app_channel_context(struct ust_app_channel *ua_chan,
bdf64013 2309 struct lttng_ust_context_attr *uctx,
7972aab2
DG
2310 struct ust_app *app)
2311{
2312 int ret = 0;
7972aab2
DG
2313 struct ust_app_ctx *ua_ctx;
2314
2315 DBG2("UST app adding context to channel %s", ua_chan->name);
2316
6a6b2068
JG
2317 ua_ctx = find_ust_app_context(ua_chan->ctx, uctx);
2318 if (ua_ctx) {
7972aab2
DG
2319 ret = -EEXIST;
2320 goto error;
2321 }
2322
2323 ua_ctx = alloc_ust_app_ctx(uctx);
2324 if (ua_ctx == NULL) {
2325 /* malloc failed */
7682f304 2326 ret = -ENOMEM;
7972aab2
DG
2327 goto error;
2328 }
2329
2330 lttng_ht_node_init_ulong(&ua_ctx->node, (unsigned long) ua_ctx->ctx.ctx);
aa3514e9 2331 lttng_ht_add_ulong(ua_chan->ctx, &ua_ctx->node);
31746f93 2332 cds_list_add_tail(&ua_ctx->list, &ua_chan->ctx_list);
7972aab2
DG
2333
2334 ret = create_ust_channel_context(ua_chan, ua_ctx, app);
2335 if (ret < 0) {
2336 goto error;
2337 }
2338
2339error:
2340 return ret;
2341}
2342
2343/*
2344 * Enable on the tracer side a ust app event for the session and channel.
2345 *
2346 * Called with UST app session lock held.
2347 */
2348static
2349int enable_ust_app_event(struct ust_app_session *ua_sess,
2350 struct ust_app_event *ua_event, struct ust_app *app)
2351{
2352 int ret;
2353
2354 ret = enable_ust_event(app, ua_sess, ua_event);
2355 if (ret < 0) {
2356 goto error;
2357 }
2358
2359 ua_event->enabled = 1;
2360
2361error:
2362 return ret;
2363}
2364
2365/*
2366 * Disable on the tracer side a ust app event for the session and channel.
2367 */
2368static int disable_ust_app_event(struct ust_app_session *ua_sess,
2369 struct ust_app_event *ua_event, struct ust_app *app)
2370{
2371 int ret;
2372
2373 ret = disable_ust_event(app, ua_sess, ua_event);
2374 if (ret < 0) {
2375 goto error;
2376 }
2377
2378 ua_event->enabled = 0;
2379
2380error:
2381 return ret;
2382}
2383
2384/*
2385 * Lookup ust app channel for session and disable it on the tracer side.
2386 */
2387static
2388int disable_ust_app_channel(struct ust_app_session *ua_sess,
2389 struct ust_app_channel *ua_chan, struct ust_app *app)
2390{
2391 int ret;
2392
2393 ret = disable_ust_channel(app, ua_sess, ua_chan);
2394 if (ret < 0) {
2395 goto error;
2396 }
2397
2398 ua_chan->enabled = 0;
2399
2400error:
2401 return ret;
2402}
2403
2404/*
2405 * Lookup ust app channel for session and enable it on the tracer side. This
2406 * MUST be called with a RCU read side lock acquired.
2407 */
2408static int enable_ust_app_channel(struct ust_app_session *ua_sess,
2409 struct ltt_ust_channel *uchan, struct ust_app *app)
2410{
2411 int ret = 0;
2412 struct lttng_ht_iter iter;
2413 struct lttng_ht_node_str *ua_chan_node;
2414 struct ust_app_channel *ua_chan;
2415
2416 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
2417 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
2418 if (ua_chan_node == NULL) {
d9bf3ca4 2419 DBG2("Unable to find channel %s in ust session id %" PRIu64,
7972aab2
DG
2420 uchan->name, ua_sess->tracing_id);
2421 goto error;
2422 }
2423
2424 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
2425
2426 ret = enable_ust_channel(app, ua_sess, ua_chan);
2427 if (ret < 0) {
2428 goto error;
2429 }
2430
2431error:
2432 return ret;
2433}
2434
2435/*
2436 * Ask the consumer to create a channel and get it if successful.
2437 *
fad1ed2f
JR
2438 * Called with UST app session lock held.
2439 *
7972aab2
DG
2440 * Return 0 on success or else a negative value.
2441 */
2442static int do_consumer_create_channel(struct ltt_ust_session *usess,
2443 struct ust_app_session *ua_sess, struct ust_app_channel *ua_chan,
e098433c
JG
2444 int bitness, struct ust_registry_session *registry,
2445 uint64_t trace_archive_id)
7972aab2
DG
2446{
2447 int ret;
2448 unsigned int nb_fd = 0;
2449 struct consumer_socket *socket;
2450
2451 assert(usess);
2452 assert(ua_sess);
2453 assert(ua_chan);
2454 assert(registry);
2455
2456 rcu_read_lock();
2457 health_code_update();
2458
2459 /* Get the right consumer socket for the application. */
2460 socket = consumer_find_socket_by_bitness(bitness, usess->consumer);
2461 if (!socket) {
2462 ret = -EINVAL;
2463 goto error;
2464 }
2465
2466 health_code_update();
2467
2468 /* Need one fd for the channel. */
2469 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2470 if (ret < 0) {
2471 ERR("Exhausted number of available FD upon create channel");
2472 goto error;
2473 }
2474
2475 /*
2476 * Ask consumer to create channel. The consumer will return the number of
2477 * stream we have to expect.
2478 */
2479 ret = ust_consumer_ask_channel(ua_sess, ua_chan, usess->consumer, socket,
e5148e25 2480 registry, usess->current_trace_chunk);
7972aab2
DG
2481 if (ret < 0) {
2482 goto error_ask;
2483 }
2484
2485 /*
2486 * Compute the number of fd needed before receiving them. It must be 2 per
2487 * stream (2 being the default value here).
2488 */
2489 nb_fd = DEFAULT_UST_STREAM_FD_NUM * ua_chan->expected_stream_count;
2490
2491 /* Reserve the amount of file descriptor we need. */
2492 ret = lttng_fd_get(LTTNG_FD_APPS, nb_fd);
2493 if (ret < 0) {
2494 ERR("Exhausted number of available FD upon create channel");
2495 goto error_fd_get_stream;
2496 }
2497
2498 health_code_update();
2499
2500 /*
e1ac6bb9 2501 * Now get the channel from the consumer. This call will populate the stream
7972aab2
DG
2502 * list of that channel and set the ust objects.
2503 */
d9078d0c
DG
2504 if (usess->consumer->enabled) {
2505 ret = ust_consumer_get_channel(socket, ua_chan);
2506 if (ret < 0) {
2507 goto error_destroy;
2508 }
7972aab2
DG
2509 }
2510
2511 rcu_read_unlock();
2512 return 0;
2513
2514error_destroy:
2515 lttng_fd_put(LTTNG_FD_APPS, nb_fd);
2516error_fd_get_stream:
2517 /*
2518 * Initiate a destroy channel on the consumer since we had an error
2519 * handling it on our side. The return value is of no importance since we
2520 * already have a ret value set by the previous error that we need to
2521 * return.
2522 */
2523 (void) ust_consumer_destroy_channel(socket, ua_chan);
2524error_ask:
2525 lttng_fd_put(LTTNG_FD_APPS, 1);
2526error:
2527 health_code_update();
2528 rcu_read_unlock();
2529 return ret;
2530}
2531
2532/*
2533 * Duplicate the ust data object of the ust app stream and save it in the
2534 * buffer registry stream.
2535 *
2536 * Return 0 on success or else a negative value.
2537 */
2538static int duplicate_stream_object(struct buffer_reg_stream *reg_stream,
2539 struct ust_app_stream *stream)
2540{
2541 int ret;
2542
2543 assert(reg_stream);
2544 assert(stream);
2545
2546 /* Reserve the amount of file descriptor we need. */
2547 ret = lttng_fd_get(LTTNG_FD_APPS, 2);
2548 if (ret < 0) {
2549 ERR("Exhausted number of available FD upon duplicate stream");
2550 goto error;
2551 }
2552
2553 /* Duplicate object for stream once the original is in the registry. */
2554 ret = ustctl_duplicate_ust_object_data(&stream->obj,
2555 reg_stream->obj.ust);
2556 if (ret < 0) {
2557 ERR("Duplicate stream obj from %p to %p failed with ret %d",
2558 reg_stream->obj.ust, stream->obj, ret);
2559 lttng_fd_put(LTTNG_FD_APPS, 2);
2560 goto error;
2561 }
2562 stream->handle = stream->obj->handle;
2563
2564error:
2565 return ret;
2566}
2567
2568/*
2569 * Duplicate the ust data object of the ust app. channel and save it in the
2570 * buffer registry channel.
2571 *
2572 * Return 0 on success or else a negative value.
2573 */
2574static int duplicate_channel_object(struct buffer_reg_channel *reg_chan,
2575 struct ust_app_channel *ua_chan)
2576{
2577 int ret;
2578
2579 assert(reg_chan);
2580 assert(ua_chan);
2581
2582 /* Need one fd for the channel. */
2583 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
2584 if (ret < 0) {
2585 ERR("Exhausted number of available FD upon duplicate channel");
2586 goto error_fd_get;
2587 }
2588
2589 /* Duplicate object for stream once the original is in the registry. */
2590 ret = ustctl_duplicate_ust_object_data(&ua_chan->obj, reg_chan->obj.ust);
2591 if (ret < 0) {
2592 ERR("Duplicate channel obj from %p to %p failed with ret: %d",
2593 reg_chan->obj.ust, ua_chan->obj, ret);
2594 goto error;
2595 }
2596 ua_chan->handle = ua_chan->obj->handle;
2597
2598 return 0;
2599
2600error:
2601 lttng_fd_put(LTTNG_FD_APPS, 1);
2602error_fd_get:
2603 return ret;
2604}
2605
2606/*
2607 * For a given channel buffer registry, setup all streams of the given ust
2608 * application channel.
2609 *
2610 * Return 0 on success or else a negative value.
2611 */
2612static int setup_buffer_reg_streams(struct buffer_reg_channel *reg_chan,
fb45065e
MD
2613 struct ust_app_channel *ua_chan,
2614 struct ust_app *app)
7972aab2
DG
2615{
2616 int ret = 0;
2617 struct ust_app_stream *stream, *stmp;
2618
2619 assert(reg_chan);
2620 assert(ua_chan);
2621
2622 DBG2("UST app setup buffer registry stream");
2623
2624 /* Move all streams from the application channel to the registry. */
2625 cds_list_for_each_entry_safe(stream, stmp, &ua_chan->streams.head, list) {
2626 struct buffer_reg_stream *reg_stream;
2627
2628 ret = buffer_reg_stream_create(&reg_stream);
2629 if (ret < 0) {
2630 goto error;
2631 }
2632
2633 /*
2634 * Keep original pointer and nullify it in the stream so the delete
2635 * stream call does not release the object.
2636 */
2637 reg_stream->obj.ust = stream->obj;
2638 stream->obj = NULL;
2639 buffer_reg_stream_add(reg_stream, reg_chan);
421cb601 2640
7972aab2
DG
2641 /* We don't need the streams anymore. */
2642 cds_list_del(&stream->list);
fb45065e 2643 delete_ust_app_stream(-1, stream, app);
7972aab2 2644 }
421cb601 2645
7972aab2
DG
2646error:
2647 return ret;
2648}
2649
2650/*
2651 * Create a buffer registry channel for the given session registry and
2652 * application channel object. If regp pointer is valid, it's set with the
2653 * created object. Important, the created object is NOT added to the session
2654 * registry hash table.
2655 *
2656 * Return 0 on success else a negative value.
2657 */
2658static int create_buffer_reg_channel(struct buffer_reg_session *reg_sess,
2659 struct ust_app_channel *ua_chan, struct buffer_reg_channel **regp)
2660{
2661 int ret;
2662 struct buffer_reg_channel *reg_chan = NULL;
2663
2664 assert(reg_sess);
2665 assert(ua_chan);
2666
2667 DBG2("UST app creating buffer registry channel for %s", ua_chan->name);
2668
2669 /* Create buffer registry channel. */
2670 ret = buffer_reg_channel_create(ua_chan->tracing_channel_id, &reg_chan);
2671 if (ret < 0) {
2672 goto error_create;
421cb601 2673 }
7972aab2
DG
2674 assert(reg_chan);
2675 reg_chan->consumer_key = ua_chan->key;
8c924c7b 2676 reg_chan->subbuf_size = ua_chan->attr.subbuf_size;
d07ceecd 2677 reg_chan->num_subbuf = ua_chan->attr.num_subbuf;
421cb601 2678
7972aab2
DG
2679 /* Create and add a channel registry to session. */
2680 ret = ust_registry_channel_add(reg_sess->reg.ust,
2681 ua_chan->tracing_channel_id);
2682 if (ret < 0) {
2683 goto error;
d88aee68 2684 }
7972aab2 2685 buffer_reg_channel_add(reg_sess, reg_chan);
d88aee68 2686
7972aab2
DG
2687 if (regp) {
2688 *regp = reg_chan;
3d8ca23b 2689 }
d88aee68 2690
7972aab2 2691 return 0;
3d8ca23b
DG
2692
2693error:
7972aab2
DG
2694 /* Safe because the registry channel object was not added to any HT. */
2695 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2696error_create:
3d8ca23b 2697 return ret;
421cb601
DG
2698}
2699
55cc08a6 2700/*
7972aab2
DG
2701 * Setup buffer registry channel for the given session registry and application
2702 * channel object. If regp pointer is valid, it's set with the created object.
d0b96690 2703 *
7972aab2 2704 * Return 0 on success else a negative value.
55cc08a6 2705 */
7972aab2 2706static int setup_buffer_reg_channel(struct buffer_reg_session *reg_sess,
fb45065e
MD
2707 struct ust_app_channel *ua_chan, struct buffer_reg_channel *reg_chan,
2708 struct ust_app *app)
55cc08a6 2709{
7972aab2 2710 int ret;
55cc08a6 2711
7972aab2
DG
2712 assert(reg_sess);
2713 assert(reg_chan);
2714 assert(ua_chan);
2715 assert(ua_chan->obj);
55cc08a6 2716
7972aab2 2717 DBG2("UST app setup buffer registry channel for %s", ua_chan->name);
55cc08a6 2718
7972aab2 2719 /* Setup all streams for the registry. */
fb45065e 2720 ret = setup_buffer_reg_streams(reg_chan, ua_chan, app);
7972aab2 2721 if (ret < 0) {
55cc08a6
DG
2722 goto error;
2723 }
2724
7972aab2
DG
2725 reg_chan->obj.ust = ua_chan->obj;
2726 ua_chan->obj = NULL;
55cc08a6 2727
7972aab2 2728 return 0;
55cc08a6
DG
2729
2730error:
7972aab2
DG
2731 buffer_reg_channel_remove(reg_sess, reg_chan);
2732 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
55cc08a6
DG
2733 return ret;
2734}
2735
edb67388 2736/*
7972aab2 2737 * Send buffer registry channel to the application.
d0b96690 2738 *
7972aab2 2739 * Return 0 on success else a negative value.
edb67388 2740 */
7972aab2
DG
2741static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
2742 struct ust_app *app, struct ust_app_session *ua_sess,
2743 struct ust_app_channel *ua_chan)
edb67388
DG
2744{
2745 int ret;
7972aab2 2746 struct buffer_reg_stream *reg_stream;
edb67388 2747
7972aab2
DG
2748 assert(reg_chan);
2749 assert(app);
2750 assert(ua_sess);
2751 assert(ua_chan);
2752
2753 DBG("UST app sending buffer registry channel to ust sock %d", app->sock);
2754
2755 ret = duplicate_channel_object(reg_chan, ua_chan);
edb67388
DG
2756 if (ret < 0) {
2757 goto error;
2758 }
2759
7972aab2
DG
2760 /* Send channel to the application. */
2761 ret = ust_consumer_send_channel_to_ust(app, ua_sess, ua_chan);
a7169585
MD
2762 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2763 ret = -ENOTCONN; /* Caused by app exiting. */
2764 goto error;
2765 } else if (ret < 0) {
7972aab2
DG
2766 goto error;
2767 }
2768
2769 health_code_update();
2770
2771 /* Send all streams to application. */
2772 pthread_mutex_lock(&reg_chan->stream_list_lock);
2773 cds_list_for_each_entry(reg_stream, &reg_chan->streams, lnode) {
2774 struct ust_app_stream stream;
2775
2776 ret = duplicate_stream_object(reg_stream, &stream);
2777 if (ret < 0) {
2778 goto error_stream_unlock;
2779 }
2780
2781 ret = ust_consumer_send_stream_to_ust(app, ua_chan, &stream);
2782 if (ret < 0) {
fb45065e 2783 (void) release_ust_app_stream(-1, &stream, app);
a7169585
MD
2784 if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
2785 ret = -ENOTCONN; /* Caused by app exiting. */
a7169585 2786 }
7972aab2
DG
2787 goto error_stream_unlock;
2788 }
edb67388 2789
7972aab2
DG
2790 /*
2791 * The return value is not important here. This function will output an
2792 * error if needed.
2793 */
fb45065e 2794 (void) release_ust_app_stream(-1, &stream, app);
7972aab2
DG
2795 }
2796 ua_chan->is_sent = 1;
2797
2798error_stream_unlock:
2799 pthread_mutex_unlock(&reg_chan->stream_list_lock);
edb67388
DG
2800error:
2801 return ret;
2802}
2803
9730260e 2804/*
7972aab2
DG
2805 * Create and send to the application the created buffers with per UID buffers.
2806 *
9acdc1d6 2807 * This MUST be called with a RCU read side lock acquired.
71e0a100 2808 * The session list lock and the session's lock must be acquired.
9acdc1d6 2809 *
7972aab2 2810 * Return 0 on success else a negative value.
9730260e 2811 */
7972aab2
DG
2812static int create_channel_per_uid(struct ust_app *app,
2813 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2814 struct ust_app_channel *ua_chan)
9730260e
DG
2815{
2816 int ret;
7972aab2
DG
2817 struct buffer_reg_uid *reg_uid;
2818 struct buffer_reg_channel *reg_chan;
48a86f68 2819 struct ltt_session *session = NULL;
e098433c
JG
2820 enum lttng_error_code notification_ret;
2821 struct ust_registry_channel *chan_reg;
9730260e 2822
7972aab2
DG
2823 assert(app);
2824 assert(usess);
2825 assert(ua_sess);
2826 assert(ua_chan);
2827
2828 DBG("UST app creating channel %s with per UID buffers", ua_chan->name);
2829
2830 reg_uid = buffer_reg_uid_find(usess->id, app->bits_per_long, app->uid);
2831 /*
2832 * The session creation handles the creation of this global registry
2833 * object. If none can be find, there is a code flow problem or a
2834 * teardown race.
2835 */
2836 assert(reg_uid);
2837
2838 reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
2839 reg_uid);
2721f7ea
JG
2840 if (reg_chan) {
2841 goto send_channel;
2842 }
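/* No registry channel yet for this UID/bitness: this application is the first user, so create the buffers and the registry entry below. */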
7972aab2 2843
2721f7ea
JG
2844 /* Create the buffer registry channel object. */
2845 ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
2846 if (ret < 0) {
2847 ERR("Error creating the UST channel \"%s\" registry instance",
f14256d6 2848 ua_chan->name);
2721f7ea
JG
2849 goto error;
2850 }
f14256d6 2851
e098433c
JG
2852 session = session_find_by_id(ua_sess->tracing_id);
2853 assert(session);
2854 assert(pthread_mutex_trylock(&session->lock));
2855 assert(session_trylock_list());
2856
2721f7ea
JG
2857 /*
2858 * Create the buffers on the consumer side. This call populates the
2859 * ust app channel object with all streams and data object.
2860 */
2861 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
e098433c 2862 app->bits_per_long, reg_uid->registry->reg.ust,
e5148e25 2863 session->most_recent_chunk_id.value);
2721f7ea
JG
2864 if (ret < 0) {
2865 ERR("Error creating UST channel \"%s\" on the consumer daemon",
2866 ua_chan->name);
7972aab2
DG
2867
2868 /*
2721f7ea
JG
2869 * Let's remove the previously created buffer registry channel so
2870 * it's not visible anymore in the session registry.
7972aab2 2871 */
2721f7ea
JG
2872 ust_registry_channel_del_free(reg_uid->registry->reg.ust,
2873 ua_chan->tracing_channel_id, false);
2874 buffer_reg_channel_remove(reg_uid->registry, reg_chan);
2875 buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
2876 goto error;
7972aab2
DG
2877 }
2878
2721f7ea
JG
2879 /*
2880 * Setup the streams and add it to the session registry.
2881 */
2882 ret = setup_buffer_reg_channel(reg_uid->registry,
2883 ua_chan, reg_chan, app);
2884 if (ret < 0) {
2885 ERR("Error setting up UST channel \"%s\"", ua_chan->name);
2886 goto error;
2887 }
2888
e098433c
JG
2889 /* Notify the notification subsystem of the channel's creation. */
2890 pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
2891 chan_reg = ust_registry_channel_find(reg_uid->registry->reg.ust,
2892 ua_chan->tracing_channel_id);
2893 assert(chan_reg);
2894 chan_reg->consumer_key = ua_chan->key;
2895 chan_reg = NULL;
2896 pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);
e9404c27 2897
e098433c
JG
2898 notification_ret = notification_thread_command_add_channel(
2899 notification_thread_handle, session->name,
c51311d6
JG
2900 ua_sess->effective_credentials.uid,
2901 ua_sess->effective_credentials.gid, ua_chan->name,
2902 ua_chan->key, LTTNG_DOMAIN_UST,
e098433c
JG
2903 ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
2904 if (notification_ret != LTTNG_OK) {
2905 ret = - (int) notification_ret;
2906 ERR("Failed to add channel to notification thread");
2907 goto error;
e9404c27
JG
2908 }
2909
2721f7ea 2910send_channel:
66ff8e3f
JG
2911 /* Send buffers to the application. */
2912 ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
2913 if (ret < 0) {
2914 if (ret != -ENOTCONN) {
2915 ERR("Error sending channel to application");
2916 }
2917 goto error;
2918 }
2919
9730260e 2920error:
48a86f68
JG
2921 if (session) {
2922 session_put(session);
2923 }
9730260e
DG
2924 return ret;
2925}
2926
78f0bacd 2927/*
7972aab2
DG
2928 * Create and send to the application the created buffers with per PID buffers.
2929 *
fad1ed2f 2930 * Called with UST app session lock held.
71e0a100 2931 * The session list lock and the session's lock must be acquired.
fad1ed2f 2932 *
7972aab2 2933 * Return 0 on success else a negative value.
78f0bacd 2934 */
7972aab2
DG
2935static int create_channel_per_pid(struct ust_app *app,
2936 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
2937 struct ust_app_channel *ua_chan)
78f0bacd 2938{
8535a6d9 2939 int ret;
7972aab2 2940 struct ust_registry_session *registry;
e9404c27 2941 enum lttng_error_code cmd_ret;
48a86f68 2942 struct ltt_session *session = NULL;
e9404c27
JG
2943 uint64_t chan_reg_key;
2944 struct ust_registry_channel *chan_reg;
78f0bacd 2945
7972aab2
DG
2946 assert(app);
2947 assert(usess);
2948 assert(ua_sess);
2949 assert(ua_chan);
2950
2951 DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
2952
2953 rcu_read_lock();
2954
2955 registry = get_session_registry(ua_sess);
fad1ed2f 2956 /* The UST app session lock is held, registry shall not be null. */
7972aab2
DG
2957 assert(registry);
2958
2959 /* Create and add a new channel registry to session. */
2960 ret = ust_registry_channel_add(registry, ua_chan->key);
78f0bacd 2961 if (ret < 0) {
f14256d6
MD
2962 ERR("Error creating the UST channel \"%s\" registry instance",
2963 ua_chan->name);
78f0bacd
DG
2964 goto error;
2965 }
2966
e098433c
JG
2967 session = session_find_by_id(ua_sess->tracing_id);
2968 assert(session);
2969
2970 assert(pthread_mutex_trylock(&session->lock));
2971 assert(session_trylock_list());
2972
7972aab2
DG
2973 /* Create and get channel on the consumer side. */
2974 ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
e098433c 2975 app->bits_per_long, registry,
e5148e25 2976 session->most_recent_chunk_id.value);
7972aab2 2977 if (ret < 0) {
f14256d6
MD
2978 ERR("Error creating UST channel \"%s\" on the consumer daemon",
2979 ua_chan->name);
7f560f1d 2980 goto error_remove_from_registry;
7972aab2
DG
2981 }
2982
2983 ret = send_channel_pid_to_ust(app, ua_sess, ua_chan);
2984 if (ret < 0) {
a7169585
MD
2985 if (ret != -ENOTCONN) {
2986 ERR("Error sending channel to application");
2987 }
7f560f1d 2988 goto error_remove_from_registry;
7972aab2 2989 }
8535a6d9 2990
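/* Record the consumer key in the session registry's channel entry, under the registry lock, before notifying the notification thread. */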
e9404c27
JG
2991 chan_reg_key = ua_chan->key;
2992 pthread_mutex_lock(&registry->lock);
2993 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
2994 assert(chan_reg);
2995 chan_reg->consumer_key = ua_chan->key;
2996 pthread_mutex_unlock(&registry->lock);
2997
2998 cmd_ret = notification_thread_command_add_channel(
2999 notification_thread_handle, session->name,
c51311d6
JG
3000 ua_sess->effective_credentials.uid,
3001 ua_sess->effective_credentials.gid, ua_chan->name,
3002 ua_chan->key, LTTNG_DOMAIN_UST,
e9404c27
JG
3003 ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
3004 if (cmd_ret != LTTNG_OK) {
3005 ret = - (int) cmd_ret;
3006 ERR("Failed to add channel to notification thread");
7f560f1d 3007 goto error_remove_from_registry;
e9404c27
JG
3008 }
3009
7f560f1d
MD
3010error_remove_from_registry:
3011 if (ret) {
3012 ust_registry_channel_del_free(registry, ua_chan->key, false);
3013 }
78f0bacd 3014error:
7972aab2 3015 rcu_read_unlock();
48a86f68
JG
3016 if (session) {
3017 session_put(session);
3018 }
78f0bacd
DG
3019 return ret;
3020}
3021
3022/*
7972aab2 3023 * From an already allocated ust app channel, create the channel buffers if
0ce9aa93 3024 * needed and send them to the application. This MUST be called with a RCU read
7972aab2
DG
3025 * side lock acquired.
3026 *
fad1ed2f
JR
3027 * Called with UST app session lock held.
3028 *
a7169585
MD
3029 * Return 0 on success or else a negative value. Returns -ENOTCONN if
3030 * the application exited concurrently.
78f0bacd 3031 */
0ce9aa93 3032static int ust_app_channel_send(struct ust_app *app,
7972aab2
DG
3033 struct ltt_ust_session *usess, struct ust_app_session *ua_sess,
3034 struct ust_app_channel *ua_chan)
78f0bacd 3035{
7972aab2 3036 int ret;
78f0bacd 3037
7972aab2
DG
3038 assert(app);
3039 assert(usess);
0ce9aa93 3040 assert(usess->active);
7972aab2
DG
3041 assert(ua_sess);
3042 assert(ua_chan);
3043
3044 /* Handle buffer type before sending the channel to the application. */
3045 switch (usess->buffer_type) {
3046 case LTTNG_BUFFER_PER_UID:
3047 {
3048 ret = create_channel_per_uid(app, usess, ua_sess, ua_chan);
3049 if (ret < 0) {
3050 goto error;
3051 }
3052 break;
3053 }
3054 case LTTNG_BUFFER_PER_PID:
3055 {
3056 ret = create_channel_per_pid(app, usess, ua_sess, ua_chan);
3057 if (ret < 0) {
3058 goto error;
3059 }
3060 break;
3061 }
3062 default:
3063 assert(0);
3064 ret = -EINVAL;
78f0bacd
DG
3065 goto error;
3066 }
3067
7972aab2
DG
3068 /* Initialize ust objd object using the received handle and add it. */
3069 lttng_ht_node_init_ulong(&ua_chan->ust_objd_node, ua_chan->handle);
3070 lttng_ht_add_unique_ulong(app->ust_objd, &ua_chan->ust_objd_node);
78f0bacd 3071
7972aab2
DG
3072 /* If channel is not enabled, disable it on the tracer */
3073 if (!ua_chan->enabled) {
3074 ret = disable_ust_channel(app, ua_sess, ua_chan);
3075 if (ret < 0) {
3076 goto error;
3077 }
78f0bacd
DG
3078 }
3079
3080error:
3081 return ret;
3082}
3083
284d8f55 3084/*
0ce9aa93 3085 * Create UST app channel and return it through ua_chanp if not NULL.
d0b96690 3086 *
36b588ed 3087 * Called with UST app session lock and RCU read-side lock held.
7972aab2 3088 *
0ce9aa93 3089 * Return 0 on success or else a negative value.
284d8f55 3090 */
0ce9aa93
JG
3091static int ust_app_channel_allocate(struct ust_app_session *ua_sess,
3092 struct ltt_ust_channel *uchan,
7972aab2 3093 enum lttng_ust_chan_type type, struct ltt_ust_session *usess,
4d710ac2 3094 struct ust_app_channel **ua_chanp)
5b4a0ec0
DG
3095{
3096 int ret = 0;
bec39940
DG
3097 struct lttng_ht_iter iter;
3098 struct lttng_ht_node_str *ua_chan_node;
5b4a0ec0
DG
3099 struct ust_app_channel *ua_chan;
3100
3101 /* Lookup channel in the ust app session */
bec39940
DG
3102 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &iter);
3103 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
fc34caaa 3104 if (ua_chan_node != NULL) {
5b4a0ec0 3105 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
fc34caaa 3106 goto end;
5b4a0ec0
DG
3107 }
3108
d0b96690 3109 ua_chan = alloc_ust_app_channel(uchan->name, ua_sess, &uchan->attr);
fc34caaa
DG
3110 if (ua_chan == NULL) {
3111 /* Only malloc can fail here */
4d710ac2 3112 ret = -ENOMEM;
0ce9aa93 3113 goto error;
fc34caaa
DG
3114 }
3115 shadow_copy_channel(ua_chan, uchan);
3116
ffe60014
DG
3117 /* Set channel type. */
3118 ua_chan->attr.type = type;
3119
d0b96690
DG
3120 /* Only add the channel if successful on the tracer side. */
3121 lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
fc34caaa 3122end:
4d710ac2
DG
3123 if (ua_chanp) {
3124 *ua_chanp = ua_chan;
3125 }
3126
3127 /* Everything went well. */
3128 return 0;
5b4a0ec0
DG
3129
3130error:
4d710ac2 3131 return ret;
5b4a0ec0
DG
3132}
3133
3134/*
3135 * Create UST app event and create it on the tracer side.
d0b96690
DG
3136 *
3137 * Called with ust app session mutex held.
5b4a0ec0 3138 */
edb67388
DG
3139static
3140int create_ust_app_event(struct ust_app_session *ua_sess,
3141 struct ust_app_channel *ua_chan, struct ltt_ust_event *uevent,
3142 struct ust_app *app)
284d8f55 3143{
edb67388 3144 int ret = 0;
5b4a0ec0 3145 struct ust_app_event *ua_event;
284d8f55 3146
edb67388
DG
3147 ua_event = alloc_ust_app_event(uevent->attr.name, &uevent->attr);
3148 if (ua_event == NULL) {
48a0b866 3149 /* Only failure mode of alloc_ust_app_event(). */
edb67388 3150 ret = -ENOMEM;
fc34caaa 3151 goto end;
5b4a0ec0 3152 }
edb67388 3153 shadow_copy_event(ua_event, uevent);
5b4a0ec0 3154
edb67388 3155 /* Create it on the tracer side */
5b4a0ec0 3156 ret = create_ust_event(app, ua_sess, ua_chan, ua_event);
284d8f55 3157 if (ret < 0) {
3177a074
JG
3158 /*
3159 * Not found previously means that it does not exist on the
3160 * tracer. If the application reports that the event existed,
3161 * it means there is a bug in the sessiond or lttng-ust
3162 * (or corruption, etc.)
3163 */
3164 if (ret == -LTTNG_UST_ERR_EXIST) {
3165 ERR("Tracer for application reported that an event being created already existed: "
3166 "event_name = \"%s\", pid = %d, ppid = %d, uid = %d, gid = %d",
3167 uevent->attr.name,
3168 app->pid, app->ppid, app->uid,
3169 app->gid);
3170 }
284d8f55
DG
3171 goto error;
3172 }
3173
d0b96690 3174 add_unique_ust_app_event(ua_chan, ua_event);
284d8f55 3175
fc34caaa 3176 DBG2("UST app create event %s for PID %d completed", ua_event->name,
852d0037 3177 app->pid);
7f79d3a1 3178
edb67388 3179end:
fc34caaa
DG
3180 return ret;
3181
5b4a0ec0 3182error:
fc34caaa 3183 /* Valid. Calling here is already in a read side lock */
fb45065e 3184 delete_ust_app_event(-1, ua_event, app);
edb67388 3185 return ret;
5b4a0ec0
DG
3186}
3187
3188/*
3189 * Create UST metadata and open it on the tracer side.
d0b96690 3190 *
7972aab2 3191 * Called with UST app session lock held and RCU read side lock.
5b4a0ec0
DG
3192 */
3193static int create_ust_app_metadata(struct ust_app_session *ua_sess,
ad7a9107 3194 struct ust_app *app, struct consumer_output *consumer)
5b4a0ec0
DG
3195{
3196 int ret = 0;
ffe60014 3197 struct ust_app_channel *metadata;
d88aee68 3198 struct consumer_socket *socket;
7972aab2 3199 struct ust_registry_session *registry;
48a86f68 3200 struct ltt_session *session = NULL;
5b4a0ec0 3201
ffe60014
DG
3202 assert(ua_sess);
3203 assert(app);
d88aee68 3204 assert(consumer);
5b4a0ec0 3205
7972aab2 3206 registry = get_session_registry(ua_sess);
fad1ed2f 3207 /* The UST app session lock is held, registry shall not be null. */
7972aab2
DG
3208 assert(registry);
3209
ce34fcd0
MD
3210 pthread_mutex_lock(&registry->lock);
3211
1b532a60
DG
3212 /* Metadata already exists for this registry or it was closed previously */
3213 if (registry->metadata_key || registry->metadata_closed) {
7972aab2
DG
3214 ret = 0;
3215 goto error;
5b4a0ec0
DG
3216 }
3217
ffe60014 3218 /* Allocate UST metadata */
d0b96690 3219 metadata = alloc_ust_app_channel(DEFAULT_METADATA_NAME, ua_sess, NULL);
ffe60014
DG
3220 if (!metadata) {
3221 /* malloc() failed */
3222 ret = -ENOMEM;
3223 goto error;
3224 }
5b4a0ec0 3225
ad7a9107 3226 memcpy(&metadata->attr, &ua_sess->metadata_attr, sizeof(metadata->attr));
5b4a0ec0 3227
7972aab2
DG
3228 /* Need one fd for the channel. */
3229 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
3230 if (ret < 0) {
3231 ERR("Exhausted number of available FD upon create metadata");
3232 goto error;
3233 }
3234
4dc3dfc5
DG
3235 /* Get the right consumer socket for the application. */
3236 socket = consumer_find_socket_by_bitness(app->bits_per_long, consumer);
3237 if (!socket) {
3238 ret = -EINVAL;
3239 goto error_consumer;
3240 }
3241
331744e3
JD
3242 /*
3243 * Keep metadata key so we can identify it on the consumer side. Assign it
3244 * to the registry *before* we ask the consumer so we avoid the race of the
3245 * consumer requesting the metadata before the ask_channel call on our side
3246 * has returned.
3247 */
3248 registry->metadata_key = metadata->key;
3249
e098433c
JG
3250 session = session_find_by_id(ua_sess->tracing_id);
3251 assert(session);
3252
3253 assert(pthread_mutex_trylock(&session->lock));
3254 assert(session_trylock_list());
3255
d88aee68
DG
3256 /*
3257 * Ask the metadata channel creation to the consumer. The metadata object
3258 * will be created by the consumer and kept there. However, the stream is
3259 * never added or monitored until we do a first push metadata to the
3260 * consumer.
3261 */
7972aab2 3262 ret = ust_consumer_ask_channel(ua_sess, metadata, consumer, socket,
e5148e25 3263 registry, session->current_trace_chunk);
d88aee68 3264 if (ret < 0) {
f2a444f1
DG
3265 /* Nullify the metadata key so we don't try to close it later on. */
3266 registry->metadata_key = 0;
d88aee68
DG
3267 goto error_consumer;
3268 }
3269
3270 /*
3271 * The setup command will make the metadata stream be sent to the relayd,
3272 * if applicable, and the thread managing the metadata. This is important
3273 * because after this point, if an error occurs, the only way the stream
3274 * can be deleted is to be monitored in the consumer.
3275 */
7972aab2 3276 ret = consumer_setup_metadata(socket, metadata->key);
ffe60014 3277 if (ret < 0) {
f2a444f1
DG
3278 /* Nullify the metadata key so we don't try to close it later on. */
3279 registry->metadata_key = 0;
d88aee68 3280 goto error_consumer;
5b4a0ec0
DG
3281 }
3282
7972aab2
DG
3283 DBG2("UST metadata with key %" PRIu64 " created for app pid %d",
3284 metadata->key, app->pid);
5b4a0ec0 3285
d88aee68 3286error_consumer:
b80f0b6c 3287 lttng_fd_put(LTTNG_FD_APPS, 1);
d88aee68 3288 delete_ust_app_channel(-1, metadata, app);
5b4a0ec0 3289error:
ce34fcd0 3290 pthread_mutex_unlock(&registry->lock);
48a86f68
JG
3291 if (session) {
3292 session_put(session);
3293 }
ffe60014 3294 return ret;
5b4a0ec0
DG
3295}
3296
5b4a0ec0 3297/*
d88aee68
DG
3298 * Return ust app pointer or NULL if not found. RCU read side lock MUST be
3299 * acquired before calling this function.
5b4a0ec0
DG
3300 */
3301struct ust_app *ust_app_find_by_pid(pid_t pid)
3302{
d88aee68 3303 struct ust_app *app = NULL;
bec39940
DG
3304 struct lttng_ht_node_ulong *node;
3305 struct lttng_ht_iter iter;
5b4a0ec0 3306
bec39940
DG
3307 lttng_ht_lookup(ust_app_ht, (void *)((unsigned long) pid), &iter);
3308 node = lttng_ht_iter_get_node_ulong(&iter);
5b4a0ec0
DG
3309 if (node == NULL) {
3310 DBG2("UST app no found with pid %d", pid);
3311 goto error;
3312 }
5b4a0ec0
DG
3313
3314 DBG2("Found UST app by pid %d", pid);
3315
d88aee68 3316 app = caa_container_of(node, struct ust_app, pid_n);
5b4a0ec0
DG
3317
3318error:
d88aee68 3319 return app;
5b4a0ec0
DG
3320}
3321
d88aee68
DG
3322/*
3323 * Allocate and init an UST app object using the registration information and
3324 * the command socket. This is called when the command socket connects to the
3325 * session daemon.
3326 *
3327 * The object is returned on success or else NULL.
3328 */
d0b96690 3329struct ust_app *ust_app_create(struct ust_register_msg *msg, int sock)
5b4a0ec0 3330{
d0b96690
DG
3331 struct ust_app *lta = NULL;
3332
3333 assert(msg);
3334 assert(sock >= 0);
3335
3336 DBG3("UST app creating application for socket %d", sock);
5b4a0ec0 3337
173af62f
DG
3338 if ((msg->bits_per_long == 64 &&
3339 (uatomic_read(&ust_consumerd64_fd) == -EINVAL))
3340 || (msg->bits_per_long == 32 &&
3341 (uatomic_read(&ust_consumerd32_fd) == -EINVAL))) {
f943b0fb 3342 ERR("Registration failed: application \"%s\" (pid: %d) has "
d0b96690
DG
3343 "%d-bit long, but no consumerd for this size is available.\n",
3344 msg->name, msg->pid, msg->bits_per_long);
3345 goto error;
3f2c5fcc 3346 }
d0b96690 3347
5b4a0ec0
DG
3348 lta = zmalloc(sizeof(struct ust_app));
3349 if (lta == NULL) {
3350 PERROR("malloc");
d0b96690 3351 goto error;
5b4a0ec0
DG
3352 }
3353
3354 lta->ppid = msg->ppid;
3355 lta->uid = msg->uid;
3356 lta->gid = msg->gid;
d0b96690 3357
7753dea8 3358 lta->bits_per_long = msg->bits_per_long;
d0b96690
DG
3359 lta->uint8_t_alignment = msg->uint8_t_alignment;
3360 lta->uint16_t_alignment = msg->uint16_t_alignment;
3361 lta->uint32_t_alignment = msg->uint32_t_alignment;
3362 lta->uint64_t_alignment = msg->uint64_t_alignment;
3363 lta->long_alignment = msg->long_alignment;
3364 lta->byte_order = msg->byte_order;
3365
5b4a0ec0
DG
3366 lta->v_major = msg->major;
3367 lta->v_minor = msg->minor;
d9bf3ca4 3368 lta->sessions = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
d0b96690 3369 lta->ust_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
10b56aef 3370 lta->ust_sessions_objd = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
d0b96690 3371 lta->notify_sock = -1;
d88aee68
DG
3372
3373 /* Copy name and make sure it's NULL terminated. */
3374 strncpy(lta->name, msg->name, sizeof(lta->name));
3375 lta->name[UST_APP_PROCNAME_LEN] = '\0';
3376
3377 /*
3378 * Before this can be called, when receiving the registration information,
3379 * the application compatibility is checked. So, at this point, the
3380 * application can work with this session daemon.
3381 */
d0b96690 3382 lta->compatible = 1;
5b4a0ec0 3383
852d0037 3384 lta->pid = msg->pid;
d0b96690 3385 lttng_ht_node_init_ulong(&lta->pid_n, (unsigned long) lta->pid);
852d0037 3386 lta->sock = sock;
fb45065e 3387 pthread_mutex_init(&lta->sock_lock, NULL);
d0b96690 3388 lttng_ht_node_init_ulong(&lta->sock_n, (unsigned long) lta->sock);
5b4a0ec0 3389
d42f20df 3390 CDS_INIT_LIST_HEAD(&lta->teardown_head);
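/* Sessions that cannot be destroyed immediately are queued on teardown_head and reclaimed when the application unregisters (see ust_app_unregister). */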
d0b96690
DG
3391error:
3392 return lta;
3393}
3394
d88aee68
DG
3395/*
3396 * For a given application object, add it to every hash table.
3397 */
d0b96690
DG
3398void ust_app_add(struct ust_app *app)
3399{
3400 assert(app);
3401 assert(app->notify_sock >= 0);
3402
cd82d919
JR
3403 app->registration_time = time(NULL);
3404
5b4a0ec0 3405 rcu_read_lock();
852d0037
DG
3406
3407 /*
3408 * On a re-registration, we want to kick out the previous registration of
3409 * that pid
3410 */
d0b96690 3411 lttng_ht_add_replace_ulong(ust_app_ht, &app->pid_n);
852d0037
DG
3412
3413 /*
3414 * The socket _should_ be unique until _we_ call close. So, an add_unique
3415 * is used for ust_app_ht_by_sock, which asserts if the entry was
3416 * already in the table.
3417 */
d0b96690 3418 lttng_ht_add_unique_ulong(ust_app_ht_by_sock, &app->sock_n);
852d0037 3419
d0b96690
DG
3420 /* Add application to the notify socket hash table. */
3421 lttng_ht_node_init_ulong(&app->notify_sock_n, app->notify_sock);
3422 lttng_ht_add_unique_ulong(ust_app_ht_by_notify_sock, &app->notify_sock_n);
5b4a0ec0 3423
d0b96690 3424 DBG("App registered with pid:%d ppid:%d uid:%d gid:%d sock:%d name:%s "
d88aee68
DG
3425 "notify_sock:%d (version %d.%d)", app->pid, app->ppid, app->uid,
3426 app->gid, app->sock, app->name, app->notify_sock, app->v_major,
3427 app->v_minor);
5b4a0ec0 3428
d0b96690
DG
3429 rcu_read_unlock();
3430}
3431
d88aee68
DG
3432/*
3433 * Set the application version into the object.
3434 *
3435	 * Return 0 on success, else a negative value that is either an errno code
3436	 * or an LTTng-UST error code.
3437 */
d0b96690
DG
3438int ust_app_version(struct ust_app *app)
3439{
d88aee68
DG
3440 int ret;
3441
d0b96690 3442 assert(app);
d88aee68 3443
fb45065e 3444 pthread_mutex_lock(&app->sock_lock);
d88aee68 3445 ret = ustctl_tracer_version(app->sock, &app->version);
fb45065e 3446 pthread_mutex_unlock(&app->sock_lock);
d88aee68
DG
3447 if (ret < 0) {
3448 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
5368d366 3449 ERR("UST app %d version failed with ret %d", app->sock, ret);
d88aee68 3450 } else {
5368d366 3451 DBG3("UST app %d version failed. Application is dead", app->sock);
d88aee68
DG
3452 }
3453 }
3454
3455 return ret;
5b4a0ec0
DG
3456}
3457
3458/*
3459	 * Unregister an app by removing it from the global traceable app list and
3460	 * freeing its data structure.
3461	 *
3462	 * The socket is already closed at this point, so it is not closed here.
3463 */
3464void ust_app_unregister(int sock)
3465{
3466 struct ust_app *lta;
bec39940 3467 struct lttng_ht_node_ulong *node;
c4b88406 3468 struct lttng_ht_iter ust_app_sock_iter;
bec39940 3469 struct lttng_ht_iter iter;
d42f20df 3470 struct ust_app_session *ua_sess;
525b0740 3471 int ret;
5b4a0ec0
DG
3472
3473 rcu_read_lock();
886459c6 3474
5b4a0ec0 3475 /* Get the node reference for a call_rcu */
c4b88406
MD
3476 lttng_ht_lookup(ust_app_ht_by_sock, (void *)((unsigned long) sock), &ust_app_sock_iter);
3477 node = lttng_ht_iter_get_node_ulong(&ust_app_sock_iter);
d0b96690 3478 assert(node);
284d8f55 3479
852d0037 3480 lta = caa_container_of(node, struct ust_app, sock_n);
852d0037
DG
3481 DBG("PID %d unregistering with sock %d", lta->pid, sock);
3482
d88aee68 3483 /*
ce34fcd0
MD
3484 * For per-PID buffers, perform "push metadata" and flush all
3485 * application streams before removing app from hash tables,
3486 * ensuring proper behavior of data_pending check.
c4b88406 3487 * Remove sessions so they are not visible during deletion.
d88aee68 3488 */
d42f20df
DG
3489 cds_lfht_for_each_entry(lta->sessions->ht, &iter.iter, ua_sess,
3490 node.node) {
7972aab2
DG
3491 struct ust_registry_session *registry;
3492
d42f20df
DG
3493 ret = lttng_ht_del(lta->sessions, &iter);
3494 if (ret) {
3495	/* The session was already removed and is thus scheduled for teardown. */
3496 continue;
3497 }
3498
ce34fcd0
MD
3499 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
3500 (void) ust_app_flush_app_session(lta, ua_sess);
3501 }
c4b88406 3502
d42f20df
DG
3503 /*
3504 * Add session to list for teardown. This is safe since at this point we
3505 * are the only one using this list.
3506 */
d88aee68
DG
3507 pthread_mutex_lock(&ua_sess->lock);
3508
b161602a
MD
3509 if (ua_sess->deleted) {
3510 pthread_mutex_unlock(&ua_sess->lock);
3511 continue;
3512 }
3513
d88aee68
DG
3514 /*
3515 * Normally, this is done in the delete session process which is
3516	 * executed in the call rcu below. However, upon unregistration we can't
3517	 * afford to wait for the grace period before pushing data or else the
3518	 * data pending feature can race between the unregistration and stop
3519	 * commands where the data pending command is sent *before* the grace
3520	 * period has ended.
3521 *
3522 * The close metadata below nullifies the metadata pointer in the
3523 * session so the delete session will NOT push/close a second time.
3524 */
7972aab2 3525 registry = get_session_registry(ua_sess);
ce34fcd0 3526 if (registry) {
7972aab2
DG
3527 /* Push metadata for application before freeing the application. */
3528 (void) push_metadata(registry, ua_sess->consumer);
3529
3530 /*
3531 * Don't ask to close metadata for global per UID buffers. Close
1b532a60
DG
3532	 * metadata only on trace session destruction in this case. Also, the
3533	 * previous push metadata could have flagged the metadata registry to
3534	 * close, so don't send a close command if it is already closed.
7972aab2 3535 */
ce34fcd0 3536 if (ua_sess->buffer_type != LTTNG_BUFFER_PER_UID) {
7972aab2
DG
3537 /* And ask to close it for this session registry. */
3538 (void) close_metadata(registry, ua_sess->consumer);
3539 }
3540 }
d42f20df 3541 cds_list_add(&ua_sess->teardown_node, &lta->teardown_head);
c4b88406 3542
d88aee68 3543 pthread_mutex_unlock(&ua_sess->lock);
d42f20df
DG
3544 }
3545
c4b88406
MD
3546	/* Remove application from the socket hash table */
3547 ret = lttng_ht_del(ust_app_ht_by_sock, &ust_app_sock_iter);
3548 assert(!ret);
3549
3550 /*
3551 * Remove application from notify hash table. The thread handling the
3552	 * notify socket could have deleted the node, so ignore any error because
c48239ca
JG
3553 * either way it's valid. The close of that socket is handled by the
3554 * apps_notify_thread.
c4b88406
MD
3555 */
3556 iter.iter.node = &lta->notify_sock_n.node;
3557 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
3558
3559 /*
3560 * Ignore return value since the node might have been removed before by an
3561 * add replace during app registration because the PID can be reassigned by
3562 * the OS.
3563 */
3564 iter.iter.node = &lta->pid_n.node;
3565 ret = lttng_ht_del(ust_app_ht, &iter);
3566 if (ret) {
3567 DBG3("Unregister app by PID %d failed. This can happen on pid reuse",
3568 lta->pid);
3569 }
3570
852d0037
DG
3571 /* Free memory */
3572 call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
3573
5b4a0ec0
DG
3574 rcu_read_unlock();
3575 return;
284d8f55
DG
3576}
3577
5b4a0ec0
DG
3578/*
3579	 * Fill the events array with the names of all events from all registered apps.
3580 */
3581int ust_app_list_events(struct lttng_event **events)
421cb601 3582{
5b4a0ec0
DG
3583 int ret, handle;
3584 size_t nbmem, count = 0;
bec39940 3585 struct lttng_ht_iter iter;
5b4a0ec0 3586 struct ust_app *app;
c617c0c6 3587 struct lttng_event *tmp_event;
421cb601 3588
5b4a0ec0 3589 nbmem = UST_APP_EVENT_LIST_SIZE;
c617c0c6
MD
3590 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event));
3591 if (tmp_event == NULL) {
5b4a0ec0
DG
3592 PERROR("zmalloc ust app events");
3593 ret = -ENOMEM;
421cb601
DG
3594 goto error;
3595 }
3596
5b4a0ec0 3597 rcu_read_lock();
421cb601 3598
852d0037 3599 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
90eaa0d2 3600 struct lttng_ust_tracepoint_iter uiter;
ac3bd9c0 3601
840cb59c 3602 health_code_update();
86acf0da 3603
e0c7ec2b
DG
3604 if (!app->compatible) {
3605 /*
3606	 * TODO: In time, we should notify the caller of this error by
3607	 * telling them that this is a version error.
3608 */
3609 continue;
3610 }
fb45065e 3611 pthread_mutex_lock(&app->sock_lock);
852d0037 3612 handle = ustctl_tracepoint_list(app->sock);
5b4a0ec0 3613 if (handle < 0) {
ffe60014
DG
3614 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3615 ERR("UST app list events getting handle failed for app pid %d",
3616 app->pid);
3617 }
fb45065e 3618 pthread_mutex_unlock(&app->sock_lock);
5b4a0ec0
DG
3619 continue;
3620 }
421cb601 3621
852d0037 3622 while ((ret = ustctl_tracepoint_list_get(app->sock, handle,
fb54cdbf 3623 &uiter)) != -LTTNG_UST_ERR_NOENT) {
ffe60014
DG
3624 /* Handle ustctl error. */
3625 if (ret < 0) {
fb45065e
MD
3626 int release_ret;
3627
a2ba1ab0 3628 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
ffe60014
DG
3629 ERR("UST app tp list get failed for app %d with ret %d",
3630 app->sock, ret);
3631 } else {
3632 DBG3("UST app tp list get failed. Application is dead");
3757b385
DG
3633 /*
3634 * This is normal behavior, an application can die during the
3635 * creation process. Don't report an error so the execution can
3636	 * continue normally.
3637 */
3638 break;
ffe60014 3639 }
98f595d4 3640 free(tmp_event);
fb45065e 3641 release_ret = ustctl_release_handle(app->sock, handle);
68313703
JG
3642 if (release_ret < 0 &&
3643 release_ret != -LTTNG_UST_ERR_EXITING &&
3644 release_ret != -EPIPE) {
fb45065e
MD
3645 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3646 }
3647 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
3648 goto rcu_error;
3649 }
3650
840cb59c 3651 health_code_update();
815564d8 3652 if (count >= nbmem) {
d7b3776f 3653 /* In case the realloc fails, we free the memory */
53efb85a
MD
3654 struct lttng_event *new_tmp_event;
3655 size_t new_nbmem;
3656
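/* Double the capacity to amortize the cost of repeated reallocations. */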
3657 new_nbmem = nbmem << 1;
3658 DBG2("Reallocating event list from %zu to %zu entries",
3659 nbmem, new_nbmem);
3660 new_tmp_event = realloc(tmp_event,
3661 new_nbmem * sizeof(struct lttng_event));
3662 if (new_tmp_event == NULL) {
fb45065e
MD
3663 int release_ret;
3664
5b4a0ec0 3665 PERROR("realloc ust app events");
c617c0c6 3666 free(tmp_event);
5b4a0ec0 3667 ret = -ENOMEM;
fb45065e 3668 release_ret = ustctl_release_handle(app->sock, handle);
68313703
JG
3669 if (release_ret < 0 &&
3670 release_ret != -LTTNG_UST_ERR_EXITING &&
3671 release_ret != -EPIPE) {
fb45065e
MD
3672 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3673 }
3674 pthread_mutex_unlock(&app->sock_lock);
5b4a0ec0
DG
3675 goto rcu_error;
3676 }
53efb85a
MD
3677 /* Zero the new memory */
3678 memset(new_tmp_event + nbmem, 0,
3679 (new_nbmem - nbmem) * sizeof(struct lttng_event));
3680 nbmem = new_nbmem;
3681 tmp_event = new_tmp_event;
5b4a0ec0 3682 }
c617c0c6
MD
3683 memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_SYM_NAME_LEN);
3684 tmp_event[count].loglevel = uiter.loglevel;
3685 tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_TRACEPOINT;
3686 tmp_event[count].pid = app->pid;
3687 tmp_event[count].enabled = -1;
5b4a0ec0 3688 count++;
421cb601 3689 }
fb45065e
MD
3690 ret = ustctl_release_handle(app->sock, handle);
3691 pthread_mutex_unlock(&app->sock_lock);
68313703 3692 if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
fb45065e
MD
3693 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
3694 }
421cb601
DG
3695 }
3696
5b4a0ec0 3697 ret = count;
c617c0c6 3698 *events = tmp_event;
421cb601 3699
5b4a0ec0 3700 DBG2("UST app list events done (%zu events)", count);
421cb601 3701
5b4a0ec0
DG
3702rcu_error:
3703 rcu_read_unlock();
421cb601 3704error:
840cb59c 3705 health_code_update();
5b4a0ec0 3706 return ret;
421cb601
DG
3707}
3708
f37d259d
MD
3709/*
3710	 * Fill the fields array with the event fields of all events from all registered apps.
3711 */
3712int ust_app_list_event_fields(struct lttng_event_field **fields)
3713{
3714 int ret, handle;
3715 size_t nbmem, count = 0;
3716 struct lttng_ht_iter iter;
3717 struct ust_app *app;
c617c0c6 3718 struct lttng_event_field *tmp_event;
f37d259d
MD
3719
3720 nbmem = UST_APP_EVENT_LIST_SIZE;
c617c0c6
MD
3721 tmp_event = zmalloc(nbmem * sizeof(struct lttng_event_field));
3722 if (tmp_event == NULL) {
f37d259d
MD
3723 PERROR("zmalloc ust app event fields");
3724 ret = -ENOMEM;
3725 goto error;
3726 }
3727
3728 rcu_read_lock();
3729
3730 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3731 struct lttng_ust_field_iter uiter;
3732
840cb59c 3733 health_code_update();
86acf0da 3734
f37d259d
MD
3735 if (!app->compatible) {
3736 /*
3737	 * TODO: In time, we should notify the caller of this error by
3738	 * telling them that this is a version error.
3739 */
3740 continue;
3741 }
fb45065e 3742 pthread_mutex_lock(&app->sock_lock);
f37d259d
MD
3743 handle = ustctl_tracepoint_field_list(app->sock);
3744 if (handle < 0) {
ffe60014
DG
3745 if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
3746 ERR("UST app list field getting handle failed for app pid %d",
3747 app->pid);
3748 }
fb45065e 3749 pthread_mutex_unlock(&app->sock_lock);
f37d259d
MD
3750 continue;
3751 }
3752
3753 while ((ret = ustctl_tracepoint_field_list_get(app->sock, handle,
fb54cdbf 3754 &uiter)) != -LTTNG_UST_ERR_NOENT) {
ffe60014
DG
3755 /* Handle ustctl error. */
3756 if (ret < 0) {
fb45065e
MD
3757 int release_ret;
3758
a2ba1ab0 3759 if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
ffe60014
DG
3760 ERR("UST app tp list field failed for app %d with ret %d",
3761 app->sock, ret);
3762 } else {
3763 DBG3("UST app tp list field failed. Application is dead");
3757b385
DG
3764 /*
3765 * This is normal behavior, an application can die during the
3766 * creation process. Don't report an error so the execution can
98f595d4 3767 * continue normally. Reset list and count for next app.
3757b385
DG
3768 */
3769 break;
ffe60014 3770 }
98f595d4 3771 free(tmp_event);
fb45065e
MD
3772 release_ret = ustctl_release_handle(app->sock, handle);
3773 pthread_mutex_unlock(&app->sock_lock);
68313703
JG
3774 if (release_ret < 0 &&
3775 release_ret != -LTTNG_UST_ERR_EXITING &&
3776 release_ret != -EPIPE) {
fb45065e
MD
3777 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3778 }
ffe60014
DG
3779 goto rcu_error;
3780 }
3781
840cb59c 3782 health_code_update();
f37d259d 3783 if (count >= nbmem) {
d7b3776f 3784 /* In case the realloc fails, we free the memory */
53efb85a
MD
3785 struct lttng_event_field *new_tmp_event;
3786 size_t new_nbmem;
3787
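/* Double the capacity to amortize the cost of repeated reallocations. */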
3788 new_nbmem = nbmem << 1;
3789 DBG2("Reallocating event field list from %zu to %zu entries",
3790 nbmem, new_nbmem);
3791 new_tmp_event = realloc(tmp_event,
3792 new_nbmem * sizeof(struct lttng_event_field));
3793 if (new_tmp_event == NULL) {
fb45065e
MD
3794 int release_ret;
3795
f37d259d 3796 PERROR("realloc ust app event fields");
c617c0c6 3797 free(tmp_event);
f37d259d 3798 ret = -ENOMEM;
fb45065e
MD
3799 release_ret = ustctl_release_handle(app->sock, handle);
3800 pthread_mutex_unlock(&app->sock_lock);
68313703
JG
3801 if (release_ret &&
3802 release_ret != -LTTNG_UST_ERR_EXITING &&
3803 release_ret != -EPIPE) {
fb45065e
MD
3804 ERR("Error releasing app handle for app %d with ret %d", app->sock, release_ret);
3805 }
f37d259d
MD
3806 goto rcu_error;
3807 }
53efb85a
MD
3808 /* Zero the new memory */
3809 memset(new_tmp_event + nbmem, 0,
3810 (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
3811 nbmem = new_nbmem;
3812 tmp_event = new_tmp_event;
f37d259d 3813 }
f37d259d 3814
c617c0c6 3815 memcpy(tmp_event[count].field_name, uiter.field_name, LTTNG_UST_SYM_NAME_LEN);
2e84128e
DG
3816 /* Mapping between these enums matches 1 to 1. */
3817 tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
c617c0c6 3818 tmp_event[count].nowrite = uiter.nowrite;
f37d259d 3819
c617c0c6
MD
3820 memcpy(tmp_event[count].event.name, uiter.event_name, LTTNG_UST_SYM_NAME_LEN);
3821 tmp_event[count].event.loglevel = uiter.loglevel;
2e84128e 3822 tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
c617c0c6
MD
3823 tmp_event[count].event.pid = app->pid;
3824 tmp_event[count].event.enabled = -1;
f37d259d
MD
3825 count++;
3826 }
fb45065e
MD
3827 ret = ustctl_release_handle(app->sock, handle);
3828 pthread_mutex_unlock(&app->sock_lock);
68313703
JG
3829 if (ret < 0 &&
3830 ret != -LTTNG_UST_ERR_EXITING &&
3831 ret != -EPIPE) {
fb45065e
MD
3832 ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
3833 }
f37d259d
MD
3834 }
3835
3836 ret = count;
c617c0c6 3837 *fields = tmp_event;
f37d259d
MD
3838
3839 DBG2("UST app list event fields done (%zu events)", count);
3840
3841rcu_error:
3842 rcu_read_unlock();
3843error:
840cb59c 3844 health_code_update();
f37d259d
MD
3845 return ret;
3846}
3847
5b4a0ec0
DG
3848/*
3849 * Free and clean all traceable apps of the global list.
36b588ed
MD
3850 *
3851 * Should _NOT_ be called with RCU read-side lock held.
5b4a0ec0
DG
3852 */
3853void ust_app_clean_list(void)
421cb601 3854{
5b4a0ec0 3855 int ret;
659ed79f 3856 struct ust_app *app;
bec39940 3857 struct lttng_ht_iter iter;
421cb601 3858
5b4a0ec0 3859 DBG2("UST app cleaning registered apps hash table");
421cb601 3860
5b4a0ec0 3861 rcu_read_lock();
421cb601 3862
44975bc9
JG
3863 /* Cleanup notify socket hash table */
3864 if (ust_app_ht_by_notify_sock) {
3865 cds_lfht_for_each_entry(ust_app_ht_by_notify_sock->ht, &iter.iter, app,
3866 notify_sock_n.node) {
3867 struct cds_lfht_node *node;
3868 struct ust_app *app;
3869
3870 node = cds_lfht_iter_get_node(&iter.iter);
3871 if (!node) {
3872 continue;
3873 }
3874
3875 app = container_of(node, struct ust_app,
3876 notify_sock_n.node);
3877 ust_app_notify_sock_unregister(app->notify_sock);
3878 }
3879 }
3880
f1b711c4
MD
3881 if (ust_app_ht) {
3882 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3883 ret = lttng_ht_del(ust_app_ht, &iter);
3884 assert(!ret);
3885 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
3886 }
421cb601
DG
3887 }
3888
852d0037 3889 /* Cleanup socket hash table */
f1b711c4
MD
3890 if (ust_app_ht_by_sock) {
3891 cds_lfht_for_each_entry(ust_app_ht_by_sock->ht, &iter.iter, app,
3892 sock_n.node) {
3893 ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
3894 assert(!ret);
3895 }
bec39940 3896 }
852d0037 3897
36b588ed 3898 rcu_read_unlock();
d88aee68 3899
bec39940 3900 /* Destroy is done only when the ht is empty */
f1b711c4
MD
3901 if (ust_app_ht) {
3902 ht_cleanup_push(ust_app_ht);
3903 }
3904 if (ust_app_ht_by_sock) {
3905 ht_cleanup_push(ust_app_ht_by_sock);
3906 }
3907 if (ust_app_ht_by_notify_sock) {
3908 ht_cleanup_push(ust_app_ht_by_notify_sock);
3909 }
5b4a0ec0
DG
3910}
3911
3912/*
3913	 * Init UST app hash tables.
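 * Return 0 on success or -1 if a hash table cannot be allocated.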
3914 */
57703f6e 3915int ust_app_ht_alloc(void)
5b4a0ec0 3916{
bec39940 3917 ust_app_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
57703f6e
MD
3918 if (!ust_app_ht) {
3919 return -1;
3920 }
852d0037 3921 ust_app_ht_by_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
57703f6e
MD
3922 if (!ust_app_ht_by_sock) {
3923 return -1;
3924 }
d0b96690 3925 ust_app_ht_by_notify_sock = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
57703f6e
MD
3926 if (!ust_app_ht_by_notify_sock) {
3927 return -1;
3928 }
3929 return 0;
421cb601
DG
3930}
3931
78f0bacd
DG
3932/*
3933 * For a specific UST session, disable the channel for all registered apps.
3934 */
35a9059d 3935int ust_app_disable_channel_glb(struct ltt_ust_session *usess,
78f0bacd
DG
3936 struct ltt_ust_channel *uchan)
3937{
3938 int ret = 0;
bec39940
DG
3939 struct lttng_ht_iter iter;
3940 struct lttng_ht_node_str *ua_chan_node;
78f0bacd
DG
3941 struct ust_app *app;
3942 struct ust_app_session *ua_sess;
8535a6d9 3943 struct ust_app_channel *ua_chan;
78f0bacd 3944
0ce9aa93 3945 assert(usess->active);
d9bf3ca4 3946 DBG2("UST app disabling channel %s from global domain for session id %" PRIu64,
a991f516 3947 uchan->name, usess->id);
78f0bacd
DG
3948
3949 rcu_read_lock();
3950
3951 /* For every registered applications */
852d0037 3952 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
bec39940 3953 struct lttng_ht_iter uiter;
e0c7ec2b
DG
3954 if (!app->compatible) {
3955 /*
3956	 * TODO: In time, we should notify the caller of this error by
3957	 * telling them that this is a version error.
3958 */
3959 continue;
3960 }
78f0bacd
DG
3961 ua_sess = lookup_session_by_app(usess, app);
3962 if (ua_sess == NULL) {
3963 continue;
3964 }
3965
8535a6d9 3966 /* Get channel */
bec39940
DG
3967 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
3968 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
8535a6d9
DG
3969	/* If the session is found for the app, the channel must be there */
3970 assert(ua_chan_node);
3971
3972 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
3973	/* The channel must not already be disabled */
3974 assert(ua_chan->enabled == 1);
3975
3976 /* Disable channel onto application */
3977 ret = disable_ust_app_channel(ua_sess, ua_chan, app);
78f0bacd
DG
3978 if (ret < 0) {
3979 /* XXX: We might want to report this error at some point... */
3980 continue;
3981 }
3982 }
3983
3984 rcu_read_unlock();
78f0bacd
DG
3985 return ret;
3986}
3987
3988/*
3989 * For a specific UST session, enable the channel for all registered apps.
3990 */
35a9059d 3991int ust_app_enable_channel_glb(struct ltt_ust_session *usess,
78f0bacd
DG
3992 struct ltt_ust_channel *uchan)
3993{
3994 int ret = 0;
bec39940 3995 struct lttng_ht_iter iter;
78f0bacd
DG
3996 struct ust_app *app;
3997 struct ust_app_session *ua_sess;
3998
0ce9aa93 3999 assert(usess->active);
d9bf3ca4 4000 DBG2("UST app enabling channel %s to global domain for session id %" PRIu64,
a991f516 4001 uchan->name, usess->id);
78f0bacd
DG
4002
4003 rcu_read_lock();
4004
4005 /* For every registered applications */
852d0037 4006 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
4007 if (!app->compatible) {
4008 /*
4009	 * TODO: In time, we should notify the caller of this error by
4010	 * telling them that this is a version error.
4011 */
4012 continue;
4013 }
78f0bacd
DG
4014 ua_sess = lookup_session_by_app(usess, app);
4015 if (ua_sess == NULL) {
4016 continue;
4017 }
4018
4019 /* Enable channel onto application */
4020 ret = enable_ust_app_channel(ua_sess, uchan, app);
4021 if (ret < 0) {
4022 /* XXX: We might want to report this error at some point... */
4023 continue;
4024 }
4025 }
4026
4027 rcu_read_unlock();
78f0bacd
DG
4028 return ret;
4029}
4030
b0a40d28
DG
4031/*
4032 * Disable an event in a channel and for a specific session.
4033 */
35a9059d
DG
4034int ust_app_disable_event_glb(struct ltt_ust_session *usess,
4035 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
b0a40d28
DG
4036{
4037 int ret = 0;
bec39940 4038 struct lttng_ht_iter iter, uiter;
700c5a9d 4039 struct lttng_ht_node_str *ua_chan_node;
b0a40d28
DG
4040 struct ust_app *app;
4041 struct ust_app_session *ua_sess;
4042 struct ust_app_channel *ua_chan;
4043 struct ust_app_event *ua_event;
4044
0ce9aa93 4045 assert(usess->active);
b0a40d28 4046 DBG("UST app disabling event %s for all apps in channel "
d9bf3ca4
MD
4047 "%s for session id %" PRIu64,
4048 uevent->attr.name, uchan->name, usess->id);
b0a40d28
DG
4049
4050 rcu_read_lock();
4051
4052 /* For all registered applications */
852d0037 4053 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
4054 if (!app->compatible) {
4055 /*
4056	 * TODO: In time, we should notify the caller of this error by
4057	 * telling them that this is a version error.
4058 */
4059 continue;
4060 }
b0a40d28
DG
4061 ua_sess = lookup_session_by_app(usess, app);
4062 if (ua_sess == NULL) {
4063 /* Next app */
4064 continue;
4065 }
4066
4067 /* Lookup channel in the ust app session */
bec39940
DG
4068 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4069 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
b0a40d28 4070 if (ua_chan_node == NULL) {
d9bf3ca4 4071 DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
852d0037 4072 "Skipping", uchan->name, usess->id, app->pid);
b0a40d28
DG
4073 continue;
4074 }
4075 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4076
700c5a9d
JR
4077 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
4078 uevent->filter, uevent->attr.loglevel,
4079 uevent->exclusion);
4080 if (ua_event == NULL) {
b0a40d28 4081 DBG2("Event %s not found in channel %s for app pid %d."
852d0037 4082 "Skipping", uevent->attr.name, uchan->name, app->pid);
b0a40d28
DG
4083 continue;
4084 }
b0a40d28 4085
7f79d3a1 4086 ret = disable_ust_app_event(ua_sess, ua_event, app);
b0a40d28
DG
4087 if (ret < 0) {
4088 /* XXX: Report error someday... */
4089 continue;
4090 }
4091 }
4092
4093 rcu_read_unlock();
0ce9aa93
JG
4094 return ret;
4095}
4096
4097/* The ua_sess lock must be held by the caller. */
4098static
4099int ust_app_channel_create(struct ltt_ust_session *usess,
4100 struct ust_app_session *ua_sess,
4101 struct ltt_ust_channel *uchan, struct ust_app *app,
4102 struct ust_app_channel **_ua_chan)
4103{
4104 int ret = 0;
4105 struct ust_app_channel *ua_chan = NULL;
4106
4107 assert(ua_sess);
4108 ASSERT_LOCKED(ua_sess->lock);
4109
4110 if (!strncmp(uchan->name, DEFAULT_METADATA_NAME,
4111 sizeof(uchan->name))) {
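/* Only record the metadata channel's attributes; the channel itself is created later with the session's metadata. */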
4112 copy_channel_attr_to_ustctl(&ua_sess->metadata_attr,
4113 &uchan->attr);
4114 ret = 0;
4115 } else {
4116 struct ltt_ust_context *uctx = NULL;
4117
4118 /*
4119 * Create channel onto application and synchronize its
4120 * configuration.
4121 */
4122 ret = ust_app_channel_allocate(ua_sess, uchan,
4123 LTTNG_UST_CHAN_PER_CPU, usess,
4124 &ua_chan);
14a5bf28
JR
4125 if (ret < 0) {
4126 goto error;
4127 }
4128
4129 ret = ust_app_channel_send(app, usess,
4130 ua_sess, ua_chan);
4131 if (ret) {
4132 goto error;
0ce9aa93
JG
4133 }
4134
4135 /* Add contexts. */
4136 cds_list_for_each_entry(uctx, &uchan->ctx_list, list) {
4137 ret = create_ust_app_channel_context(ua_chan,
4138 &uctx->ctx, app);
4139 if (ret) {
14a5bf28 4140 goto error;
0ce9aa93
JG
4141 }
4142 }
4143 }
14a5bf28
JR
4144
4145error:
0ce9aa93
JG
4146 if (ret < 0) {
4147 switch (ret) {
4148 case -ENOTCONN:
4149 /*
4150 * The application's socket is not valid. Either a bad socket
4151 * or a timeout on it. We can't inform the caller that for a
4152	 * specific app, the session failed, so let's continue here.
4153 */
4154 ret = 0; /* Not an error. */
4155 break;
4156 case -ENOMEM:
4157 default:
4158 break;
4159 }
4160 }
14a5bf28 4161
0ce9aa93
JG
4162 if (ret == 0 && _ua_chan) {
4163 /*
4164 * Only return the application's channel on success. Note
4165 * that the channel can still be part of the application's
4166 * channel hashtable on error.
4167 */
4168 *_ua_chan = ua_chan;
4169 }
b0a40d28
DG
4170 return ret;
4171}
4172
421cb601 4173/*
5b4a0ec0 4174 * For a specific UST session, create the channel for all registered apps.
421cb601 4175 */
35a9059d 4176int ust_app_create_channel_glb(struct ltt_ust_session *usess,
48842b30
DG
4177 struct ltt_ust_channel *uchan)
4178{
0ce9aa93
JG
4179 int ret = 0;
4180 struct cds_lfht_iter iter;
48842b30 4181 struct ust_app *app;
48842b30 4182
fc34caaa 4183 assert(usess);
0ce9aa93 4184 assert(usess->active);
fc34caaa 4185 assert(uchan);
421cb601 4186
d9bf3ca4 4187 DBG2("UST app adding channel %s to UST domain for session id %" PRIu64,
a991f516 4188 uchan->name, usess->id);
48842b30
DG
4189
4190 rcu_read_lock();
5b4a0ec0 4191 /* For every registered applications */
0ce9aa93
JG
4192 cds_lfht_for_each_entry(ust_app_ht->ht, &iter, app, pid_n.node) {
4193 struct ust_app_session *ua_sess;
4194 int session_was_created = 0;
4195
4196 if (!app->compatible ||
4197 !trace_ust_pid_tracker_lookup(usess, app->pid)) {
4198 goto error_rcu_unlock;
a9ad0c8f
MD
4199 }
4200
edb67388
DG
4201 /*
4202 * Create session on the tracer side and add it to app session HT. Note
4203 * that if session exist, it will simply return a pointer to the ust
4204 * app session.
4205 */
0ce9aa93
JG
4206 ret = find_or_create_ust_app_session(usess, app, &ua_sess,
4207 &session_was_created);
3d8ca23b
DG
4208 if (ret < 0) {
4209 switch (ret) {
4210 case -ENOTCONN:
4211 /*
0ce9aa93
JG
4212 * The application's socket is not valid. Either a bad
4213 * socket or a timeout on it. We can't inform the caller
4214	 * that for a specific app, the session failed, so let's
4215 * continue here; it is not an error.
3d8ca23b 4216 */
0ce9aa93
JG
4217 ret = 0;
4218 goto error_rcu_unlock;
3d8ca23b
DG
4219 case -ENOMEM:
4220 default:
4221 goto error_rcu_unlock;
4222 }
48842b30 4223 }
b161602a
MD
4224
4225 if (ua_sess->deleted) {
b161602a
MD
4226 continue;
4227 }
0ce9aa93
JG
4228 ret = ust_app_channel_create(usess, ua_sess, uchan, app, NULL);
4229 if (ret) {
4230 if (session_was_created) {
d0b96690 4231 destroy_app_session(app, ua_sess);
3d8ca23b 4232 }
56c23a67 4233 /* Continue to the next application. */
48842b30 4234 }
48842b30 4235 }
5b4a0ec0 4236
95e047ff 4237error_rcu_unlock:
48842b30 4238 rcu_read_unlock();
3c14c33f 4239 return ret;
48842b30
DG
4240}
4241
5b4a0ec0 4242/*
edb67388 4243 * Enable event for a specific session and channel on the tracer.
5b4a0ec0 4244 */
35a9059d 4245int ust_app_enable_event_glb(struct ltt_ust_session *usess,
48842b30
DG
4246 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4247{
4248 int ret = 0;
bec39940 4249 struct lttng_ht_iter iter, uiter;
18eace3b 4250 struct lttng_ht_node_str *ua_chan_node;
48842b30
DG
4251 struct ust_app *app;
4252 struct ust_app_session *ua_sess;
4253 struct ust_app_channel *ua_chan;
4254 struct ust_app_event *ua_event;
48842b30 4255
0ce9aa93 4256 assert(usess->active);
d9bf3ca4 4257 DBG("UST app enabling event %s for all apps for session id %" PRIu64,
a991f516 4258 uevent->attr.name, usess->id);
48842b30 4259
edb67388
DG
4260 /*
4261 * NOTE: At this point, this function is called only if the session and
4262	 * channel passed are already created for all apps and also enabled on
4263	 * the tracer.
4264 */
4265
48842b30 4266 rcu_read_lock();
421cb601
DG
4267
4268 /* For all registered applications */
852d0037 4269 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
4270 if (!app->compatible) {
4271 /*
4272	 * TODO: In time, we should notify the caller of this error by
4273	 * telling them that this is a version error.
4274 */
4275 continue;
4276 }
edb67388 4277 ua_sess = lookup_session_by_app(usess, app);
c4a1715b
DG
4278 if (!ua_sess) {
4279	/* The application has a problem or is probably dead. */
4280 continue;
4281 }
ba767faf 4282
d0b96690
DG
4283 pthread_mutex_lock(&ua_sess->lock);
4284
b161602a
MD
4285 if (ua_sess->deleted) {
4286 pthread_mutex_unlock(&ua_sess->lock);
4287 continue;
4288 }
4289
edb67388 4290 /* Lookup channel in the ust app session */
bec39940
DG
4291 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4292 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
a7169585
MD
4293 /*
4294	 * It is possible that the channel cannot be found if
4295 * the channel/event creation occurs concurrently with
4296 * an application exit.
4297 */
4298 if (!ua_chan_node) {
4299 pthread_mutex_unlock(&ua_sess->lock);
4300 continue;
4301 }
edb67388
DG
4302
4303 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4304
18eace3b
DG
4305 /* Get event node */
4306 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
39c5a3a7 4307 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
18eace3b 4308 if (ua_event == NULL) {
7f79d3a1 4309 DBG3("UST app enable event %s not found for app PID %d."
852d0037 4310 "Skipping app", uevent->attr.name, app->pid);
d0b96690 4311 goto next_app;
35a9059d 4312 }
35a9059d
DG
4313
4314 ret = enable_ust_app_event(ua_sess, ua_event, app);
4315 if (ret < 0) {
d0b96690 4316 pthread_mutex_unlock(&ua_sess->lock);
7f79d3a1 4317 goto error;
48842b30 4318 }
d0b96690
DG
4319 next_app:
4320 pthread_mutex_unlock(&ua_sess->lock);
edb67388
DG
4321 }
4322
7f79d3a1 4323error:
edb67388 4324 rcu_read_unlock();
edb67388
DG
4325 return ret;
4326}
4327
4328/*
4329 * For a specific existing UST session and UST channel, creates the event for
4330 * all registered apps.
4331 */
35a9059d 4332int ust_app_create_event_glb(struct ltt_ust_session *usess,
edb67388
DG
4333 struct ltt_ust_channel *uchan, struct ltt_ust_event *uevent)
4334{
4335 int ret = 0;
bec39940
DG
4336 struct lttng_ht_iter iter, uiter;
4337 struct lttng_ht_node_str *ua_chan_node;
edb67388
DG
4338 struct ust_app *app;
4339 struct ust_app_session *ua_sess;
4340 struct ust_app_channel *ua_chan;
4341
0ce9aa93 4342 assert(usess->active);
d9bf3ca4 4343 DBG("UST app creating event %s for all apps for session id %" PRIu64,
a991f516 4344 uevent->attr.name, usess->id);
edb67388 4345
edb67388
DG
4346 rcu_read_lock();
4347
4348 /* For all registered applications */
852d0037 4349 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
4350 if (!app->compatible) {
4351 /*
4352	 * TODO: In time, we should notify the caller of this error by
4353	 * telling them that this is a version error.
4354 */
4355 continue;
4356 }
edb67388 4357 ua_sess = lookup_session_by_app(usess, app);
c4a1715b
DG
4358 if (!ua_sess) {
4359	/* The application has a problem or is probably dead. */
4360 continue;
4361 }
48842b30 4362
d0b96690 4363 pthread_mutex_lock(&ua_sess->lock);
b161602a
MD
4364
4365 if (ua_sess->deleted) {
4366 pthread_mutex_unlock(&ua_sess->lock);
4367 continue;
4368 }
4369
48842b30 4370 /* Lookup channel in the ust app session */
bec39940
DG
4371 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
4372 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
edb67388
DG
4373 /* If the channel is not found, there is a code flow error */
4374 assert(ua_chan_node);
4375
48842b30
DG
4376 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
4377
edb67388 4378 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
d0b96690 4379 pthread_mutex_unlock(&ua_sess->lock);
edb67388 4380 if (ret < 0) {
49c336c1 4381 if (ret != -LTTNG_UST_ERR_EXIST) {
fc34caaa
DG
4382 /* Possible value at this point: -ENOMEM. If so, we stop! */
4383 break;
4384 }
4385 DBG2("UST app event %s already exist on app PID %d",
852d0037 4386 uevent->attr.name, app->pid);
5b4a0ec0 4387 continue;
48842b30 4388 }
48842b30 4389 }
5b4a0ec0 4390
48842b30 4391 rcu_read_unlock();
48842b30
DG
4392 return ret;
4393}
4394
5b4a0ec0
DG
4395/*
4396 * Start tracing for a specific UST session and app.
fad1ed2f
JR
4397 *
4398 * Called with UST app session lock held.
4399 *
5b4a0ec0 4400 */
b34cbebf 4401static
421cb601 4402int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
48842b30
DG
4403{
4404 int ret = 0;
48842b30 4405 struct ust_app_session *ua_sess;
48842b30 4406
852d0037 4407 DBG("Starting tracing for ust app pid %d", app->pid);
5cf5d0e7 4408
509cbaf8
MD
4409 rcu_read_lock();
4410
e0c7ec2b
DG
4411 if (!app->compatible) {
4412 goto end;
4413 }
4414
421cb601
DG
4415 ua_sess = lookup_session_by_app(usess, app);
4416 if (ua_sess == NULL) {
d42f20df
DG
4417 /* The session is in teardown process. Ignore and continue. */
4418 goto end;
421cb601 4419 }
48842b30 4420
d0b96690
DG
4421 pthread_mutex_lock(&ua_sess->lock);
4422
b161602a
MD
4423 if (ua_sess->deleted) {
4424 pthread_mutex_unlock(&ua_sess->lock);
4425 goto end;
4426 }
4427
aea829b3
DG
4428 /* Upon restart, we skip the setup, already done */
4429 if (ua_sess->started) {
8be98f9a 4430 goto skip_setup;
aea829b3 4431 }
8be98f9a 4432
840cb59c 4433 health_code_update();
86acf0da 4434
8be98f9a 4435skip_setup:
421cb601 4436	/* This starts the UST tracing */
fb45065e 4437 pthread_mutex_lock(&app->sock_lock);
852d0037 4438 ret = ustctl_start_session(app->sock, ua_sess->handle);
fb45065e 4439 pthread_mutex_unlock(&app->sock_lock);
421cb601 4440 if (ret < 0) {
ffe60014
DG
4441 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4442 ERR("Error starting tracing for app pid: %d (ret: %d)",
4443 app->pid, ret);
4444 } else {
4445 DBG("UST app start session failed. Application is dead.");
3757b385
DG
4446 /*
4447 * This is normal behavior, an application can die during the
4448 * creation process. Don't report an error so the execution can
4449 * continue normally.
4450 */
4451 pthread_mutex_unlock(&ua_sess->lock);
4452 goto end;
ffe60014 4453 }
d0b96690 4454 goto error_unlock;
421cb601 4455 }
5b4a0ec0 4456
55c3953d
DG
4457 /* Indicate that the session has been started once */
4458 ua_sess->started = 1;
4459
d0b96690
DG
4460 pthread_mutex_unlock(&ua_sess->lock);
4461
840cb59c 4462 health_code_update();
86acf0da 4463
421cb601 4464 /* Quiescent wait after starting trace */
fb45065e 4465 pthread_mutex_lock(&app->sock_lock);
ffe60014 4466 ret = ustctl_wait_quiescent(app->sock);
fb45065e 4467 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
4468 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4469 ERR("UST app wait quiescent failed for app pid %d ret %d",
4470 app->pid, ret);
4471 }
48842b30 4472
e0c7ec2b
DG
4473end:
4474 rcu_read_unlock();
840cb59c 4475 health_code_update();
421cb601 4476 return 0;
48842b30 4477
d0b96690
DG
4478error_unlock:
4479 pthread_mutex_unlock(&ua_sess->lock);
509cbaf8 4480 rcu_read_unlock();
840cb59c 4481 health_code_update();
421cb601
DG
4482 return -1;
4483}
48842b30 4484
8be98f9a
MD
4485/*
4486 * Stop tracing for a specific UST session and app.
4487 */
b34cbebf 4488static
8be98f9a
MD
4489int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
4490{
4491 int ret = 0;
4492 struct ust_app_session *ua_sess;
7972aab2 4493 struct ust_registry_session *registry;
8be98f9a 4494
852d0037 4495 DBG("Stopping tracing for ust app pid %d", app->pid);
8be98f9a
MD
4496
4497 rcu_read_lock();
4498
e0c7ec2b 4499 if (!app->compatible) {
d88aee68 4500 goto end_no_session;
e0c7ec2b
DG
4501 }
4502
8be98f9a
MD
4503 ua_sess = lookup_session_by_app(usess, app);
4504 if (ua_sess == NULL) {
d88aee68 4505 goto end_no_session;
8be98f9a
MD
4506 }
4507
d88aee68
DG
4508 pthread_mutex_lock(&ua_sess->lock);
4509
b161602a
MD
4510 if (ua_sess->deleted) {
4511 pthread_mutex_unlock(&ua_sess->lock);
4512 goto end_no_session;
4513 }
4514
9bc07046
DG
4515 /*
4516 * If started = 0, it means that stop trace has been called for a session
c45536e1
DG
4517	 * that was never started. It's possible since we can have a failed start
4518 * from either the application manager thread or the command thread. Simply
4519 * indicate that this is a stop error.
9bc07046 4520 */
f9dfc3d9 4521 if (!ua_sess->started) {
c45536e1
DG
4522 goto error_rcu_unlock;
4523 }
7db205b5 4524
840cb59c 4525 health_code_update();
86acf0da 4526
9d6c7d3f 4527 /* This inhibits UST tracing */
fb45065e 4528 pthread_mutex_lock(&app->sock_lock);
852d0037 4529 ret = ustctl_stop_session(app->sock, ua_sess->handle);
fb45065e 4530 pthread_mutex_unlock(&app->sock_lock);
9d6c7d3f 4531 if (ret < 0) {
ffe60014
DG
4532 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4533 ERR("Error stopping tracing for app pid: %d (ret: %d)",
4534 app->pid, ret);
4535 } else {
4536 DBG("UST app stop session failed. Application is dead.");
3757b385
DG
4537 /*
4538 * This is normal behavior, an application can die during the
4539 * creation process. Don't report an error so the execution can
4540 * continue normally.
4541 */
4542 goto end_unlock;
ffe60014 4543 }
9d6c7d3f
DG
4544 goto error_rcu_unlock;
4545 }
4546
840cb59c 4547 health_code_update();
86acf0da 4548
9d6c7d3f 4549 /* Quiescent wait after stopping trace */
fb45065e 4550 pthread_mutex_lock(&app->sock_lock);
ffe60014 4551 ret = ustctl_wait_quiescent(app->sock);
fb45065e 4552 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
4553 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4554 ERR("UST app wait quiescent failed for app pid %d ret %d",
4555 app->pid, ret);
4556 }
9d6c7d3f 4557
840cb59c 4558 health_code_update();
86acf0da 4559
b34cbebf 4560 registry = get_session_registry(ua_sess);
fad1ed2f
JR
4561
4562	/* The UST app session lock is held; the registry shall not be null. */
b34cbebf 4563 assert(registry);
1b532a60 4564
ce34fcd0
MD
4565 /* Push metadata for application before freeing the application. */
4566 (void) push_metadata(registry, ua_sess->consumer);
b34cbebf 4567
3757b385 4568end_unlock:
b34cbebf
MD
4569 pthread_mutex_unlock(&ua_sess->lock);
4570end_no_session:
4571 rcu_read_unlock();
4572 health_code_update();
4573 return 0;
4574
4575error_rcu_unlock:
4576 pthread_mutex_unlock(&ua_sess->lock);
4577 rcu_read_unlock();
4578 health_code_update();
4579 return -1;
4580}
4581
b34cbebf 4582static
c4b88406
MD
4583int ust_app_flush_app_session(struct ust_app *app,
4584 struct ust_app_session *ua_sess)
b34cbebf 4585{
c4b88406 4586 int ret, retval = 0;
b34cbebf 4587 struct lttng_ht_iter iter;
b34cbebf 4588 struct ust_app_channel *ua_chan;
c4b88406 4589 struct consumer_socket *socket;
b34cbebf 4590
c4b88406 4591 DBG("Flushing app session buffers for ust app pid %d", app->pid);
b34cbebf
MD
4592
4593 rcu_read_lock();
4594
4595 if (!app->compatible) {
c4b88406 4596 goto end_not_compatible;
b34cbebf
MD
4597 }
4598
4599 pthread_mutex_lock(&ua_sess->lock);
4600
b161602a
MD
4601 if (ua_sess->deleted) {
4602 goto end_deleted;
4603 }
4604
b34cbebf
MD
4605 health_code_update();
4606
9d6c7d3f 4607 /* Flushing buffers */
c4b88406
MD
4608 socket = consumer_find_socket_by_bitness(app->bits_per_long,
4609 ua_sess->consumer);
ce34fcd0
MD
4610
4611 /* Flush buffers and push metadata. */
4612 switch (ua_sess->buffer_type) {
4613 case LTTNG_BUFFER_PER_PID:
4614 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter, ua_chan,
4615 node.node) {
4616 health_code_update();
ce34fcd0
MD
4617 ret = consumer_flush_channel(socket, ua_chan->key);
4618 if (ret) {
4619 ERR("Error flushing consumer channel");
4620 retval = -1;
4621 continue;
4622 }
8be98f9a 4623 }
ce34fcd0
MD
4624 break;
4625 case LTTNG_BUFFER_PER_UID:
4626 default:
4627 assert(0);
4628 break;
8be98f9a 4629 }
8be98f9a 4630
840cb59c 4631 health_code_update();
86acf0da 4632
b161602a 4633end_deleted:
d88aee68 4634 pthread_mutex_unlock(&ua_sess->lock);
ce34fcd0 4635
c4b88406
MD
4636end_not_compatible:
4637 rcu_read_unlock();
4638 health_code_update();
4639 return retval;
4640}
4641
4642/*
ce34fcd0
MD
4643 * Flush buffers for all applications for a specific UST session.
4644 * Called with UST session lock held.
c4b88406
MD
4645 */
4646static
ce34fcd0 4647int ust_app_flush_session(struct ltt_ust_session *usess)
c4b88406
MD
4648
4649{
99b1411c 4650 int ret = 0;
c4b88406 4651
ce34fcd0 4652 DBG("Flushing session buffers for all ust apps");
c4b88406
MD
4653
4654 rcu_read_lock();
4655
ce34fcd0
MD
4656 /* Flush buffers and push metadata. */
4657 switch (usess->buffer_type) {
4658 case LTTNG_BUFFER_PER_UID:
4659 {
4660 struct buffer_reg_uid *reg;
4661 struct lttng_ht_iter iter;
4662
4663 /* Flush all per UID buffers associated to that session. */
4664 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
4665 struct ust_registry_session *ust_session_reg;
4666 struct buffer_reg_channel *reg_chan;
4667 struct consumer_socket *socket;
4668
4669	/* Get the consumer socket to use to push the metadata. */
4670 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
4671 usess->consumer);
4672 if (!socket) {
4673 /* Ignore request if no consumer is found for the session. */
4674 continue;
4675 }
4676
4677 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
4678 reg_chan, node.node) {
4679 /*
4680 * The following call will print error values so the return
4681 * code is of little importance because whatever happens, we
4682 * have to try them all.
4683 */
4684 (void) consumer_flush_channel(socket, reg_chan->consumer_key);
4685 }
4686
4687 ust_session_reg = reg->registry->reg.ust;
4688 /* Push metadata. */
4689 (void) push_metadata(ust_session_reg, usess->consumer);
4690 }
ce34fcd0
MD
4691 break;
4692 }
4693 case LTTNG_BUFFER_PER_PID:
4694 {
4695 struct ust_app_session *ua_sess;
4696 struct lttng_ht_iter iter;
4697 struct ust_app *app;
4698
4699 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4700 ua_sess = lookup_session_by_app(usess, app);
4701 if (ua_sess == NULL) {
4702 continue;
4703 }
4704 (void) ust_app_flush_app_session(app, ua_sess);
4705 }
4706 break;
4707 }
4708 default:
99b1411c 4709 ret = -1;
ce34fcd0
MD
4710 assert(0);
4711 break;
c4b88406 4712 }
c4b88406 4713
7db205b5 4714 rcu_read_unlock();
840cb59c 4715 health_code_update();
c4b88406 4716 return ret;
8be98f9a
MD
4717}
4718
0dd01979
MD
4719static
4720int ust_app_clear_quiescent_app_session(struct ust_app *app,
4721 struct ust_app_session *ua_sess)
4722{
4723 int ret = 0;
4724 struct lttng_ht_iter iter;
4725 struct ust_app_channel *ua_chan;
4726 struct consumer_socket *socket;
4727
4728 DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
4729
4730 rcu_read_lock();
4731
4732 if (!app->compatible) {
4733 goto end_not_compatible;
4734 }
4735
4736 pthread_mutex_lock(&ua_sess->lock);
4737
4738 if (ua_sess->deleted) {
4739 goto end_unlock;
4740 }
4741
4742 health_code_update();
4743
4744 socket = consumer_find_socket_by_bitness(app->bits_per_long,
4745 ua_sess->consumer);
4746 if (!socket) {
4747 ERR("Failed to find consumer (%" PRIu32 ") socket",
4748 app->bits_per_long);
4749 ret = -1;
4750 goto end_unlock;
4751 }
4752
4753 /* Clear quiescent state. */
4754 switch (ua_sess->buffer_type) {
4755 case LTTNG_BUFFER_PER_PID:
4756 cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
4757 ua_chan, node.node) {
4758 health_code_update();
4759 ret = consumer_clear_quiescent_channel(socket,
4760 ua_chan->key);
4761 if (ret) {
4762 ERR("Error clearing quiescent state for consumer channel");
4763 ret = -1;
4764 continue;
4765 }
4766 }
4767 break;
4768 case LTTNG_BUFFER_PER_UID:
4769 default:
4770 assert(0);
4771 ret = -1;
4772 break;
4773 }
4774
4775 health_code_update();
4776
4777end_unlock:
4778 pthread_mutex_unlock(&ua_sess->lock);
4779
4780end_not_compatible:
4781 rcu_read_unlock();
4782 health_code_update();
4783 return ret;
4784}
4785
4786/*
4787 * Clear quiescent state in each stream for all applications for a
4788 * specific UST session.
4789 * Called with UST session lock held.
4790 */
4791static
4792int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
4793
4794{
4795 int ret = 0;
4796
4797 DBG("Clearing stream quiescent state for all ust apps");
4798
4799 rcu_read_lock();
4800
4801 switch (usess->buffer_type) {
4802 case LTTNG_BUFFER_PER_UID:
4803 {
4804 struct lttng_ht_iter iter;
4805 struct buffer_reg_uid *reg;
4806
4807 /*
4808 * Clear quiescent for all per UID buffers associated to
4809 * that session.
4810 */
4811 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
4812 struct consumer_socket *socket;
4813 struct buffer_reg_channel *reg_chan;
4814
4815 /* Get associated consumer socket.*/
4816 socket = consumer_find_socket_by_bitness(
4817 reg->bits_per_long, usess->consumer);
4818 if (!socket) {
4819 /*
4820 * Ignore request if no consumer is found for
4821 * the session.
4822 */
4823 continue;
4824 }
4825
4826 cds_lfht_for_each_entry(reg->registry->channels->ht,
4827 &iter.iter, reg_chan, node.node) {
4828 /*
4829 * The following call will print error values so
4830 * the return code is of little importance
4831 * because whatever happens, we have to try them
4832 * all.
4833 */
4834 (void) consumer_clear_quiescent_channel(socket,
4835 reg_chan->consumer_key);
4836 }
4837 }
4838 break;
4839 }
4840 case LTTNG_BUFFER_PER_PID:
4841 {
4842 struct ust_app_session *ua_sess;
4843 struct lttng_ht_iter iter;
4844 struct ust_app *app;
4845
4846 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
4847 pid_n.node) {
4848 ua_sess = lookup_session_by_app(usess, app);
4849 if (ua_sess == NULL) {
4850 continue;
4851 }
4852 (void) ust_app_clear_quiescent_app_session(app,
4853 ua_sess);
4854 }
4855 break;
4856 }
4857 default:
4858 ret = -1;
4859 assert(0);
4860 break;
4861 }
4862
4863 rcu_read_unlock();
4864 health_code_update();
4865 return ret;
4866}
4867
84cd17c6
MD
4868/*
4869 * Destroy a specific UST session in apps.
4870 */
3353de95 4871static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
84cd17c6 4872{
ffe60014 4873 int ret;
84cd17c6 4874 struct ust_app_session *ua_sess;
bec39940 4875 struct lttng_ht_iter iter;
d9bf3ca4 4876 struct lttng_ht_node_u64 *node;
84cd17c6 4877
852d0037 4878 DBG("Destroy tracing for ust app pid %d", app->pid);
84cd17c6
MD
4879
4880 rcu_read_lock();
4881
e0c7ec2b
DG
4882 if (!app->compatible) {
4883 goto end;
4884 }
4885
84cd17c6 4886 __lookup_session_by_app(usess, app, &iter);
d9bf3ca4 4887 node = lttng_ht_iter_get_node_u64(&iter);
84cd17c6 4888 if (node == NULL) {
d42f20df
DG
4889 /* Session is being or is deleted. */
4890 goto end;
84cd17c6
MD
4891 }
4892 ua_sess = caa_container_of(node, struct ust_app_session, node);
c4a1715b 4893
840cb59c 4894 health_code_update();
d0b96690 4895 destroy_app_session(app, ua_sess);
84cd17c6 4896
840cb59c 4897 health_code_update();
7db205b5 4898
84cd17c6 4899 /* Quiescent wait after stopping trace */
fb45065e 4900 pthread_mutex_lock(&app->sock_lock);
ffe60014 4901 ret = ustctl_wait_quiescent(app->sock);
fb45065e 4902 pthread_mutex_unlock(&app->sock_lock);
ffe60014
DG
4903 if (ret < 0 && ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
4904 ERR("UST app wait quiescent failed for app pid %d ret %d",
4905 app->pid, ret);
4906 }
e0c7ec2b
DG
4907end:
4908 rcu_read_unlock();
840cb59c 4909 health_code_update();
84cd17c6 4910 return 0;
84cd17c6
MD
4911}
4912
5b4a0ec0
DG
4913/*
4914 * Start tracing for the UST session.
4915 */
421cb601
DG
4916int ust_app_start_trace_all(struct ltt_ust_session *usess)
4917{
bec39940 4918 struct lttng_ht_iter iter;
421cb601 4919 struct ust_app *app;
48842b30 4920
421cb601
DG
4921 DBG("Starting all UST traces");
4922
1053a273
MD
4923 /*
4924 * Even though the start trace might fail, flag this session active so
4925	 * other applications coming in are started by default.
4926 */
4927 usess->active = 1;
4928
421cb601 4929 rcu_read_lock();
421cb601 4930
0dd01979
MD
4931 /*
4932 * In a start-stop-start use-case, we need to clear the quiescent state
4933 * of each channel set by the prior stop command, thus ensuring that a
4934 * following stop or destroy is sure to grab a timestamp_end near those
4935 * operations, even if the packet is empty.
4936 */
4937 (void) ust_app_clear_quiescent_session(usess);
4938
60235eb8
MD
4939 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4940 ust_app_global_update(usess, app);
4941 }
4942
48842b30
DG
4943 rcu_read_unlock();
4944
4945 return 0;
4946}
487cf67c 4947
8be98f9a
MD
4948/*
4949	 * Stop tracing for the UST session.
ce34fcd0 4950 * Called with UST session lock held.
8be98f9a
MD
4951 */
4952int ust_app_stop_trace_all(struct ltt_ust_session *usess)
4953{
4954 int ret = 0;
bec39940 4955 struct lttng_ht_iter iter;
8be98f9a
MD
4956 struct ust_app *app;
4957
4958 DBG("Stopping all UST traces");
4959
1053a273
MD
4960 /*
4961 * Even though the stop trace might fail, flag this session inactive so
4962	 * other applications coming in are not started by default.
4963 */
4964 usess->active = 0;
4965
8be98f9a
MD
4966 rcu_read_lock();
4967
b34cbebf
MD
4968 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
4969 ret = ust_app_stop_trace(usess, app);
4970 if (ret < 0) {
4971 /* Continue to next apps even on error */
4972 continue;
4973 }
4974 }
4975
ce34fcd0 4976 (void) ust_app_flush_session(usess);
8be98f9a
MD
4977
4978 rcu_read_unlock();
4979
4980 return 0;
4981}
4982
84cd17c6
MD
4983/*
4984 * Destroy app UST session.
4985 */
4986int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
4987{
4988 int ret = 0;
bec39940 4989 struct lttng_ht_iter iter;
84cd17c6
MD
4990 struct ust_app *app;
4991
4992 DBG("Destroy all UST traces");
4993
4994 rcu_read_lock();
4995
852d0037 4996 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
3353de95 4997 ret = destroy_trace(usess, app);
84cd17c6
MD
4998 if (ret < 0) {
4999 /* Continue to next apps even on error */
5000 continue;
5001 }
5002 }
5003
5004 rcu_read_unlock();
5005
5006 return 0;
5007}
5008
0ce9aa93 5009/* The ua_sess lock must be held by the caller. */
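/* Look up the channel by name in the app session; create it and send it to the application if it does not exist yet. */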
a9ad0c8f 5010static
0ce9aa93
JG
5011int find_or_create_ust_app_channel(
5012 struct ltt_ust_session *usess,
5013 struct ust_app_session *ua_sess,
5014 struct ust_app *app,
5015 struct ltt_ust_channel *uchan,
5016 struct ust_app_channel **ua_chan)
487cf67c 5017{
55c54cce 5018 int ret = 0;
0ce9aa93
JG
5019 struct lttng_ht_iter iter;
5020 struct lttng_ht_node_str *ua_chan_node;
5021
5022 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &iter);
5023 ua_chan_node = lttng_ht_iter_get_node_str(&iter);
5024 if (ua_chan_node) {
5025 *ua_chan = caa_container_of(ua_chan_node,
5026 struct ust_app_channel, node);
5027 goto end;
5028 }
5029
5030 ret = ust_app_channel_create(usess, ua_sess, uchan, app, ua_chan);
5031 if (ret) {
5032 goto end;
5033 }
5034end:
5035 return ret;
5036}
5037
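/* Create the event on the application side if it is missing; otherwise align its enabled state with the session's configuration. */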
5038static
5039int ust_app_channel_synchronize_event(struct ust_app_channel *ua_chan,
5040 struct ltt_ust_event *uevent, struct ust_app_session *ua_sess,
5041 struct ust_app *app)
5042{
5043 int ret = 0;
5044 struct ust_app_event *ua_event = NULL;
5045
5046 ua_event = find_ust_app_event(ua_chan->events, uevent->attr.name,
5047 uevent->filter, uevent->attr.loglevel, uevent->exclusion);
5048 if (!ua_event) {
5049 ret = create_ust_app_event(ua_sess, ua_chan, uevent, app);
5050 if (ret < 0) {
5051 goto end;
5052 }
5053 } else {
5054 if (ua_event->enabled != uevent->enabled) {
5055 ret = uevent->enabled ?
5056 enable_ust_app_event(ua_sess, ua_event, app) :
5057 disable_ust_app_event(ua_sess, ua_event, app);
5058 }
5059 }
5060
5061end:
5062 return ret;
5063}
5064
5065/*
5066 * The caller must ensure that the application is compatible and is tracked
5067 * by the PID tracker.
5068 */
5069static
5070void ust_app_synchronize(struct ltt_ust_session *usess,
5071 struct ust_app *app)
5072{
5073 int ret = 0;
5074 struct cds_lfht_iter uchan_iter;
5075 struct ltt_ust_channel *uchan;
3d8ca23b 5076 struct ust_app_session *ua_sess = NULL;
1f3580c7 5077
0ce9aa93
JG
5078 /*
5079 * The application's configuration should only be synchronized for
5080 * active sessions.
5081 */
5082 assert(usess->active);
5083
5084 ret = find_or_create_ust_app_session(usess, app, &ua_sess, NULL);
3d8ca23b
DG
5085 if (ret < 0) {
5086 /* Tracer is probably gone or ENOMEM. */
487cf67c
DG
5087 goto error;
5088 }
3d8ca23b 5089 assert(ua_sess);
487cf67c 5090
d0b96690 5091 pthread_mutex_lock(&ua_sess->lock);
b161602a
MD
5092 if (ua_sess->deleted) {
5093 pthread_mutex_unlock(&ua_sess->lock);
5094 goto end;
5095 }
5096
0ce9aa93 5097 rcu_read_lock();
d02e041f 5098
0ce9aa93
JG
5099 cds_lfht_for_each_entry(usess->domain_global.channels->ht, &uchan_iter,
5100 uchan, node.node) {
5101 struct ust_app_channel *ua_chan;
5102 struct cds_lfht_iter uevent_iter;
5103 struct ltt_ust_event *uevent;
487cf67c 5104
31746f93 5105 /*
0ce9aa93
JG
5106 * Search for a matching ust_app_channel. If none is found,
5107 * create it. Creating the channel will cause the ua_chan
5108 * structure to be allocated, the channel buffers to be
5109 * allocated (if necessary) and sent to the application, and
5110 * all enabled contexts will be added to the channel.
31746f93 5111 */
0ce9aa93
JG
5112 ret = find_or_create_ust_app_channel(usess, ua_sess,
5113 app, uchan, &ua_chan);
5114 if (ret) {
5115 /* Tracer is probably gone or ENOMEM. */
5116 goto error_unlock;
727d5404
DG
5117 }
5118
0ce9aa93
JG
5119 if (!ua_chan) {
5120 /* ua_chan will be NULL for the metadata channel */
5121 continue;
5122 }
727d5404 5123
0ce9aa93 5124 cds_lfht_for_each_entry(uchan->events->ht, &uevent_iter, uevent,
bec39940 5125 node.node) {
0ce9aa93
JG
5126 ret = ust_app_channel_synchronize_event(ua_chan,
5127 uevent, ua_sess, app);
5128 if (ret) {
d0b96690 5129 goto error_unlock;
487cf67c 5130 }
36dc12cc 5131 }
d0b96690 5132
0ce9aa93
JG
5133 if (ua_chan->enabled != uchan->enabled) {
5134 ret = uchan->enabled ?
5135 enable_ust_app_channel(ua_sess, uchan, app) :
5136 disable_ust_app_channel(ua_sess, ua_chan, app);
5137 if (ret) {
5138 goto error_unlock;
5139 }
5140 }
36dc12cc 5141 }
d02e041f
JG
5142
5143 /*
5144 * Create the metadata for the application. This returns gracefully if a
5145 * metadata was already set for the session.
5146 *
5147 * The metadata channel must be created after the data channels as the
5148 * consumer daemon assumes this ordering. When interacting with a relay
5149 * daemon, the consumer will use this assumption to send the
5150 * "STREAMS_SENT" message to the relay daemon.
5151 */
5152 ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
5153 if (ret < 0) {
5154 goto error_unlock;
5155 }
5156
0ce9aa93 5157 rcu_read_unlock();
60235eb8 5158
a9ad0c8f 5159end:
0ce9aa93 5160 pthread_mutex_unlock(&ua_sess->lock);
ffe60014 5161 /* Everything went well at this point. */
ffe60014
DG
5162 return;
5163
d0b96690 5164error_unlock:
0ce9aa93 5165 rcu_read_unlock();
d0b96690 5166 pthread_mutex_unlock(&ua_sess->lock);
487cf67c 5167error:
ffe60014 5168 if (ua_sess) {
d0b96690 5169 destroy_app_session(app, ua_sess);
ffe60014 5170 }
487cf67c
DG
5171 return;
5172}
55cc08a6 5173
a9ad0c8f
MD
5174static
5175void ust_app_global_destroy(struct ltt_ust_session *usess, struct ust_app *app)
5176{
5177 struct ust_app_session *ua_sess;
5178
5179 ua_sess = lookup_session_by_app(usess, app);
5180 if (ua_sess == NULL) {
5181 return;
5182 }
5183 destroy_app_session(app, ua_sess);
5184}
5185
5186/*
5187 * Add channels/events from UST global domain to registered apps at sock.
5188 *
5189 * Called with session lock held.
5190 * Called with RCU read-side lock held.
5191 */
5192void ust_app_global_update(struct ltt_ust_session *usess, struct ust_app *app)
5193{
5194 assert(usess);
0ce9aa93 5195 assert(usess->active);
a9ad0c8f
MD
5196
5197 DBG2("UST app global update for app sock %d for session id %" PRIu64,
5198 app->sock, usess->id);
5199
5200 if (!app->compatible) {
5201 return;
5202 }
a9ad0c8f 5203 if (trace_ust_pid_tracker_lookup(usess, app->pid)) {
0ce9aa93
JG
5204 /*
5205 * Synchronize the application's internal tracing configuration
5206 * and start tracing.
5207 */
5208 ust_app_synchronize(usess, app);
5209 ust_app_start_trace(usess, app);
a9ad0c8f
MD
5210 } else {
5211 ust_app_global_destroy(usess, app);
5212 }
5213}
5214
5215/*
5216 * Called with session lock held.
5217 */
5218void ust_app_global_update_all(struct ltt_ust_session *usess)
5219{
5220 struct lttng_ht_iter iter;
5221 struct ust_app *app;
5222
5223 rcu_read_lock();
5224 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5225 ust_app_global_update(usess, app);
5226 }
5227 rcu_read_unlock();
5228}
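/*
 * Minimal sketch of the calling convention documented above: the caller
 * holds the session lock for the whole update. The update_session_apps(),
 * session_lock() and session_unlock() names are assumptions used only for
 * illustration.
 */
static void update_session_apps(struct ltt_session *session)
{
	session_lock(session);
	if (session->ust_session) {
		ust_app_global_update_all(session->ust_session);
	}
	session_unlock(session);
}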
5229
55cc08a6
DG
5230/*
5231 * Add context to a specific channel for global UST domain.
5232 */
5233int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
5234 struct ltt_ust_channel *uchan, struct ltt_ust_context *uctx)
5235{
5236 int ret = 0;
bec39940
DG
5237 struct lttng_ht_node_str *ua_chan_node;
5238 struct lttng_ht_iter iter, uiter;
55cc08a6
DG
5239 struct ust_app_channel *ua_chan = NULL;
5240 struct ust_app_session *ua_sess;
5241 struct ust_app *app;
5242
0ce9aa93 5243 assert(usess->active);
60235eb8 5244
55cc08a6 5245 rcu_read_lock();
852d0037 5246 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
e0c7ec2b
DG
5247 if (!app->compatible) {
5248 /*
5249		 * TODO: In time, we should notify the caller of this error by
5250		 * telling them that this is a version error.
5251 */
5252 continue;
5253 }
55cc08a6
DG
5254 ua_sess = lookup_session_by_app(usess, app);
5255 if (ua_sess == NULL) {
5256 continue;
5257 }
5258
d0b96690 5259 pthread_mutex_lock(&ua_sess->lock);
b161602a
MD
5260
5261 if (ua_sess->deleted) {
5262 pthread_mutex_unlock(&ua_sess->lock);
5263 continue;
5264 }
5265
55cc08a6 5266 /* Lookup channel in the ust app session */
bec39940
DG
5267 lttng_ht_lookup(ua_sess->channels, (void *)uchan->name, &uiter);
5268 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
55cc08a6 5269 if (ua_chan_node == NULL) {
d0b96690 5270 goto next_app;
55cc08a6
DG
5271 }
5272 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel,
5273 node);
c9edf082 5274 ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
55cc08a6 5275 if (ret < 0) {
d0b96690 5276 goto next_app;
55cc08a6 5277 }
d0b96690
DG
5278 next_app:
5279 pthread_mutex_unlock(&ua_sess->lock);
55cc08a6
DG
5280 }
5281
55cc08a6 5282 rcu_read_unlock();
76d45b40
DG
5283 return ret;
5284}
7f79d3a1 5285
d0b96690
DG
5286/*
5287 * Receive registration and populate the given msg structure.
5288 *
5289 * On success, return 0; otherwise, return the negative value returned by the ustctl call.
5290 */
5291int ust_app_recv_registration(int sock, struct ust_register_msg *msg)
5292{
5293 int ret;
5294 uint32_t pid, ppid, uid, gid;
5295
5296 assert(msg);
5297
5298 ret = ustctl_recv_reg_msg(sock, &msg->type, &msg->major, &msg->minor,
5299 &pid, &ppid, &uid, &gid,
5300 &msg->bits_per_long,
5301 &msg->uint8_t_alignment,
5302 &msg->uint16_t_alignment,
5303 &msg->uint32_t_alignment,
5304 &msg->uint64_t_alignment,
5305 &msg->long_alignment,
5306 &msg->byte_order,
5307 msg->name);
5308 if (ret < 0) {
5309 switch (-ret) {
5310 case EPIPE:
5311 case ECONNRESET:
5312 case LTTNG_UST_ERR_EXITING:
5313 DBG3("UST app recv reg message failed. Application died");
5314 break;
5315 case LTTNG_UST_ERR_UNSUP_MAJOR:
5316 ERR("UST app recv reg unsupported version %d.%d. Supporting %d.%d",
5317 msg->major, msg->minor, LTTNG_UST_ABI_MAJOR_VERSION,
5318 LTTNG_UST_ABI_MINOR_VERSION);
5319 break;
5320 default:
5321 ERR("UST app recv reg message failed with ret %d", ret);
5322 break;
5323 }
5324 goto error;
5325 }
5326 msg->pid = (pid_t) pid;
5327 msg->ppid = (pid_t) ppid;
5328 msg->uid = (uid_t) uid;
5329 msg->gid = (gid_t) gid;
5330
5331error:
5332 return ret;
5333}
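/*
 * Hedged sketch of how a registration thread might consume the helper
 * above; the dispatch/wait-queue handling of the real daemon is omitted
 * and example_handle_registration() is a hypothetical name.
 */
static int example_handle_registration(int sock)
{
	int ret;
	struct ust_register_msg msg;

	ret = ust_app_recv_registration(sock, &msg);
	if (ret < 0) {
		/* Socket is unusable; the caller should close it. */
		return ret;
	}
	DBG("App \"%s\" (pid %d) registered with UST ABI %u.%u", msg.name,
			(int) msg.pid, msg.major, msg.minor);
	return 0;
}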
5334
10b56aef
MD
5335/*
5336 * Return a ust app session object using the application object and the
5337 * session object descriptor as a key. If not found, NULL is returned.
5338 * An RCU read-side lock MUST be acquired when calling this function.
5339*/
5340static struct ust_app_session *find_session_by_objd(struct ust_app *app,
5341 int objd)
5342{
5343 struct lttng_ht_node_ulong *node;
5344 struct lttng_ht_iter iter;
5345 struct ust_app_session *ua_sess = NULL;
5346
5347 assert(app);
5348
5349 lttng_ht_lookup(app->ust_sessions_objd, (void *)((unsigned long) objd), &iter);
5350 node = lttng_ht_iter_get_node_ulong(&iter);
5351 if (node == NULL) {
5352 DBG2("UST app session find by objd %d not found", objd);
5353 goto error;
5354 }
5355
5356 ua_sess = caa_container_of(node, struct ust_app_session, ust_objd_node);
5357
5358error:
5359 return ua_sess;
5360}
5361
d88aee68
DG
5362/*
5363 * Return a ust app channel object using the application object and the channel
5364 * object descriptor as a key. If not found, NULL is returned. An RCU read-side
5365 * lock MUST be acquired before calling this function.
5366 */
d0b96690
DG
5367static struct ust_app_channel *find_channel_by_objd(struct ust_app *app,
5368 int objd)
5369{
5370 struct lttng_ht_node_ulong *node;
5371 struct lttng_ht_iter iter;
5372 struct ust_app_channel *ua_chan = NULL;
5373
5374 assert(app);
5375
5376 lttng_ht_lookup(app->ust_objd, (void *)((unsigned long) objd), &iter);
5377 node = lttng_ht_iter_get_node_ulong(&iter);
5378 if (node == NULL) {
5379 DBG2("UST app channel find by objd %d not found", objd);
5380 goto error;
5381 }
5382
5383 ua_chan = caa_container_of(node, struct ust_app_channel, ust_objd_node);
5384
5385error:
5386 return ua_chan;
5387}
5388
d88aee68
DG
5389/*
5390 * Reply to a register channel notification from an application on the notify
5391 * socket. The channel metadata is also created.
5392 *
5393 * The session UST registry lock is acquired in this function.
5394 *
5396 * On success, 0 is returned; otherwise, a negative value.
5396 */
8eede835 5397static int reply_ust_register_channel(int sock, int cobjd,
d0b96690
DG
5398 size_t nr_fields, struct ustctl_field *fields)
5399{
5400 int ret, ret_code = 0;
dcdd8e41 5401 uint32_t chan_id;
7972aab2 5402 uint64_t chan_reg_key;
d0b96690
DG
5403 enum ustctl_channel_header type;
5404 struct ust_app *app;
5405 struct ust_app_channel *ua_chan;
5406 struct ust_app_session *ua_sess;
7972aab2 5407 struct ust_registry_session *registry;
45893984 5408 struct ust_registry_channel *chan_reg;
d0b96690
DG
5409
5410 rcu_read_lock();
5411
5412 /* Lookup application. If not found, there is a code flow error. */
5413 app = find_app_by_notify_sock(sock);
d88aee68 5414 if (!app) {
fad1ed2f 5415 DBG("Application socket %d is being torn down. Abort event notify",
d88aee68
DG
5416 sock);
5417 ret = 0;
5418 goto error_rcu_unlock;
5419 }
d0b96690 5420
4950b860 5421 /* Lookup channel by UST object descriptor. */
d0b96690 5422 ua_chan = find_channel_by_objd(app, cobjd);
4950b860 5423 if (!ua_chan) {
fad1ed2f 5424 DBG("Application channel is being torn down. Abort event notify");
4950b860
MD
5425 ret = 0;
5426 goto error_rcu_unlock;
5427 }
5428
d0b96690
DG
5429 assert(ua_chan->session);
5430 ua_sess = ua_chan->session;
d0b96690 5431
7972aab2
DG
5432 /* Get right session registry depending on the session buffer type. */
5433 registry = get_session_registry(ua_sess);
fad1ed2f
JR
5434 if (!registry) {
5435 DBG("Application session is being torn down. Abort event notify");
5436 ret = 0;
5437 goto error_rcu_unlock;
5438 };
45893984 5439
7972aab2
DG
5440 /* Depending on the buffer type, a different channel key is used. */
5441 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
5442 chan_reg_key = ua_chan->tracing_channel_id;
d0b96690 5443 } else {
7972aab2 5444 chan_reg_key = ua_chan->key;
d0b96690
DG
5445 }
5446
7972aab2
DG
5447 pthread_mutex_lock(&registry->lock);
5448
5449 chan_reg = ust_registry_channel_find(registry, chan_reg_key);
5450 assert(chan_reg);
5451
5452 if (!chan_reg->register_done) {
dcdd8e41
MD
5453 /*
5454 * TODO: eventually use the registry event count for
5455 * this channel to better guess header type for per-pid
5456 * buffers.
5457 */
5458 type = USTCTL_CHANNEL_HEADER_LARGE;
7972aab2
DG
5459 chan_reg->nr_ctx_fields = nr_fields;
5460 chan_reg->ctx_fields = fields;
fad1ed2f 5461 fields = NULL;
7972aab2 5462 chan_reg->header_type = type;
d0b96690 5463 } else {
7972aab2
DG
5464 /* Get current already assigned values. */
5465 type = chan_reg->header_type;
d0b96690 5466 }
7972aab2
DG
5467 /* Channel id is set during the object creation. */
5468 chan_id = chan_reg->chan_id;
d0b96690
DG
5469
5470 /* Append to metadata */
7972aab2
DG
5471 if (!chan_reg->metadata_dumped) {
5472 ret_code = ust_metadata_channel_statedump(registry, chan_reg);
d0b96690
DG
5473 if (ret_code) {
5474 ERR("Error appending channel metadata (errno = %d)", ret_code);
5475 goto reply;
5476 }
5477 }
5478
5479reply:
7972aab2
DG
5480 DBG3("UST app replying to register channel key %" PRIu64
5481 " with id %u, type: %d, ret: %d", chan_reg_key, chan_id, type,
5482 ret_code);
d0b96690
DG
5483
5484 ret = ustctl_reply_register_channel(sock, chan_id, type, ret_code);
5485 if (ret < 0) {
5486 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5487 ERR("UST app reply channel failed with ret %d", ret);
5488 } else {
5489 DBG3("UST app reply channel failed. Application died");
5490 }
5491 goto error;
5492 }
5493
7972aab2
DG
5494 /* This channel registry registration is completed. */
5495 chan_reg->register_done = 1;
5496
d0b96690 5497error:
7972aab2 5498 pthread_mutex_unlock(&registry->lock);
d88aee68 5499error_rcu_unlock:
d0b96690 5500 rcu_read_unlock();
fad1ed2f 5501 free(fields);
d0b96690
DG
5502 return ret;
5503}
5504
d88aee68
DG
5505/*
5506 * Add event to the UST channel registry. When the event is added to the
5507 * registry, the metadata is also created. Once done, this replies to the
5508 * application with the appropriate error code.
5509 *
5510 * The session UST registry lock is acquired in the function.
5511 *
5512 * On success, 0 is returned; otherwise, a negative value.
5513 */
d0b96690 5514static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
2106efa0
PP
5515 char *sig, size_t nr_fields, struct ustctl_field *fields,
5516 int loglevel_value, char *model_emf_uri)
d0b96690
DG
5517{
5518 int ret, ret_code;
5519 uint32_t event_id = 0;
7972aab2 5520 uint64_t chan_reg_key;
d0b96690
DG
5521 struct ust_app *app;
5522 struct ust_app_channel *ua_chan;
5523 struct ust_app_session *ua_sess;
7972aab2 5524 struct ust_registry_session *registry;
d0b96690
DG
5525
5526 rcu_read_lock();
5527
5528 /* Lookup application. If not found, there is a code flow error. */
5529 app = find_app_by_notify_sock(sock);
d88aee68 5530 if (!app) {
fad1ed2f 5531 DBG("Application socket %d is being torn down. Abort event notify",
d88aee68
DG
5532 sock);
5533 ret = 0;
5534 goto error_rcu_unlock;
5535 }
d0b96690 5536
4950b860 5537 /* Lookup channel by UST object descriptor. */
d0b96690 5538 ua_chan = find_channel_by_objd(app, cobjd);
4950b860 5539 if (!ua_chan) {
fad1ed2f 5540 DBG("Application channel is being torn down. Abort event notify");
4950b860
MD
5541 ret = 0;
5542 goto error_rcu_unlock;
5543 }
5544
d0b96690
DG
5545 assert(ua_chan->session);
5546 ua_sess = ua_chan->session;
5547
7972aab2 5548 registry = get_session_registry(ua_sess);
fad1ed2f
JR
5549 if (!registry) {
5550 DBG("Application session is being torn down. Abort event notify");
5551 ret = 0;
5552 goto error_rcu_unlock;
5553 }
7972aab2
DG
5554
5555 if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
5556 chan_reg_key = ua_chan->tracing_channel_id;
5557 } else {
5558 chan_reg_key = ua_chan->key;
5559 }
5560
5561 pthread_mutex_lock(&registry->lock);
d0b96690 5562
d5d629b5
DG
5563 /*
5564	 * From this point on, this call acquires ownership of the sig, fields
5565	 * and model_emf_uri, meaning that any frees are done inside it if needed.
5566	 * These three variables MUST NOT be read or written after this.
5567 */
7972aab2 5568 ret_code = ust_registry_create_event(registry, chan_reg_key,
2106efa0
PP
5569 sobjd, cobjd, name, sig, nr_fields, fields,
5570 loglevel_value, model_emf_uri, ua_sess->buffer_type,
5571 &event_id, app);
fad1ed2f
JR
5572 sig = NULL;
5573 fields = NULL;
5574 model_emf_uri = NULL;
d0b96690
DG
5575
5576 /*
5577	 * The return value is passed back to ustctl so that, in case of an error,
5578	 * the application can be notified. It is important not to return a
5579	 * negative error, or else the application will get closed.
5580 */
5581 ret = ustctl_reply_register_event(sock, event_id, ret_code);
5582 if (ret < 0) {
5583 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5584 ERR("UST app reply event failed with ret %d", ret);
5585 } else {
5586 DBG3("UST app reply event failed. Application died");
5587 }
5588 /*
5589 * No need to wipe the create event since the application socket will
5590 * get close on error hence cleaning up everything by itself.
5591 */
5592 goto error;
5593 }
5594
7972aab2
DG
5595 DBG3("UST registry event %s with id %" PRId32 " added successfully",
5596 name, event_id);
d88aee68 5597
d0b96690 5598error:
7972aab2 5599 pthread_mutex_unlock(&registry->lock);
d88aee68 5600error_rcu_unlock:
d0b96690 5601 rcu_read_unlock();
fad1ed2f
JR
5602 free(sig);
5603 free(fields);
5604 free(model_emf_uri);
d0b96690
DG
5605 return ret;
5606}
5607
10b56aef
MD
5608/*
5609 * Add enum to the UST session registry. Once done, this replies to the
5610 * application with the appropriate error code.
5611 *
5612 * The session UST registry lock is acquired within this function.
5613 *
5614 * On success, 0 is returned; otherwise, a negative value.
5615 */
5616static int add_enum_ust_registry(int sock, int sobjd, char *name,
5617 struct ustctl_enum_entry *entries, size_t nr_entries)
5618{
5619 int ret = 0, ret_code;
5620 struct ust_app *app;
5621 struct ust_app_session *ua_sess;
5622 struct ust_registry_session *registry;
5623 uint64_t enum_id = -1ULL;
5624
5625 rcu_read_lock();
5626
5627 /* Lookup application. If not found, there is a code flow error. */
5628 app = find_app_by_notify_sock(sock);
5629 if (!app) {
5630		/* Not an error; the application is simply being torn down. */
5631 DBG("Application socket %d is being torn down. Aborting enum registration",
5632 sock);
5633 free(entries);
5634 goto error_rcu_unlock;
5635 }
5636
5637 /* Lookup session by UST object descriptor. */
5638 ua_sess = find_session_by_objd(app, sobjd);
5639 if (!ua_sess) {
5640		/* Not an error; the application session is simply being torn down. */
fad1ed2f 5641 DBG("Application session is being torn down (session not found). Aborting enum registration.");
10b56aef
MD
5642 free(entries);
5643 goto error_rcu_unlock;
5644 }
5645
5646 registry = get_session_registry(ua_sess);
fad1ed2f
JR
5647 if (!registry) {
5648 DBG("Application session is being torn down (registry not found). Aborting enum registration.");
5649 free(entries);
5650 goto error_rcu_unlock;
5651 }
10b56aef
MD
5652
5653 pthread_mutex_lock(&registry->lock);
5654
5655 /*
5656 * From this point on, the callee acquires the ownership of
5657	 * entries. The variable entries MUST NOT be read or written after
5658	 * this call.
5659 */
5660 ret_code = ust_registry_create_or_find_enum(registry, sobjd, name,
5661 entries, nr_entries, &enum_id);
5662 entries = NULL;
5663
5664 /*
5665 * The return value is returned to ustctl so in case of an error, the
5666 * application can be notified. In case of an error, it's important not to
5667 * return a negative error or else the application will get closed.
5668 */
5669 ret = ustctl_reply_register_enum(sock, enum_id, ret_code);
5670 if (ret < 0) {
5671 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5672 ERR("UST app reply enum failed with ret %d", ret);
5673 } else {
5674 DBG3("UST app reply enum failed. Application died");
5675 }
5676 /*
5677 * No need to wipe the create enum since the application socket will
5678 * get close on error hence cleaning up everything by itself.
5679 */
5680 goto error;
5681 }
5682
5683 DBG3("UST registry enum %s added successfully or already found", name);
5684
5685error:
5686 pthread_mutex_unlock(&registry->lock);
5687error_rcu_unlock:
5688 rcu_read_unlock();
5689 return ret;
5690}
5691
d88aee68
DG
5692/*
5693 * Handle application notification through the given notify socket.
5694 *
5695 * Return 0 on success or else a negative value.
5696 */
d0b96690
DG
5697int ust_app_recv_notify(int sock)
5698{
5699 int ret;
5700 enum ustctl_notify_cmd cmd;
5701
5702 DBG3("UST app receiving notify from sock %d", sock);
5703
5704 ret = ustctl_recv_notify(sock, &cmd);
5705 if (ret < 0) {
5706 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5707 ERR("UST app recv notify failed with ret %d", ret);
5708 } else {
5709 DBG3("UST app recv notify failed. Application died");
5710 }
5711 goto error;
5712 }
5713
5714 switch (cmd) {
5715 case USTCTL_NOTIFY_CMD_EVENT:
5716 {
2106efa0 5717 int sobjd, cobjd, loglevel_value;
d0b96690
DG
5718 char name[LTTNG_UST_SYM_NAME_LEN], *sig, *model_emf_uri;
5719 size_t nr_fields;
5720 struct ustctl_field *fields;
5721
5722 DBG2("UST app ustctl register event received");
5723
2106efa0
PP
5724 ret = ustctl_recv_register_event(sock, &sobjd, &cobjd, name,
5725 &loglevel_value, &sig, &nr_fields, &fields,
5726 &model_emf_uri);
d0b96690
DG
5727 if (ret < 0) {
5728 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5729 ERR("UST app recv event failed with ret %d", ret);
5730 } else {
5731 DBG3("UST app recv event failed. Application died");
5732 }
5733 goto error;
5734 }
5735
d5d629b5
DG
5736 /*
5737 * Add event to the UST registry coming from the notify socket. This
5738		 * call will free, if needed, the sig, fields and model_emf_uri. This
5739		 * code path loses ownership of these variables and transfers it
5740		 * to that function.
5741 */
d0b96690 5742 ret = add_event_ust_registry(sock, sobjd, cobjd, name, sig, nr_fields,
2106efa0 5743 fields, loglevel_value, model_emf_uri);
d0b96690
DG
5744 if (ret < 0) {
5745 goto error;
5746 }
5747
5748 break;
5749 }
5750 case USTCTL_NOTIFY_CMD_CHANNEL:
5751 {
5752 int sobjd, cobjd;
5753 size_t nr_fields;
5754 struct ustctl_field *fields;
5755
5756 DBG2("UST app ustctl register channel received");
5757
5758 ret = ustctl_recv_register_channel(sock, &sobjd, &cobjd, &nr_fields,
5759 &fields);
5760 if (ret < 0) {
5761 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5762 ERR("UST app recv channel failed with ret %d", ret);
5763 } else {
5764 DBG3("UST app recv channel failed. Application died");
5765 }
5766 goto error;
5767 }
5768
d5d629b5
DG
5769 /*
5770		 * Ownership of the fields is transferred to this function call, meaning
5771		 * that they will be freed if needed. After this, it is invalid to access
5772		 * or free the fields.
5773 */
8eede835 5774 ret = reply_ust_register_channel(sock, cobjd, nr_fields,
d0b96690
DG
5775 fields);
5776 if (ret < 0) {
5777 goto error;
5778 }
5779
5780 break;
5781 }
10b56aef
MD
5782 case USTCTL_NOTIFY_CMD_ENUM:
5783 {
5784 int sobjd;
5785 char name[LTTNG_UST_SYM_NAME_LEN];
5786 size_t nr_entries;
5787 struct ustctl_enum_entry *entries;
5788
5789 DBG2("UST app ustctl register enum received");
5790
5791 ret = ustctl_recv_register_enum(sock, &sobjd, name,
5792 &entries, &nr_entries);
5793 if (ret < 0) {
5794 if (ret != -EPIPE && ret != -LTTNG_UST_ERR_EXITING) {
5795 ERR("UST app recv enum failed with ret %d", ret);
5796 } else {
5797 DBG3("UST app recv enum failed. Application died");
5798 }
5799 goto error;
5800 }
5801
5802 /* Callee assumes ownership of entries */
5803 ret = add_enum_ust_registry(sock, sobjd, name,
5804 entries, nr_entries);
5805 if (ret < 0) {
5806 goto error;
5807 }
5808
5809 break;
5810 }
d0b96690
DG
5811 default:
5812 /* Should NEVER happen. */
5813 assert(0);
5814 }
5815
5816error:
5817 return ret;
5818}
d88aee68
DG
5819
5820/*
5821 * Once the notify socket hangs up, this is called. First, it tries to find the
5822 * corresponding application. On failure, the call_rcu to close the socket is
5823 * executed. If an application is found, it tries to delete it from the notify
5824 * socket hash table. Whatever the result, it proceeds to the call_rcu.
5825 *
5826 * Note that an object needs to be allocated here so on ENOMEM failure, the
5827 * call RCU is not done but the rest of the cleanup is.
5828 */
5829void ust_app_notify_sock_unregister(int sock)
5830{
5831 int err_enomem = 0;
5832 struct lttng_ht_iter iter;
5833 struct ust_app *app;
5834 struct ust_app_notify_sock_obj *obj;
5835
5836 assert(sock >= 0);
5837
5838 rcu_read_lock();
5839
5840 obj = zmalloc(sizeof(*obj));
5841 if (!obj) {
5842 /*
5843		 * An ENOMEM here is unfortunate. If this strikes, we continue the
5844 * procedure but the call_rcu will not be called. In this case, we
5845 * accept the fd leak rather than possibly creating an unsynchronized
5846 * state between threads.
5847 *
5848 * TODO: The notify object should be created once the notify socket is
5849		 * registered and stored independently from the ust app object. The
5850 * tricky part is to synchronize the teardown of the application and
5851 * this notify object. Let's keep that in mind so we can avoid this
5852 * kind of shenanigans with ENOMEM in the teardown path.
5853 */
5854 err_enomem = 1;
5855 } else {
5856 obj->fd = sock;
5857 }
5858
5859 DBG("UST app notify socket unregister %d", sock);
5860
5861 /*
5862 * Lookup application by notify socket. If this fails, this means that the
5863 * hash table delete has already been done by the application
5864 * unregistration process so we can safely close the notify socket in a
5865 * call RCU.
5866 */
5867 app = find_app_by_notify_sock(sock);
5868 if (!app) {
5869 goto close_socket;
5870 }
5871
5872 iter.iter.node = &app->notify_sock_n.node;
5873
5874 /*
5875 * Whatever happens here either we fail or succeed, in both cases we have
5876 * to close the socket after a grace period to continue to the call RCU
5877 * here. If the deletion is successful, the application is not visible
5878 * anymore by other threads and is it fails it means that it was already
5879 * deleted from the hash table so either way we just have to close the
5880 * socket.
5881 */
5882 (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
5883
5884close_socket:
5885 rcu_read_unlock();
5886
5887 /*
5888	 * Close the socket after a grace period to avoid the socket being reused
5889	 * before the application object is freed, which would create a potential
5890	 * race between threads trying to add a unique entry to the global hash table.
5891 */
5892 if (!err_enomem) {
5893 call_rcu(&obj->head, close_notify_sock_rcu);
5894 }
5895}
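/*
 * Sketch tying the two notify-socket entry points together, assuming a
 * caller that already knows whether the socket hung up; the poll-loop
 * plumbing is left out and example_service_notify_sock() is hypothetical.
 */
static void example_service_notify_sock(int sock, int hung_up)
{
	if (hung_up || ust_app_recv_notify(sock) < 0) {
		/* Treat a receive error like a hang-up. */
		ust_app_notify_sock_unregister(sock);
	}
}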
f45e313d
DG
5896
5897/*
5898 * Destroy a ust app data structure and free its memory.
5899 */
5900void ust_app_destroy(struct ust_app *app)
5901{
5902 if (!app) {
5903 return;
5904 }
5905
5906 call_rcu(&app->pid_n.head, delete_ust_app_rcu);
5907}
6dc3064a
DG
5908
5909/*
5910 * Take a snapshot for a given UST session. The snapshot is sent to the given
5911 * output.
5912 *
8f07cd01 5913 * Returns LTTNG_OK on success or an LTTNG_ERR error code.
6dc3064a 5914 */
65ff8ea3
JG
5915enum lttng_error_code ust_app_snapshot_record(
5916 const struct ltt_ust_session *usess,
61ace1d3 5917 const struct consumer_output *output, int wait,
d07ceecd 5918 uint64_t nb_packets_per_stream)
6dc3064a
DG
5919{
5920 int ret = 0;
8f07cd01 5921 enum lttng_error_code status = LTTNG_OK;
6dc3064a
DG
5922 struct lttng_ht_iter iter;
5923 struct ust_app *app;
9cb4b50c 5924 char *trace_path = NULL;
6dc3064a
DG
5925
5926 assert(usess);
5927 assert(output);
5928
5929 rcu_read_lock();
5930
8c924c7b
MD
5931 switch (usess->buffer_type) {
5932 case LTTNG_BUFFER_PER_UID:
5933 {
5934 struct buffer_reg_uid *reg;
6dc3064a 5935
8c924c7b
MD
5936 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
5937 struct buffer_reg_channel *reg_chan;
5938 struct consumer_socket *socket;
1819b04a 5939 char pathname[PATH_MAX];
6dc3064a 5940
f6567e8d
JR
5941 if (!reg->registry->reg.ust->metadata_key) {
5942 /* Skip since no metadata is present */
5943 continue;
5944 }
5945
8c924c7b
MD
5946			/* Get consumer socket to use to push the metadata. */
5947 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
5948 usess->consumer);
5949 if (!socket) {
8f07cd01 5950 status = LTTNG_ERR_INVALID;
8c924c7b
MD
5951 goto error;
5952 }
6dc3064a 5953
8c924c7b 5954 memset(pathname, 0, sizeof(pathname));
e5148e25
JG
5955 /*
5956 * DEFAULT_UST_TRACE_UID_PATH already contains a path
5957 * separator.
5958 */
8c924c7b 5959 ret = snprintf(pathname, sizeof(pathname),
e5148e25 5960 DEFAULT_UST_TRACE_DIR DEFAULT_UST_TRACE_UID_PATH,
8c924c7b
MD
5961 reg->uid, reg->bits_per_long);
5962 if (ret < 0) {
5963 PERROR("snprintf snapshot path");
8f07cd01 5964 status = LTTNG_ERR_INVALID;
8c924c7b
MD
5965 goto error;
5966 }
9cb4b50c
JG
5967			/* Free the path allocated on a previous iteration. */
5968 free(trace_path);
1819b04a
MD
5969 trace_path = setup_channel_trace_path(usess->consumer, pathname);
5970 if (!trace_path) {
5971 status = LTTNG_ERR_INVALID;
5972 goto error;
5973 }
e5148e25 5974 /* Add the UST default trace dir to path. */
8c924c7b
MD
5975 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
5976 reg_chan, node.node) {
8f07cd01 5977 status = consumer_snapshot_channel(socket,
e098433c
JG
5978 reg_chan->consumer_key,
5979 output, 0, usess->uid,
1819b04a 5980 usess->gid, trace_path, wait,
e5148e25 5981 nb_packets_per_stream);
8f07cd01 5982 if (status != LTTNG_OK) {
8c924c7b
MD
5983 goto error;
5984 }
5985 }
8f07cd01 5986 status = consumer_snapshot_channel(socket,
68808f4e 5987 reg->registry->reg.ust->metadata_key, output, 1,
1819b04a 5988 usess->uid, usess->gid, trace_path, wait, 0);
8f07cd01 5989 if (status != LTTNG_OK) {
8c924c7b
MD
5990 goto error;
5991 }
af706bb7 5992 }
8c924c7b
MD
5993 break;
5994 }
5995 case LTTNG_BUFFER_PER_PID:
5996 {
5997 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
5998 struct consumer_socket *socket;
5999 struct lttng_ht_iter chan_iter;
6000 struct ust_app_channel *ua_chan;
6001 struct ust_app_session *ua_sess;
6002 struct ust_registry_session *registry;
1819b04a 6003 char pathname[PATH_MAX];
8c924c7b
MD
6004
6005 ua_sess = lookup_session_by_app(usess, app);
6006 if (!ua_sess) {
6007 /* Session not associated with this app. */
6008 continue;
6009 }
af706bb7 6010
8c924c7b
MD
6011 /* Get the right consumer socket for the application. */
6012 socket = consumer_find_socket_by_bitness(app->bits_per_long,
61ace1d3 6013 output);
8c924c7b 6014 if (!socket) {
8f07cd01 6015 status = LTTNG_ERR_INVALID;
5c786ded
JD
6016 goto error;
6017 }
6018
8c924c7b
MD
6019 /* Add the UST default trace dir to path. */
6020 memset(pathname, 0, sizeof(pathname));
e5148e25 6021 ret = snprintf(pathname, sizeof(pathname), DEFAULT_UST_TRACE_DIR "%s",
8c924c7b 6022 ua_sess->path);
6dc3064a 6023 if (ret < 0) {
8f07cd01 6024 status = LTTNG_ERR_INVALID;
8c924c7b 6025 PERROR("snprintf snapshot path");
6dc3064a
DG
6026 goto error;
6027 }
9cb4b50c
JG
6028			/* Free the path allocated on a previous iteration. */
6029 free(trace_path);
1819b04a
MD
6030 trace_path = setup_channel_trace_path(usess->consumer, pathname);
6031 if (!trace_path) {
6032 status = LTTNG_ERR_INVALID;
6033 goto error;
6034 }
e5148e25 6035 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
8c924c7b 6036 ua_chan, node.node) {
8f07cd01 6037 status = consumer_snapshot_channel(socket,
c51311d6
JG
6038 ua_chan->key, output, 0,
6039 ua_sess->effective_credentials
6040 .uid,
6041 ua_sess->effective_credentials
6042 .gid,
1819b04a 6043 trace_path, wait,
e5148e25 6044 nb_packets_per_stream);
8f07cd01
JG
6045 switch (status) {
6046 case LTTNG_OK:
6047 break;
6048 case LTTNG_ERR_CHAN_NOT_FOUND:
6049 continue;
6050 default:
8c924c7b
MD
6051 goto error;
6052 }
6053 }
6054
6055 registry = get_session_registry(ua_sess);
fad1ed2f 6056 if (!registry) {
d5f19c28
MD
6057 DBG("Application session is being torn down. Skip application.");
6058 continue;
fad1ed2f 6059 }
8f07cd01 6060 status = consumer_snapshot_channel(socket,
c51311d6
JG
6061 registry->metadata_key, output, 1,
6062 ua_sess->effective_credentials.uid,
6063 ua_sess->effective_credentials.gid,
1819b04a 6064 trace_path, wait, 0);
8f07cd01
JG
6065 switch (status) {
6066 case LTTNG_OK:
6067 break;
6068 case LTTNG_ERR_CHAN_NOT_FOUND:
6069 continue;
6070 default:
8c924c7b
MD
6071 goto error;
6072 }
6073 }
6074 break;
6075 }
6076 default:
6077 assert(0);
6078 break;
6dc3064a
DG
6079 }
6080
6081error:
9cb4b50c 6082 free(trace_path);
6dc3064a 6083 rcu_read_unlock();
8f07cd01 6084 return status;
6dc3064a 6085}
5c786ded
JD
6086
6087/*
d07ceecd 6088 * Return the size taken by one more packet per stream.
5c786ded 6089 */
65ff8ea3
JG
6090uint64_t ust_app_get_size_one_more_packet_per_stream(
6091 const struct ltt_ust_session *usess, uint64_t cur_nr_packets)
5c786ded 6092{
d07ceecd 6093 uint64_t tot_size = 0;
5c786ded
JD
6094 struct ust_app *app;
6095 struct lttng_ht_iter iter;
6096
6097 assert(usess);
6098
6099 switch (usess->buffer_type) {
6100 case LTTNG_BUFFER_PER_UID:
6101 {
6102 struct buffer_reg_uid *reg;
6103
6104 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6105 struct buffer_reg_channel *reg_chan;
6106
b7064eaa 6107 rcu_read_lock();
5c786ded
JD
6108 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
6109 reg_chan, node.node) {
d07ceecd
MD
6110 if (cur_nr_packets >= reg_chan->num_subbuf) {
6111 /*
6112					 * Don't take the channel into account if we
6113					 * have already grabbed all its packets.
6114 */
6115 continue;
6116 }
6117 tot_size += reg_chan->subbuf_size * reg_chan->stream_count;
5c786ded 6118 }
b7064eaa 6119 rcu_read_unlock();
5c786ded
JD
6120 }
6121 break;
6122 }
6123 case LTTNG_BUFFER_PER_PID:
6124 {
6125 rcu_read_lock();
6126 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6127 struct ust_app_channel *ua_chan;
6128 struct ust_app_session *ua_sess;
6129 struct lttng_ht_iter chan_iter;
6130
6131 ua_sess = lookup_session_by_app(usess, app);
6132 if (!ua_sess) {
6133 /* Session not associated with this app. */
6134 continue;
6135 }
6136
6137 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
6138 ua_chan, node.node) {
d07ceecd
MD
6139 if (cur_nr_packets >= ua_chan->attr.num_subbuf) {
6140 /*
6141					 * Don't take the channel into account if we
6142					 * have already grabbed all its packets.
6143 */
6144 continue;
6145 }
6146 tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
5c786ded
JD
6147 }
6148 }
6149 rcu_read_unlock();
6150 break;
6151 }
6152 default:
6153 assert(0);
6154 break;
6155 }
6156
d07ceecd 6157 return tot_size;
5c786ded 6158}
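/*
 * Worked example of the accounting above, with made-up numbers: a channel
 * with 8 streams and a 64 kiB sub-buffer size adds 8 * 64 kiB = 512 kiB to
 * the estimate for each extra packet requested, until cur_nr_packets
 * reaches its num_subbuf, after which the channel stops contributing.
 */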
fb83fe64
JD
6159
6160int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
6161 struct cds_list_head *buffer_reg_uid_list,
6162 struct consumer_output *consumer, uint64_t uchan_id,
6163 int overwrite, uint64_t *discarded, uint64_t *lost)
6164{
6165 int ret;
6166 uint64_t consumer_chan_key;
6167
70dd8162
MD
6168 *discarded = 0;
6169 *lost = 0;
6170
fb83fe64 6171 ret = buffer_reg_uid_consumer_channel_key(
76604852 6172 buffer_reg_uid_list, uchan_id, &consumer_chan_key);
fb83fe64 6173 if (ret < 0) {
70dd8162
MD
6174 /* Not found */
6175 ret = 0;
fb83fe64
JD
6176 goto end;
6177 }
6178
6179 if (overwrite) {
6180 ret = consumer_get_lost_packets(ust_session_id,
6181 consumer_chan_key, consumer, lost);
6182 } else {
6183 ret = consumer_get_discarded_events(ust_session_id,
6184 consumer_chan_key, consumer, discarded);
6185 }
6186
6187end:
6188 return ret;
6189}
6190
6191int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
6192 struct ltt_ust_channel *uchan,
6193 struct consumer_output *consumer, int overwrite,
6194 uint64_t *discarded, uint64_t *lost)
6195{
6196 int ret = 0;
6197 struct lttng_ht_iter iter;
6198 struct lttng_ht_node_str *ua_chan_node;
6199 struct ust_app *app;
6200 struct ust_app_session *ua_sess;
6201 struct ust_app_channel *ua_chan;
6202
70dd8162
MD
6203 *discarded = 0;
6204 *lost = 0;
6205
fb83fe64
JD
6206 rcu_read_lock();
6207 /*
70dd8162
MD
6208	 * Iterate over every registered application. Sum counters for
6209	 * all applications containing the requested session and channel.
fb83fe64
JD
6210 */
6211 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6212 struct lttng_ht_iter uiter;
6213
6214 ua_sess = lookup_session_by_app(usess, app);
6215 if (ua_sess == NULL) {
6216 continue;
6217 }
6218
6219 /* Get channel */
ee022399 6220 lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
fb83fe64
JD
6221 ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
6222 /* If the session is found for the app, the channel must be there */
6223 assert(ua_chan_node);
6224
6225 ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
6226
6227 if (overwrite) {
70dd8162
MD
6228 uint64_t _lost;
6229
fb83fe64 6230 ret = consumer_get_lost_packets(usess->id, ua_chan->key,
70dd8162
MD
6231 consumer, &_lost);
6232 if (ret < 0) {
6233 break;
6234 }
6235 (*lost) += _lost;
fb83fe64 6236 } else {
70dd8162
MD
6237 uint64_t _discarded;
6238
fb83fe64 6239 ret = consumer_get_discarded_events(usess->id,
70dd8162
MD
6240 ua_chan->key, consumer, &_discarded);
6241 if (ret < 0) {
6242 break;
6243 }
6244 (*discarded) += _discarded;
fb83fe64 6245 }
fb83fe64
JD
6246 }
6247
fb83fe64
JD
6248 rcu_read_unlock();
6249 return ret;
6250}
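/*
 * Minimal sketch of how a caller might dispatch between the per-UID and
 * per-PID statistics helpers above based on the session buffer type.
 * example_get_channel_stats() is a hypothetical name and uchan->id is
 * assumed to hold the channel's tracking identifier.
 */
static int example_get_channel_stats(struct ltt_ust_session *usess,
		struct ltt_ust_channel *uchan, int overwrite,
		uint64_t *discarded, uint64_t *lost)
{
	if (usess->buffer_type == LTTNG_BUFFER_PER_UID) {
		return ust_app_uid_get_channel_runtime_stats(usess->id,
				&usess->buffer_reg_uid_list, usess->consumer,
				uchan->id, overwrite, discarded, lost);
	}
	return ust_app_pid_get_channel_runtime_stats(usess, uchan,
			usess->consumer, overwrite, discarded, lost);
}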
c2561365
JD
6251
6252static
6253int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
6254 struct ust_app *app)
6255{
6256 int ret = 0;
6257 struct ust_app_session *ua_sess;
6258
6259 DBG("Regenerating the metadata for ust app pid %d", app->pid);
6260
6261 rcu_read_lock();
6262
6263 ua_sess = lookup_session_by_app(usess, app);
6264 if (ua_sess == NULL) {
6265		/* The session is in the teardown process. Ignore and continue. */
6266 goto end;
6267 }
6268
6269 pthread_mutex_lock(&ua_sess->lock);
6270
6271 if (ua_sess->deleted) {
6272 goto end_unlock;
6273 }
6274
6275 pthread_mutex_lock(&app->sock_lock);
6276 ret = ustctl_regenerate_statedump(app->sock, ua_sess->handle);
6277 pthread_mutex_unlock(&app->sock_lock);
6278
6279end_unlock:
6280 pthread_mutex_unlock(&ua_sess->lock);
6281
6282end:
6283 rcu_read_unlock();
6284 health_code_update();
6285 return ret;
6286}
6287
6288/*
6289 * Regenerate the statedump for each app in the session.
6290 */
6291int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
6292{
6293 int ret = 0;
6294 struct lttng_ht_iter iter;
6295 struct ust_app *app;
6296
6297 DBG("Regenerating the metadata for all UST apps");
6298
6299 rcu_read_lock();
6300
6301 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6302 if (!app->compatible) {
6303 continue;
6304 }
6305
6306 ret = ust_app_regenerate_statedump(usess, app);
6307 if (ret < 0) {
6308 /* Continue to the next app even on error */
6309 continue;
6310 }
6311 }
6312
6313 rcu_read_unlock();
6314
6315 return 0;
6316}
5c408ad8
JD
6317
6318/*
6319 * Rotate all the channels of a session.
6320 *
fbb8cd76 6321 * Return LTTNG_OK on success or else an LTTng error code.
5c408ad8 6322 */
fbb8cd76 6323enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
5c408ad8 6324{
fbb8cd76
MD
6325 int ret;
6326 enum lttng_error_code cmd_ret = LTTNG_OK;
5c408ad8
JD
6327 struct lttng_ht_iter iter;
6328 struct ust_app *app;
6329 struct ltt_ust_session *usess = session->ust_session;
5c408ad8
JD
6330
6331 assert(usess);
6332
6333 rcu_read_lock();
6334
6335 switch (usess->buffer_type) {
6336 case LTTNG_BUFFER_PER_UID:
6337 {
6338 struct buffer_reg_uid *reg;
6339
6340 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6341 struct buffer_reg_channel *reg_chan;
6342 struct consumer_socket *socket;
6343
6344			/* Get consumer socket to use to push the metadata. */
6345 socket = consumer_find_socket_by_bitness(reg->bits_per_long,
6346 usess->consumer);
6347 if (!socket) {
fbb8cd76 6348 cmd_ret = LTTNG_ERR_INVALID;
5c408ad8
JD
6349 goto error;
6350 }
6351
5c408ad8
JD
6352 /* Rotate the data channels. */
6353 cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
6354 reg_chan, node.node) {
5c408ad8
JD
6355 ret = consumer_rotate_channel(socket,
6356 reg_chan->consumer_key,
6357 usess->uid, usess->gid,
e5148e25
JG
6358 usess->consumer,
6359 /* is_metadata_channel */ false);
5c408ad8 6360 if (ret < 0) {
fbb8cd76 6361 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
5c408ad8
JD
6362 goto error;
6363 }
6364 }
6365
e1ac6bb9
JR
6366 /*
6367 * The metadata channel might not be present.
6368 *
6369 * Consumer stream allocation can be done
6370 * asynchronously and can fail on intermediary
6371			 * operations (i.e. add context) and lead to data
6372 * channels created with no metadata channel.
6373 */
6374 if (!reg->registry->reg.ust->metadata_key) {
6375 /* Skip since no metadata is present. */
6376 continue;
6377 }
6378
5c408ad8
JD
6379 (void) push_metadata(reg->registry->reg.ust, usess->consumer);
6380
6381 ret = consumer_rotate_channel(socket,
6382 reg->registry->reg.ust->metadata_key,
6383 usess->uid, usess->gid,
e5148e25
JG
6384 usess->consumer,
6385 /* is_metadata_channel */ true);
5c408ad8 6386 if (ret < 0) {
fbb8cd76 6387 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
5c408ad8
JD
6388 goto error;
6389 }
5c408ad8
JD
6390 }
6391 break;
6392 }
6393 case LTTNG_BUFFER_PER_PID:
6394 {
6395 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
6396 struct consumer_socket *socket;
6397 struct lttng_ht_iter chan_iter;
6398 struct ust_app_channel *ua_chan;
6399 struct ust_app_session *ua_sess;
6400 struct ust_registry_session *registry;
6401
6402 ua_sess = lookup_session_by_app(usess, app);
6403 if (!ua_sess) {
6404 /* Session not associated with this app. */
6405 continue;
6406 }
5c408ad8
JD
6407
6408 /* Get the right consumer socket for the application. */
6409 socket = consumer_find_socket_by_bitness(app->bits_per_long,
6410 usess->consumer);
6411 if (!socket) {
fbb8cd76 6412 cmd_ret = LTTNG_ERR_INVALID;
5c408ad8
JD
6413 goto error;
6414 }
6415
6416 registry = get_session_registry(ua_sess);
6417 if (!registry) {
fbb8cd76
MD
6418 DBG("Application session is being torn down. Skip application.");
6419 continue;
5c408ad8
JD
6420 }
6421
5c408ad8
JD
6422 /* Rotate the data channels. */
6423 cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
6424 ua_chan, node.node) {
c51311d6
JG
6425 ret = consumer_rotate_channel(socket,
6426 ua_chan->key,
6427 ua_sess->effective_credentials
6428 .uid,
6429 ua_sess->effective_credentials
6430 .gid,
e5148e25
JG
6431 ua_sess->consumer,
6432 /* is_metadata_channel */ false);
5c408ad8 6433 if (ret < 0) {
fbb8cd76
MD
6434 /* Per-PID buffer and application going away. */
6435 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
6436 continue;
6437 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
5c408ad8
JD
6438 goto error;
6439 }
6440 }
6441
6442 /* Rotate the metadata channel. */
6443 (void) push_metadata(registry, usess->consumer);
c51311d6
JG
6444 ret = consumer_rotate_channel(socket,
6445 registry->metadata_key,
6446 ua_sess->effective_credentials.uid,
6447 ua_sess->effective_credentials.gid,
e5148e25
JG
6448 ua_sess->consumer,
6449 /* is_metadata_channel */ true);
5c408ad8 6450 if (ret < 0) {
fbb8cd76
MD
6451 /* Per-PID buffer and application going away. */
6452 if (ret == -LTTNG_ERR_CHAN_NOT_FOUND)
6453 continue;
6454 cmd_ret = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
5c408ad8
JD
6455 goto error;
6456 }
5c408ad8
JD
6457 }
6458 break;
6459 }
6460 default:
6461 assert(0);
6462 break;
6463 }
6464
fbb8cd76 6465 cmd_ret = LTTNG_OK;
5c408ad8
JD
6466
6467error:
6468 rcu_read_unlock();
fbb8cd76 6469 return cmd_ret;
5c408ad8 6470}
e5148e25
JG
6471
6472enum lttng_error_code ust_app_create_channel_subdirectories(
6473 const struct ltt_ust_session *usess)
6474{
6475 enum lttng_error_code ret = LTTNG_OK;
6476 struct lttng_ht_iter iter;
6477 enum lttng_trace_chunk_status chunk_status;
6478 char *pathname_index;
6479 int fmt_ret;
6480
6481 assert(usess->current_trace_chunk);
6482 rcu_read_lock();
6483
6484 switch (usess->buffer_type) {
6485 case LTTNG_BUFFER_PER_UID:
6486 {
6487 struct buffer_reg_uid *reg;
6488
6489 cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
6490 fmt_ret = asprintf(&pathname_index,
6491 DEFAULT_UST_TRACE_DIR DEFAULT_UST_TRACE_UID_PATH "/" DEFAULT_INDEX_DIR,
6492 reg->uid, reg->bits_per_long);
6493 if (fmt_ret < 0) {
6494 ERR("Failed to format channel index directory");
6495 ret = LTTNG_ERR_CREATE_DIR_FAIL;
6496 goto error;
6497 }
6498
6499 /*
6500 * Create the index subdirectory which will take care
6501 * of implicitly creating the channel's path.
6502 */
6503 chunk_status = lttng_trace_chunk_create_subdirectory(
6504 usess->current_trace_chunk,
6505 pathname_index);
6506 free(pathname_index);
6507 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
6508 ret = LTTNG_ERR_CREATE_DIR_FAIL;
6509 goto error;
6510 }
6511 }
6512 break;
6513 }
6514 case LTTNG_BUFFER_PER_PID:
6515 {
6516 struct ust_app *app;
6517
8366341d
MD
6518 /*
6519 * Create the toplevel ust/ directory in case no apps are running.
6520 */
6521 chunk_status = lttng_trace_chunk_create_subdirectory(
6522 usess->current_trace_chunk,
6523 DEFAULT_UST_TRACE_DIR);
6524 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
6525 ret = LTTNG_ERR_CREATE_DIR_FAIL;
6526 goto error;
6527 }
6528
e5148e25
JG
6529 cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
6530 pid_n.node) {
6531 struct ust_app_session *ua_sess;
6532 struct ust_registry_session *registry;
6533
6534 ua_sess = lookup_session_by_app(usess, app);
6535 if (!ua_sess) {
6536 /* Session not associated with this app. */
6537 continue;
6538 }
6539
6540 registry = get_session_registry(ua_sess);
6541 if (!registry) {
6542 DBG("Application session is being torn down. Skip application.");
6543 continue;
6544 }
6545
6546 fmt_ret = asprintf(&pathname_index,
6547 DEFAULT_UST_TRACE_DIR "%s/" DEFAULT_INDEX_DIR,
6548 ua_sess->path);
6549 if (fmt_ret < 0) {
6550 ERR("Failed to format channel index directory");
6551 ret = LTTNG_ERR_CREATE_DIR_FAIL;
6552 goto error;
6553 }
6554 /*
6555 * Create the index subdirectory which will take care
6556 * of implicitly creating the channel's path.
6557 */
6558 chunk_status = lttng_trace_chunk_create_subdirectory(
6559 usess->current_trace_chunk,
6560 pathname_index);
6561 free(pathname_index);
6562 if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) {
6563 ret = LTTNG_ERR_CREATE_DIR_FAIL;
6564 goto error;
6565 }
6566 }
6567 break;
6568 }
6569 default:
6570 abort();
6571 }
6572
6573 ret = LTTNG_OK;
6574error:
6575 rcu_read_unlock();
6576 return ret;
6577}
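/*
 * For illustration only: with the default macro values, the per-UID branch
 * above typically creates an index directory resembling
 * "ust/uid/<uid>/<bitness>-bit/index", while the per-PID branch appends the
 * application session path under the UST trace directory. The exact layout
 * depends on the DEFAULT_* definitions and on ua_sess->path.
 */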