src/common/ust-consumer/ust-consumer.cpp
/*
 * Copyright (C) 2011 EfficiOS Inc.
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2017 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 */

#define _LGPL_SOURCE
#include <lttng/ust-ctl.h>
#include <lttng/ust-sigbus.h>
#include <poll.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <inttypes.h>
#include <unistd.h>
#include <urcu/list.h>
#include <signal.h>
#include <stdbool.h>
#include <stdint.h>

#include <bin/lttng-consumerd/health-consumerd.hpp>
#include <common/common.hpp>
#include <common/sessiond-comm/sessiond-comm.hpp>
#include <common/relayd/relayd.hpp>
#include <common/compat/fcntl.hpp>
#include <common/compat/endian.hpp>
#include <common/consumer/consumer-metadata-cache.hpp>
#include <common/consumer/consumer-stream.hpp>
#include <common/consumer/consumer-timer.hpp>
#include <common/utils.hpp>
#include <common/index/index.hpp>
#include <common/consumer/consumer.hpp>
#include <common/shm.hpp>
#include <common/optional.hpp>

#include "ust-consumer.hpp"
#define INT_MAX_STR_LEN 12 /* longest int: "-2147483648" (11 chars) + '\0' */

extern struct lttng_consumer_global_data the_consumer_data;
extern int consumer_poll_timeout;

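/*
 * Define the SIGBUS handling state used by liblttng-ust-ctl: accessing a
 * ring buffer mapping can raise SIGBUS if the traced application dies and
 * its shared memory is truncated; this state lets the library recover.
 */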
LTTNG_EXPORT DEFINE_LTTNG_UST_SIGBUS_STATE();

/*
 * Add channel to internal consumer state.
 *
 * Returns 0 on success or else a negative value.
 */
static int add_channel(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;

	LTTNG_ASSERT(channel);
	LTTNG_ASSERT(ctx);

	if (ctx->on_recv_channel != NULL) {
		ret = ctx->on_recv_channel(channel);
		if (ret == 0) {
			ret = consumer_add_channel(channel, ctx);
		} else if (ret < 0) {
			/* Most likely an ENOMEM. */
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
			goto error;
		}
	} else {
		ret = consumer_add_channel(channel, ctx);
	}

	DBG("UST consumer channel added (key: %" PRIu64 ")", channel->key);

error:
	return ret;
}

/*
 * Allocate and return a consumer stream object. If _alloc_ret is not NULL,
 * the error value, if any, is stored in it; otherwise it is left untouched.
 *
 * Return NULL on error, else the newly allocated stream object.
 */
static struct lttng_consumer_stream *allocate_stream(int cpu, int key,
		struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx, int *_alloc_ret)
{
	int alloc_ret;
	struct lttng_consumer_stream *stream = NULL;

	LTTNG_ASSERT(channel);
	LTTNG_ASSERT(ctx);

	stream = consumer_stream_create(
			channel,
			channel->key,
			key,
			channel->name,
			channel->relayd_id,
			channel->session_id,
			channel->trace_chunk,
			cpu,
			&alloc_ret,
			channel->type,
			channel->monitor);
	if (stream == NULL) {
		switch (alloc_ret) {
		case -ENOENT:
			/*
			 * We could not find the channel. Can happen if cpu hotplug
			 * happens while tearing down.
			 */
			DBG3("Could not find channel");
			break;
		case -ENOMEM:
		case -EINVAL:
		default:
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
			break;
		}
		goto error;
	}

	consumer_stream_update_channel_attributes(stream, channel);

error:
	if (_alloc_ret) {
		*_alloc_ret = alloc_ret;
	}
	return stream;
}

/*
 * Send the given stream pointer to the corresponding thread.
 *
 * Returns 0 on success else a negative value.
 */
static int send_stream_to_thread(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	int ret;
	struct lttng_pipe *stream_pipe;

	/* Get the right pipe where the stream will be sent. */
	if (stream->metadata_flag) {
		consumer_add_metadata_stream(stream);
		stream_pipe = ctx->consumer_metadata_pipe;
	} else {
		consumer_add_data_stream(stream);
		stream_pipe = ctx->consumer_data_pipe;
	}

	/*
	 * From this point on, the stream's ownership has been moved away from
	 * the channel and it becomes globally visible. Hence, remove it from
	 * the local stream list to prevent the stream from being both local and
	 * global.
	 */
	stream->globally_visible = 1;
	cds_list_del_init(&stream->send_node);

	ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream));
	if (ret < 0) {
		ERR("Consumer write %s stream to pipe %d",
				stream->metadata_flag ? "metadata" : "data",
				lttng_pipe_get_writefd(stream_pipe));
		if (stream->metadata_flag) {
			consumer_del_stream_for_metadata(stream);
		} else {
			consumer_del_stream_for_data(stream);
		}
		goto error;
	}

error:
	return ret;
}

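/*
 * Build the shm path of a stream by appending the CPU number to the
 * channel's shm path (for instance, a shm_path of "/tmp/shm" with cpu 2
 * yields "/tmp/shm2").
 *
 * Return 0 on success else a negative value.
 */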
static
int get_stream_shm_path(char *stream_shm_path, const char *shm_path, int cpu)
{
	char cpu_nr[INT_MAX_STR_LEN]; /* int max len */
	int ret;

	strncpy(stream_shm_path, shm_path, PATH_MAX);
	stream_shm_path[PATH_MAX - 1] = '\0';
	ret = snprintf(cpu_nr, INT_MAX_STR_LEN, "%i", cpu);
	if (ret < 0) {
		PERROR("snprintf");
		goto end;
	}
	strncat(stream_shm_path, cpu_nr,
			PATH_MAX - strlen(stream_shm_path) - 1);
	ret = 0;
end:
	return ret;
}

/*
 * Create streams for the given channel using liblttng-ust-ctl.
 * The channel lock must be acquired by the caller.
 *
 * Return 0 on success else a negative value.
 */
static int create_ust_streams(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret, cpu = 0;
	struct lttng_ust_ctl_consumer_stream *ustream;
	struct lttng_consumer_stream *stream;
	pthread_mutex_t *current_stream_lock = NULL;

	LTTNG_ASSERT(channel);
	LTTNG_ASSERT(ctx);

	/*
	 * Loop while a stream is available from ustctl. When NULL is returned,
	 * we have reached the end of the possible streams for this channel.
	 */
	while ((ustream = lttng_ust_ctl_create_stream(channel->uchan, cpu))) {
		int wait_fd;
		int ust_metadata_pipe[2];

		health_code_update();

		if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && channel->monitor) {
			ret = utils_create_pipe_cloexec_nonblock(ust_metadata_pipe);
			if (ret < 0) {
				ERR("Create ust metadata poll pipe");
				goto error;
			}
			wait_fd = ust_metadata_pipe[0];
		} else {
			wait_fd = lttng_ust_ctl_stream_get_wait_fd(ustream);
		}

		/* Allocate consumer stream object. */
		stream = allocate_stream(cpu, wait_fd, channel, ctx, &ret);
		if (!stream) {
			goto error_alloc;
		}
		stream->ustream = ustream;
		/*
		 * Store it so we can save multiple function calls afterwards since
		 * this value is used heavily in the stream threads. This is UST
		 * specific so this is why it's done after allocation.
		 */
		stream->wait_fd = wait_fd;

		/*
		 * Increment channel refcount since the channel reference has now been
		 * assigned in the allocation process above.
		 */
		if (stream->chan->monitor) {
			uatomic_inc(&stream->chan->refcount);
		}

		pthread_mutex_lock(&stream->lock);
		current_stream_lock = &stream->lock;
		/*
		 * Order is important this is why a list is used. On error, the caller
		 * should clean this list.
		 */
		cds_list_add_tail(&stream->send_node, &channel->streams.head);

		ret = lttng_ust_ctl_get_max_subbuf_size(stream->ustream,
				&stream->max_sb_size);
		if (ret < 0) {
			ERR("lttng_ust_ctl_get_max_subbuf_size failed for stream %s",
					stream->name);
			goto error;
		}

		/* Do actions once stream has been received. */
		if (ctx->on_recv_stream) {
			ret = ctx->on_recv_stream(stream);
			if (ret < 0) {
				goto error;
			}
		}

		DBG("UST consumer add stream %s (key: %" PRIu64 ") with relayd id %" PRIu64,
				stream->name, stream->key, stream->relayd_stream_id);

		/* Set next CPU stream. */
		channel->streams.count = ++cpu;

		/* Keep stream reference when creating metadata. */
		if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) {
			channel->metadata_stream = stream;
			if (channel->monitor) {
				/* Set metadata poll pipe if we created one */
				memcpy(stream->ust_metadata_poll_pipe,
						ust_metadata_pipe,
						sizeof(ust_metadata_pipe));
			}
		}
		pthread_mutex_unlock(&stream->lock);
		current_stream_lock = NULL;
	}

	return 0;

error:
error_alloc:
	if (current_stream_lock) {
		pthread_mutex_unlock(current_stream_lock);
	}
	return ret;
}

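/*
 * Open a shared memory file descriptor backing the stream of the given CPU.
 * Without an explicit shm_path on the channel, an anonymous shm object is
 * used; otherwise the per-CPU file is created (O_CREAT | O_EXCL) with the
 * session credentials through the run_as worker.
 *
 * Return a valid file descriptor on success else a negative value.
 */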
static int open_ust_stream_fd(struct lttng_consumer_channel *channel, int cpu,
		const struct lttng_credentials *session_credentials)
{
	char shm_path[PATH_MAX];
	int ret;

	if (!channel->shm_path[0]) {
		return shm_create_anonymous("ust-consumer");
	}
	ret = get_stream_shm_path(shm_path, channel->shm_path, cpu);
	if (ret) {
		goto error_shm_path;
	}
	return run_as_open(shm_path,
			O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR,
			lttng_credentials_get_uid(session_credentials),
			lttng_credentials_get_gid(session_credentials));

error_shm_path:
	return -1;
}

/*
 * Create a UST channel with the given attributes and send it to the session
 * daemon using the ust ctl API.
 *
 * Return 0 on success or else a negative value.
 */
static int create_ust_channel(struct lttng_consumer_channel *channel,
		struct lttng_ust_ctl_consumer_channel_attr *attr,
		struct lttng_ust_ctl_consumer_channel **ust_chanp)
{
	int ret, nr_stream_fds, i, j;
	int *stream_fds;
	struct lttng_ust_ctl_consumer_channel *ust_channel;

	LTTNG_ASSERT(channel);
	LTTNG_ASSERT(attr);
	LTTNG_ASSERT(ust_chanp);
	LTTNG_ASSERT(channel->buffer_credentials.is_set);

	DBG3("Creating channel to ustctl with attr: [overwrite: %d, "
			"subbuf_size: %" PRIu64 ", num_subbuf: %" PRIu64 ", "
			"switch_timer_interval: %u, read_timer_interval: %u, "
			"output: %d, type: %d", attr->overwrite, attr->subbuf_size,
			attr->num_subbuf, attr->switch_timer_interval,
			attr->read_timer_interval, attr->output, attr->type);

	if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA)
		nr_stream_fds = 1;
	else
		nr_stream_fds = lttng_ust_ctl_get_nr_stream_per_channel();
	stream_fds = calloc<int>(nr_stream_fds);
	if (!stream_fds) {
		ret = -1;
		goto error_alloc;
	}
	for (i = 0; i < nr_stream_fds; i++) {
		stream_fds[i] = open_ust_stream_fd(channel, i,
				&channel->buffer_credentials.value);
		if (stream_fds[i] < 0) {
			ret = -1;
			goto error_open;
		}
	}
	ust_channel = lttng_ust_ctl_create_channel(attr, stream_fds, nr_stream_fds);
	if (!ust_channel) {
		ret = -1;
		goto error_create;
	}
	channel->nr_stream_fds = nr_stream_fds;
	channel->stream_fds = stream_fds;
	*ust_chanp = ust_channel;

	return 0;

error_create:
error_open:
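	/* Close the stream fds opened so far and unlink their shm files, if any. */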
	for (j = i - 1; j >= 0; j--) {
		int closeret;

		closeret = close(stream_fds[j]);
		if (closeret) {
			PERROR("close");
		}
		if (channel->shm_path[0]) {
			char shm_path[PATH_MAX];

			closeret = get_stream_shm_path(shm_path,
					channel->shm_path, j);
			if (closeret) {
				ERR("Cannot get stream shm path");
			}
			closeret = run_as_unlink(shm_path,
					lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR(
							channel->buffer_credentials)),
					lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR(
							channel->buffer_credentials)));
			if (closeret) {
				PERROR("unlink %s", shm_path);
			}
		}
	}
	/* Try to rmdir all directories under shm_path root. */
	if (channel->root_shm_path[0]) {
		(void) run_as_rmdir_recursive(channel->root_shm_path,
				lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR(
						channel->buffer_credentials)),
				lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR(
						channel->buffer_credentials)),
				LTTNG_DIRECTORY_HANDLE_SKIP_NON_EMPTY_FLAG);
	}
	free(stream_fds);
error_alloc:
	return ret;
}

/*
 * Send a single given stream to the session daemon using the sock.
 *
 * Return 0 on success else a negative value.
 */
static int send_sessiond_stream(int sock, struct lttng_consumer_stream *stream)
{
	int ret;

	LTTNG_ASSERT(stream);
	LTTNG_ASSERT(sock >= 0);

	DBG("UST consumer sending stream %" PRIu64 " to sessiond", stream->key);

	/* Send stream to session daemon. */
	ret = lttng_ust_ctl_send_stream_to_sessiond(sock, stream->ustream);
	if (ret < 0) {
		goto error;
	}

error:
	return ret;
}

/*
 * Send channel to sessiond and relayd if applicable.
 *
 * Return 0 on success or else a negative value.
 */
static int send_channel_to_sessiond_and_relayd(int sock,
		struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx, int *relayd_error)
{
	int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttng_consumer_stream *stream;
	uint64_t net_seq_idx = -1ULL;

	LTTNG_ASSERT(channel);
	LTTNG_ASSERT(ctx);
	LTTNG_ASSERT(sock >= 0);

	DBG("UST consumer sending channel %s to sessiond", channel->name);

	if (channel->relayd_id != (uint64_t) -1ULL) {
		cds_list_for_each_entry(stream, &channel->streams.head, send_node) {

			health_code_update();

			/* Try to send the stream to the relayd if one is available. */
			DBG("Sending stream %" PRIu64 " of channel \"%s\" to relayd",
					stream->key, channel->name);
			ret = consumer_send_relayd_stream(stream, stream->chan->pathname);
			if (ret < 0) {
				/*
				 * Flag that the relayd was the problem here probably due to a
				 * communication error on the socket.
				 */
				if (relayd_error) {
					*relayd_error = 1;
				}
				ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
			}
			if (net_seq_idx == -1ULL) {
				net_seq_idx = stream->net_seq_idx;
			}
		}
	}

	/* Inform sessiond that we are about to send channel and streams. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0 || ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
		/*
		 * Either the session daemon is not responding or the relayd died so we
		 * stop now.
		 */
		goto error;
	}

	/* Send channel to sessiond. */
	ret = lttng_ust_ctl_send_channel_to_sessiond(sock, channel->uchan);
	if (ret < 0) {
		goto error;
	}

	ret = lttng_ust_ctl_channel_close_wakeup_fd(channel->uchan);
	if (ret < 0) {
		goto error;
	}

	/* The channel was sent successfully to the sessiond at this point. */
	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {

		health_code_update();

		/* Send stream to session daemon. */
		ret = send_sessiond_stream(sock, stream);
		if (ret < 0) {
			goto error;
		}
	}

	/* Tell sessiond there is no more stream. */
	ret = lttng_ust_ctl_send_stream_to_sessiond(sock, NULL);
	if (ret < 0) {
		goto error;
	}

	DBG("UST consumer NULL stream sent to sessiond");

	return 0;

error:
	if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
		ret = -1;
	}
	return ret;
}

/*
 * Creates a channel and streams and adds the channel to the internal channel
 * state. The created stream must ONLY be sent once the GET_CHANNEL command is
 * received.
 *
 * Return 0 on success or else, a negative value is returned and the channel
 * MUST be destroyed by consumer_del_channel().
 */
static int ask_channel(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *channel,
		struct lttng_ust_ctl_consumer_channel_attr *attr)
{
	int ret;

	LTTNG_ASSERT(ctx);
	LTTNG_ASSERT(channel);
	LTTNG_ASSERT(attr);

	/*
	 * This value is still used by the kernel consumer since, for the kernel,
	 * the stream ownership is not IN the consumer so we need to know the
	 * number of streams left to initialize so we can know when to delete the
	 * channel (see consumer.c).
	 *
	 * As for the user space tracer, the consumer creates and sends the
	 * streams to the session daemon, which only sends them to the application
	 * once every stream of a channel is received, making this value useless
	 * because they will be added to the poll thread before the application
	 * receives them. This ensures that a stream can not hang up during
	 * initialization of a channel.
	 */
	channel->nb_init_stream_left = 0;

	/* The reply msg status is handled in the following call. */
	ret = create_ust_channel(channel, attr, &channel->uchan);
	if (ret < 0) {
		goto end;
	}

	channel->wait_fd = lttng_ust_ctl_channel_get_wait_fd(channel->uchan);

	/*
	 * For the snapshots (no monitor), we create the metadata streams
	 * on demand, not during the channel creation.
	 */
	if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && !channel->monitor) {
		ret = 0;
		goto end;
	}

	/* Open all streams for this channel. */
	pthread_mutex_lock(&channel->lock);
	ret = create_ust_streams(channel, ctx);
	pthread_mutex_unlock(&channel->lock);
	if (ret < 0) {
		goto end;
	}

end:
	return ret;
}

/*
 * Send all streams of a channel to the right thread handling it.
 *
 * On error, return a negative value else 0 on success.
 */
static int send_streams_to_thread(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_consumer_stream *stream, *stmp;

	LTTNG_ASSERT(channel);
	LTTNG_ASSERT(ctx);

	/* Send streams to the corresponding thread. */
	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {

		health_code_update();

		/* Sending the stream to the thread. */
		ret = send_stream_to_thread(stream, ctx);
		if (ret < 0) {
			/*
			 * If we are unable to send the stream to the thread, there is
			 * a big problem so just stop everything.
			 */
			goto error;
		}
	}

error:
	return ret;
}

/*
 * Flush channel's streams using the given key to retrieve the channel.
 *
 * Return 0 on success else an LTTng error code.
 */
static int flush_channel(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;
	struct lttng_ht *ht;
	struct lttng_ht_iter iter;

	DBG("UST consumer flush channel key %" PRIu64, chan_key);

	rcu_read_lock();
	channel = consumer_find_channel(chan_key);
	if (!channel) {
		ERR("UST consumer flush channel %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	ht = the_consumer_data.stream_per_chan_id_ht;

	/* For each stream of the channel id, flush it. */
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
			&channel->key, &iter.iter, stream, node_channel_id.node) {

		health_code_update();

		pthread_mutex_lock(&stream->lock);

		/*
		 * Protect against concurrent teardown of a stream.
		 */
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}

		if (!stream->quiescent) {
			ret = lttng_ust_ctl_flush_buffer(stream->ustream, 0);
			if (ret) {
				ERR("Failed to flush buffer while flushing channel: channel key = %" PRIu64 ", channel name = '%s'",
						chan_key, channel->name);
				ret = LTTNG_ERR_BUFFER_FLUSH_FAILED;
				pthread_mutex_unlock(&stream->lock);
				goto error;
			}
			stream->quiescent = true;
		}
next:
		pthread_mutex_unlock(&stream->lock);
	}
error:
	rcu_read_unlock();
	return ret;
}

/*
 * Clear quiescent state from channel's streams using the given key to
 * retrieve the channel.
 *
 * Return 0 on success else an LTTng error code.
 */
static int clear_quiescent_channel(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;
	struct lttng_consumer_stream *stream;
	struct lttng_ht *ht;
	struct lttng_ht_iter iter;

	DBG("UST consumer clear quiescent channel key %" PRIu64, chan_key);

	rcu_read_lock();
	channel = consumer_find_channel(chan_key);
	if (!channel) {
		ERR("UST consumer clear quiescent channel %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	ht = the_consumer_data.stream_per_chan_id_ht;

	/* For each stream of the channel id, clear quiescent state. */
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
			&channel->key, &iter.iter, stream, node_channel_id.node) {

		health_code_update();

		pthread_mutex_lock(&stream->lock);
		stream->quiescent = false;
		pthread_mutex_unlock(&stream->lock);
	}
error:
	rcu_read_unlock();
	return ret;
}

/*
 * Close metadata stream wakeup_fd using the given key to retrieve the channel.
 *
 * Return 0 on success else an LTTng error code.
 */
static int close_metadata(uint64_t chan_key)
{
	int ret = 0;
	struct lttng_consumer_channel *channel;
	unsigned int channel_monitor;

	DBG("UST consumer close metadata key %" PRIu64, chan_key);

	channel = consumer_find_channel(chan_key);
	if (!channel) {
		/*
		 * This is possible if the metadata thread has issued a delete because
		 * the endpoint of the stream hung up. There is no way the session
		 * daemon can know about it, thus use a DBG instead of an actual
		 * error.
		 */
		DBG("UST consumer close metadata %" PRIu64 " not found", chan_key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto error;
	}

	pthread_mutex_lock(&the_consumer_data.lock);
	pthread_mutex_lock(&channel->lock);
	channel_monitor = channel->monitor;
	if (cds_lfht_is_node_deleted(&channel->node.node)) {
		goto error_unlock;
	}

	lttng_ustconsumer_close_metadata(channel);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&the_consumer_data.lock);

	/*
	 * The ownership of a metadata channel depends on the type of
	 * session to which it belongs. In effect, the monitor flag is checked
	 * to determine if this metadata channel is in "snapshot" mode or not.
	 *
	 * In the non-snapshot case, the metadata channel is created along with
	 * a single stream which will remain present until the metadata channel
	 * is destroyed (on the destruction of its session). In this case, the
	 * metadata stream is "monitored" by the metadata poll thread and holds
	 * the ownership of its channel.
	 *
	 * Closing the metadata will cause the metadata stream's "metadata poll
	 * pipe" to be closed. Closing this pipe will wake-up the metadata poll
	 * thread which will teardown the metadata stream which, in return,
	 * deletes the metadata channel.
	 *
	 * In the snapshot case, the metadata stream is created and destroyed
	 * on every snapshot record. Since the channel doesn't have an owner
	 * other than the session daemon, it is safe to destroy it immediately
	 * on reception of the CLOSE_METADATA command.
	 */
	if (!channel_monitor) {
		/*
		 * The channel and consumer_data locks must be
		 * released before this call since consumer_del_channel
		 * re-acquires the channel and consumer_data locks to teardown
		 * the channel and queue its reclamation by the "call_rcu"
		 * worker thread.
		 */
		consumer_del_channel(channel);
	}

	return ret;
error_unlock:
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&the_consumer_data.lock);
error:
	return ret;
}

/*
 * RCU read side lock MUST be acquired before calling this function.
 *
 * Return 0 on success else an LTTng error code.
 */
static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key)
{
	int ret;
	struct lttng_consumer_channel *metadata;

	ASSERT_RCU_READ_LOCKED();

	DBG("UST consumer setup metadata key %" PRIu64, key);

	metadata = consumer_find_channel(key);
	if (!metadata) {
		ERR("UST consumer push metadata %" PRIu64 " not found", key);
		ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
		goto end;
	}

	/*
	 * In no monitor mode, the metadata channel has no stream(s) so skip the
	 * ownership transfer to the metadata thread.
	 */
	if (!metadata->monitor) {
		DBG("Metadata channel in no monitor");
		ret = 0;
		goto end;
	}

	/*
	 * Send metadata stream to relayd if one is available. Availability is
	 * known if the stream is still in the list of the channel.
	 */
	if (cds_list_empty(&metadata->streams.head)) {
		ERR("Metadata channel key %" PRIu64 ", no stream available.", key);
		ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
		goto error_no_stream;
	}

	/* Send metadata stream to relayd if needed. */
	if (metadata->metadata_stream->net_seq_idx != (uint64_t) -1ULL) {
		ret = consumer_send_relayd_stream(metadata->metadata_stream,
				metadata->pathname);
		if (ret < 0) {
			ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
			goto error;
		}
		ret = consumer_send_relayd_streams_sent(
				metadata->metadata_stream->net_seq_idx);
		if (ret < 0) {
			ret = LTTCOMM_CONSUMERD_RELAYD_FAIL;
			goto error;
		}
	}

	/*
	 * Ownership of metadata stream is passed along. Freeing is handled by
	 * the callee.
	 */
	ret = send_streams_to_thread(metadata, ctx);
	if (ret < 0) {
		/*
		 * If we are unable to send the stream to the thread, there is
		 * a big problem so just stop everything.
		 */
		ret = LTTCOMM_CONSUMERD_FATAL;
		goto send_streams_error;
	}
	/* List MUST be empty after or else it could be reused. */
	LTTNG_ASSERT(cds_list_empty(&metadata->streams.head));

	ret = 0;
	goto end;

error:
	/*
	 * Delete metadata channel on error. At this point, the metadata stream can
	 * NOT be monitored by the metadata thread thus having the guarantee that
	 * the stream is still in the local stream list of the channel. This call
	 * will make sure to clean that list.
	 */
	consumer_stream_destroy(metadata->metadata_stream, NULL);
	metadata->metadata_stream = NULL;
send_streams_error:
error_no_stream:
end:
	return ret;
}

/*
 * Snapshot the whole metadata.
 * RCU read-side lock must be held by the caller.
 *
 * Returns 0 on success, < 0 on error
 */
static int snapshot_metadata(struct lttng_consumer_channel *metadata_channel,
		uint64_t key, char *path, uint64_t relayd_id,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_consumer_stream *metadata_stream;

	LTTNG_ASSERT(path);
	LTTNG_ASSERT(ctx);
	ASSERT_RCU_READ_LOCKED();

	DBG("UST consumer snapshot metadata with key %" PRIu64 " at path %s",
			key, path);

	rcu_read_lock();

	LTTNG_ASSERT(!metadata_channel->monitor);

	health_code_update();

	/*
	 * Ask the sessiond if we have new metadata waiting and update the
	 * consumer metadata cache.
	 */
	ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 1);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/*
	 * The metadata stream is NOT created in no monitor mode when the channel
	 * is created on a sessiond ask channel command.
	 */
	ret = create_ust_streams(metadata_channel, ctx);
	if (ret < 0) {
		goto error;
	}

	metadata_stream = metadata_channel->metadata_stream;
	LTTNG_ASSERT(metadata_stream);

	metadata_stream->read_subbuffer_ops.lock(metadata_stream);
	if (relayd_id != (uint64_t) -1ULL) {
		metadata_stream->net_seq_idx = relayd_id;
		ret = consumer_send_relayd_stream(metadata_stream, path);
	} else {
		ret = consumer_stream_create_output_files(metadata_stream,
				false);
	}
	if (ret < 0) {
		goto error_stream;
	}

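	/* Read metadata sub-buffers until the stream is exhausted. */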
	do {
		health_code_update();
		ret = lttng_consumer_read_subbuffer(metadata_stream, ctx, true);
		if (ret < 0) {
			goto error_stream;
		}
	} while (ret > 0);

error_stream:
	metadata_stream->read_subbuffer_ops.unlock(metadata_stream);
	/*
	 * Clean up the stream completely because the next snapshot will use a
	 * new metadata stream.
	 */
	consumer_stream_destroy(metadata_stream, NULL);
	metadata_channel->metadata_stream = NULL;

error:
	rcu_read_unlock();
	return ret;
}

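/*
 * Compute the address of the current sub-buffer in the stream's memory
 * mapping, that is, the mmap base plus the current read offset.
 *
 * Return 0 on success and set *addr, else a negative value.
 */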
static
int get_current_subbuf_addr(struct lttng_consumer_stream *stream,
		const char **addr)
{
	int ret;
	unsigned long mmap_offset;
	const char *mmap_base;

	mmap_base = (const char *) lttng_ust_ctl_get_mmap_base(stream->ustream);
	if (!mmap_base) {
		ERR("Failed to get mmap base for stream `%s`",
				stream->name);
		ret = -EPERM;
		goto error;
	}

	ret = lttng_ust_ctl_get_mmap_read_offset(stream->ustream, &mmap_offset);
	if (ret != 0) {
		ERR("Failed to get mmap offset for stream `%s`", stream->name);
		ret = -EINVAL;
		goto error;
	}

	*addr = mmap_base + mmap_offset;
error:
	return ret;
}

/*
 * Take a snapshot of all the streams of a channel.
 * RCU read-side lock and the channel lock must be held by the caller.
 *
 * Returns 0 on success, < 0 on error
 */
static int snapshot_channel(struct lttng_consumer_channel *channel,
		uint64_t key, char *path, uint64_t relayd_id,
		uint64_t nb_packets_per_stream,
		struct lttng_consumer_local_data *ctx)
{
	int ret;
	unsigned use_relayd = 0;
	unsigned long consumed_pos, produced_pos;
	struct lttng_consumer_stream *stream;

	LTTNG_ASSERT(path);
	LTTNG_ASSERT(ctx);
	ASSERT_RCU_READ_LOCKED();

	rcu_read_lock();

	if (relayd_id != (uint64_t) -1ULL) {
		use_relayd = 1;
	}

	LTTNG_ASSERT(!channel->monitor);
	DBG("UST consumer snapshot channel %" PRIu64, key);

	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
		health_code_update();

		/* Lock stream because we are about to change its state. */
		pthread_mutex_lock(&stream->lock);
		LTTNG_ASSERT(channel->trace_chunk);
		if (!lttng_trace_chunk_get(channel->trace_chunk)) {
			/*
			 * Can't happen barring an internal error as the channel
			 * holds a reference to the trace chunk.
			 */
			ERR("Failed to acquire reference to channel's trace chunk");
			ret = -1;
			goto error_unlock;
		}
		LTTNG_ASSERT(!stream->trace_chunk);
		stream->trace_chunk = channel->trace_chunk;

		stream->net_seq_idx = relayd_id;

		if (use_relayd) {
			ret = consumer_send_relayd_stream(stream, path);
			if (ret < 0) {
				goto error_unlock;
			}
		} else {
			ret = consumer_stream_create_output_files(stream,
					false);
			if (ret < 0) {
				goto error_unlock;
			}
			DBG("UST consumer snapshot stream (%" PRIu64 ")",
					stream->key);
		}

		/*
		 * If tracing is active, we want to perform a "full" buffer flush.
		 * Else, if quiescent, it has already been done by the prior stop.
		 */
		if (!stream->quiescent) {
			ret = lttng_ust_ctl_flush_buffer(stream->ustream, 0);
			if (ret < 0) {
				ERR("Failed to flush buffer during snapshot of channel: channel key = %" PRIu64 ", channel name = '%s'",
						channel->key, channel->name);
				goto error_unlock;
			}
		}

		ret = lttng_ustconsumer_take_snapshot(stream);
		if (ret < 0) {
			ERR("Taking UST snapshot");
			goto error_unlock;
		}

		ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
		if (ret < 0) {
			ERR("Produced UST snapshot position");
			goto error_unlock;
		}

		ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
		if (ret < 0) {
			ERR("Consumerd UST snapshot position");
			goto error_unlock;
		}

		/*
		 * The original value is sent back if max stream size is larger than
		 * the possible size of the snapshot. Also, we assume that the session
		 * daemon should never send a maximum stream size that is lower than
		 * subbuffer size.
		 */
		consumed_pos = consumer_get_consume_start_pos(consumed_pos,
				produced_pos, nb_packets_per_stream,
				stream->max_sb_size);

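		/*
		 * Consume sub-buffers while the consumed position is behind the
		 * produced position; the signed cast copes with position
		 * counter wrap-around.
		 */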
		while ((long) (consumed_pos - produced_pos) < 0) {
			ssize_t read_len;
			unsigned long len, padded_len;
			const char *subbuf_addr;
			struct lttng_buffer_view subbuf_view;

			health_code_update();

			DBG("UST consumer taking snapshot at pos %lu", consumed_pos);

			ret = lttng_ust_ctl_get_subbuf(stream->ustream, &consumed_pos);
			if (ret < 0) {
				if (ret != -EAGAIN) {
					PERROR("lttng_ust_ctl_get_subbuf snapshot");
					goto error_close_stream;
				}
				DBG("UST consumer get subbuf failed. Skipping it.");
				consumed_pos += stream->max_sb_size;
				stream->chan->lost_packets++;
				continue;
			}

			ret = lttng_ust_ctl_get_subbuf_size(stream->ustream, &len);
			if (ret < 0) {
				ERR("Snapshot lttng_ust_ctl_get_subbuf_size");
				goto error_put_subbuf;
			}

			ret = lttng_ust_ctl_get_padded_subbuf_size(stream->ustream, &padded_len);
			if (ret < 0) {
				ERR("Snapshot lttng_ust_ctl_get_padded_subbuf_size");
				goto error_put_subbuf;
			}

			ret = get_current_subbuf_addr(stream, &subbuf_addr);
			if (ret) {
				goto error_put_subbuf;
			}

			subbuf_view = lttng_buffer_view_init(
					subbuf_addr, 0, padded_len);
			read_len = lttng_consumer_on_read_subbuffer_mmap(
					stream, &subbuf_view, padded_len - len);
			if (use_relayd) {
				if (read_len != len) {
					ret = -EPERM;
					goto error_put_subbuf;
				}
			} else {
				if (read_len != padded_len) {
					ret = -EPERM;
					goto error_put_subbuf;
				}
			}

			ret = lttng_ust_ctl_put_subbuf(stream->ustream);
			if (ret < 0) {
				ERR("Snapshot lttng_ust_ctl_put_subbuf");
				goto error_close_stream;
			}
			consumed_pos += stream->max_sb_size;
		}

		/* Simply close the stream so we can use it on the next snapshot. */
		consumer_stream_close(stream);
		pthread_mutex_unlock(&stream->lock);
	}

	rcu_read_unlock();
	return 0;

error_put_subbuf:
	if (lttng_ust_ctl_put_subbuf(stream->ustream) < 0) {
		ERR("Snapshot lttng_ust_ctl_put_subbuf");
	}
error_close_stream:
	consumer_stream_close(stream);
error_unlock:
	pthread_mutex_unlock(&stream->lock);
	rcu_read_unlock();
	return ret;
}

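/*
 * Reset the amount of metadata pushed from the cache so that the metadata
 * poll thread re-consumes the whole cache. The stream lock must be held by
 * the caller.
 */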
static
void metadata_stream_reset_cache_consumed_position(
		struct lttng_consumer_stream *stream)
{
	ASSERT_LOCKED(stream->lock);

	DBG("Reset metadata cache of session %" PRIu64,
			stream->chan->session_id);
	stream->ust_metadata_pushed = 0;
}

/*
 * Receive the metadata updates from the sessiond. Supports receiving
 * overlapping metadata, but it needs to always belong to a contiguous
 * range starting from 0.
 * Be careful about the locks held when calling this function: it needs
 * the metadata cache flush to concurrently progress in order to
 * complete.
 */
int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
		uint64_t len, uint64_t version,
		struct lttng_consumer_channel *channel, int timer, int wait)
{
	int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	char *metadata_str;
	enum consumer_metadata_cache_write_status cache_write_status;

	DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key, len);

	metadata_str = calloc<char>(len);
	if (!metadata_str) {
		PERROR("zmalloc metadata string");
		ret_code = LTTCOMM_CONSUMERD_ENOMEM;
		goto end;
	}

	health_code_update();

	/* Receive metadata string. */
	ret = lttcomm_recv_unix_sock(sock, metadata_str, len);
	if (ret < 0) {
		/* Session daemon is dead so return gracefully. */
		ret_code = ret;
		goto end_free;
	}

	health_code_update();

	pthread_mutex_lock(&channel->metadata_cache->lock);
	cache_write_status = consumer_metadata_cache_write(
			channel->metadata_cache, offset, len, version,
			metadata_str);
	pthread_mutex_unlock(&channel->metadata_cache->lock);
	switch (cache_write_status) {
	case CONSUMER_METADATA_CACHE_WRITE_STATUS_NO_CHANGE:
		/*
		 * The write entirely overlapped with existing contents of the
		 * same metadata version (same content); there is nothing to do.
		 */
		break;
	case CONSUMER_METADATA_CACHE_WRITE_STATUS_INVALIDATED:
		/*
		 * The metadata cache was invalidated (previously pushed
		 * content has been overwritten). Reset the stream's consumed
		 * metadata position to ensure the metadata poll thread consumes
		 * the whole cache.
		 */

		/*
		 * channel::metadata_stream can be null when the metadata
		 * channel is under a snapshot session type. No need to update
		 * the stream position in that scenario.
		 */
		if (channel->metadata_stream != NULL) {
			pthread_mutex_lock(&channel->metadata_stream->lock);
			metadata_stream_reset_cache_consumed_position(
					channel->metadata_stream);
			pthread_mutex_unlock(&channel->metadata_stream->lock);
		} else {
			/* Validate we are in snapshot mode. */
			LTTNG_ASSERT(!channel->monitor);
		}
		/* Fall-through. */
	case CONSUMER_METADATA_CACHE_WRITE_STATUS_APPENDED_CONTENT:
		/*
		 * In both cases, the metadata poll thread has new data to
		 * consume.
		 */
		ret = consumer_metadata_wakeup_pipe(channel);
		if (ret) {
			ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
			goto end_free;
		}
		break;
	case CONSUMER_METADATA_CACHE_WRITE_STATUS_ERROR:
		/* Unable to handle metadata. Notify session daemon. */
		ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
		/*
		 * Skip metadata flush on write error since the offset and len might
		 * not have been updated which could create an infinite loop below when
		 * waiting for the metadata cache to be flushed.
		 */
		goto end_free;
	default:
		abort();
	}

	if (!wait) {
		goto end_free;
	}
	while (consumer_metadata_cache_flushed(channel, offset + len, timer)) {
		DBG("Waiting for metadata to be flushed");

		health_code_update();

		usleep(DEFAULT_METADATA_AVAILABILITY_WAIT_TIME);
	}

end_free:
	free(metadata_str);
end:
	return ret_code;
}

/*
 * Receive command from session daemon and process it.
 *
 * Return 1 on success, 0 on an orderly shutdown of the command socket, or
 * else a negative value.
 */
int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	int ret_func;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct lttcomm_consumer_msg msg;
	struct lttng_consumer_channel *channel = NULL;

	health_code_update();

	{
		ssize_t ret_recv;

		ret_recv = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
		if (ret_recv != sizeof(msg)) {
			DBG("Consumer received unexpected message size %zd (expects %zu)",
					ret_recv, sizeof(msg));
			/*
			 * The ret value might be 0 meaning an orderly shutdown but this
			 * is ok since the caller handles this.
			 */
			if (ret_recv > 0) {
				lttng_consumer_send_error(ctx,
						LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
				ret_recv = -1;
			}
			return ret_recv;
		}
	}

	health_code_update();

	/* Deprecated command. */
	LTTNG_ASSERT(msg.cmd_type != LTTNG_CONSUMER_STOP);

	health_code_update();

	/* Relayd operations need the RCU read-side lock. */
	rcu_read_lock();

	switch (msg.cmd_type) {
	case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
	{
		uint32_t major = msg.u.relayd_sock.major;
		uint32_t minor = msg.u.relayd_sock.minor;
		enum lttcomm_sock_proto protocol =
				(enum lttcomm_sock_proto) msg.u.relayd_sock
						.relayd_socket_protocol;

		/* Session daemon status messages are handled in the following call. */
		consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
				msg.u.relayd_sock.type, ctx, sock,
				consumer_sockpoll, msg.u.relayd_sock.session_id,
				msg.u.relayd_sock.relayd_session_id, major,
				minor, protocol);
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_DESTROY_RELAYD:
	{
		uint64_t index = msg.u.destroy_relayd.net_seq_idx;
		struct consumer_relayd_sock_pair *relayd;

		DBG("UST consumer destroying relayd %" PRIu64, index);

		/* Get relayd reference if it exists. */
		relayd = consumer_find_relayd(index);
		if (relayd == NULL) {
			DBG("Unable to find relayd %" PRIu64, index);
			ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
		}

		/*
		 * Each relayd socket pair has a refcount of streams attached to it
		 * which tells if the relayd is still active or not depending on the
		 * refcount value.
		 *
		 * This will set the destroy flag of the relayd object and destroy it
		 * if the refcount reaches zero when called.
		 *
		 * The destroy can happen either here or when a stream fd hangs up.
		 */
		if (relayd) {
			consumer_flag_relayd_for_destroy(relayd);
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_UPDATE_STREAM:
	{
		rcu_read_unlock();
		return -ENOSYS;
	}
	case LTTNG_CONSUMER_DATA_PENDING:
	{
		int is_data_pending;
		ssize_t ret_send;
		uint64_t id = msg.u.data_pending.session_id;

		DBG("UST consumer data pending command for id %" PRIu64, id);

		is_data_pending = consumer_data_pending(id);

		/* Send back returned value to session daemon */
		ret_send = lttcomm_send_unix_sock(sock, &is_data_pending,
				sizeof(is_data_pending));
		if (ret_send < 0) {
			DBG("Error when sending the data pending ret code: %zd",
					ret_send);
			goto error_fatal;
		}

		/*
		 * No need to send back a status message since the data pending
		 * returned value is the response.
		 */
		break;
	}
	case LTTNG_CONSUMER_ASK_CHANNEL_CREATION:
	{
		int ret_ask_channel, ret_add_channel, ret_send;
		struct lttng_ust_ctl_consumer_channel_attr attr;
		const uint64_t chunk_id = msg.u.ask_channel.chunk_id.value;
		const struct lttng_credentials buffer_credentials = {
			.uid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.ask_channel.buffer_credentials.uid),
			.gid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.ask_channel.buffer_credentials.gid),
		};

		/* Create a plain object and reserve a channel key. */
		channel = consumer_allocate_channel(
				msg.u.ask_channel.key,
				msg.u.ask_channel.session_id,
				msg.u.ask_channel.chunk_id.is_set ?
						&chunk_id : NULL,
				msg.u.ask_channel.pathname,
				msg.u.ask_channel.name,
				msg.u.ask_channel.relayd_id,
				(enum lttng_event_output) msg.u.ask_channel.output,
				msg.u.ask_channel.tracefile_size,
				msg.u.ask_channel.tracefile_count,
				msg.u.ask_channel.session_id_per_pid,
				msg.u.ask_channel.monitor,
				msg.u.ask_channel.live_timer_interval,
				msg.u.ask_channel.is_live,
				msg.u.ask_channel.root_shm_path,
				msg.u.ask_channel.shm_path);
		if (!channel) {
			goto end_channel_error;
		}

		LTTNG_OPTIONAL_SET(&channel->buffer_credentials,
				buffer_credentials);

		/*
		 * Assign UST application UID to the channel. This value is ignored for
		 * per PID buffers. This is specific to UST thus setting this after the
		 * allocation.
		 */
		channel->ust_app_uid = msg.u.ask_channel.ust_app_uid;

		/* Build channel attributes from received message. */
		attr.subbuf_size = msg.u.ask_channel.subbuf_size;
		attr.num_subbuf = msg.u.ask_channel.num_subbuf;
		attr.overwrite = msg.u.ask_channel.overwrite;
		attr.switch_timer_interval = msg.u.ask_channel.switch_timer_interval;
		attr.read_timer_interval = msg.u.ask_channel.read_timer_interval;
		attr.chan_id = msg.u.ask_channel.chan_id;
		memcpy(attr.uuid, msg.u.ask_channel.uuid, sizeof(attr.uuid));
		attr.blocking_timeout = msg.u.ask_channel.blocking_timeout;

		/* Match channel buffer type to the UST abi. */
		switch (msg.u.ask_channel.output) {
		case LTTNG_EVENT_MMAP:
		default:
			attr.output = LTTNG_UST_ABI_MMAP;
			break;
		}

		/* Translate and save channel type. */
		switch (msg.u.ask_channel.type) {
		case LTTNG_UST_ABI_CHAN_PER_CPU:
			channel->type = CONSUMER_CHANNEL_TYPE_DATA;
			attr.type = LTTNG_UST_ABI_CHAN_PER_CPU;
			/*
			 * Set refcount to 1 for owner. Below, we will
			 * pass ownership to the
			 * consumer_thread_channel_poll() thread.
			 */
			channel->refcount = 1;
			break;
		case LTTNG_UST_ABI_CHAN_METADATA:
			channel->type = CONSUMER_CHANNEL_TYPE_METADATA;
			attr.type = LTTNG_UST_ABI_CHAN_METADATA;
			break;
		default:
			abort();
			goto error_fatal;
		}

		health_code_update();

		ret_ask_channel = ask_channel(ctx, channel, &attr);
		if (ret_ask_channel < 0) {
			goto end_channel_error;
		}

		if (msg.u.ask_channel.type == LTTNG_UST_ABI_CHAN_METADATA) {
			int ret_allocate;

			ret_allocate = consumer_metadata_cache_allocate(
					channel);
			if (ret_allocate < 0) {
				ERR("Allocating metadata cache");
				goto end_channel_error;
			}
			consumer_timer_switch_start(channel, attr.switch_timer_interval);
			attr.switch_timer_interval = 0;
		} else {
			int monitor_start_ret;

			consumer_timer_live_start(channel,
					msg.u.ask_channel.live_timer_interval);
			monitor_start_ret = consumer_timer_monitor_start(
					channel,
					msg.u.ask_channel.monitor_timer_interval);
			if (monitor_start_ret < 0) {
				ERR("Starting channel monitoring timer failed");
				goto end_channel_error;
			}
		}

		health_code_update();

		/*
		 * Add the channel to the internal state AFTER all streams were created
		 * and successfully sent to session daemon. This way, all streams must
		 * be ready before this channel is visible to the threads.
		 * If add_channel succeeds, ownership of the channel is
		 * passed to consumer_thread_channel_poll().
		 */
		ret_add_channel = add_channel(channel, ctx);
		if (ret_add_channel < 0) {
			if (msg.u.ask_channel.type == LTTNG_UST_ABI_CHAN_METADATA) {
				if (channel->switch_timer_enabled == 1) {
					consumer_timer_switch_stop(channel);
				}
				consumer_metadata_cache_destroy(channel);
			}
			if (channel->live_timer_enabled == 1) {
				consumer_timer_live_stop(channel);
			}
			if (channel->monitor_timer_enabled == 1) {
				consumer_timer_monitor_stop(channel);
			}
			goto end_channel_error;
		}

		health_code_update();

		/*
		 * Channel and streams are now created. Inform the session daemon that
		 * everything went well and should wait to receive the channel and
		 * streams with ustctl API.
		 */
		ret_send = consumer_send_status_channel(sock, channel);
		if (ret_send < 0) {
			/*
			 * There is probably a problem on the socket.
			 */
			goto error_fatal;
		}

		break;
	}
	case LTTNG_CONSUMER_GET_CHANNEL:
	{
		int ret, relayd_err = 0;
		uint64_t key = msg.u.get_channel.key;
		struct lttng_consumer_channel *found_channel;

		found_channel = consumer_find_channel(key);
		if (!found_channel) {
			ERR("UST consumer get channel key %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
			goto end_get_channel;
		}

		health_code_update();

		/* Send the channel to sessiond (and relayd, if applicable). */
		ret = send_channel_to_sessiond_and_relayd(
				sock, found_channel, ctx, &relayd_err);
		if (ret < 0) {
			if (relayd_err) {
				/*
				 * We were unable to send the stream to the relayd, so avoid
				 * sending back a fatal error to the thread since this is OK
				 * and the consumer can continue its work. The above call
				 * has sent the error status message to the sessiond.
				 */
				goto end_get_channel_nosignal;
			}
			/*
			 * The communication was broken, hence there is a bad state
			 * between the consumer and sessiond so stop everything.
			 */
			goto error_get_channel_fatal;
		}

		health_code_update();

		/*
		 * In no monitor mode, the streams ownership is kept inside the channel
		 * so don't send them to the data thread.
		 */
		if (!found_channel->monitor) {
			goto end_get_channel;
		}

		ret = send_streams_to_thread(found_channel, ctx);
		if (ret < 0) {
			/*
			 * If we are unable to send the stream to the thread, there is
			 * a big problem so just stop everything.
			 */
			goto error_get_channel_fatal;
		}
		/* List MUST be empty after or else it could be reused. */
		LTTNG_ASSERT(cds_list_empty(&found_channel->streams.head));
end_get_channel:
		goto end_msg_sessiond;
error_get_channel_fatal:
		goto error_fatal;
end_get_channel_nosignal:
		goto end_nosignal;
	}
	case LTTNG_CONSUMER_DESTROY_CHANNEL:
	{
		uint64_t key = msg.u.destroy_channel.key;

		/*
		 * Only called if streams have not been sent to stream
		 * manager thread. However, channel has been sent to
		 * channel manager thread.
		 */
		notify_thread_del_channel(ctx, key);
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_CLOSE_METADATA:
	{
		int ret;

		ret = close_metadata(msg.u.close_metadata.key);
		if (ret != 0) {
			ret_code = (lttcomm_return_code) ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_FLUSH_CHANNEL:
	{
		int ret;

		ret = flush_channel(msg.u.flush_channel.key);
		if (ret != 0) {
			ret_code = (lttcomm_return_code) ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_CLEAR_QUIESCENT_CHANNEL:
	{
		int ret;

		ret = clear_quiescent_channel(
				msg.u.clear_quiescent_channel.key);
		if (ret != 0) {
			ret_code = (lttcomm_return_code) ret;
		}

		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_PUSH_METADATA:
	{
		int ret;
		uint64_t len = msg.u.push_metadata.len;
		uint64_t key = msg.u.push_metadata.key;
		uint64_t offset = msg.u.push_metadata.target_offset;
		uint64_t version = msg.u.push_metadata.version;
		struct lttng_consumer_channel *found_channel;

		DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key,
				len);

		found_channel = consumer_find_channel(key);
		if (!found_channel) {
			/*
			 * This is possible if the metadata creation on the consumer side
			 * is in flight vis-a-vis a concurrent push metadata from the
			 * session daemon. Simply return that the channel failed and the
			 * session daemon will handle that message correctly considering
			 * that this race is acceptable thus the DBG() statement here.
			 */
			DBG("UST consumer push metadata %" PRIu64 " not found", key);
			ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
			goto end_push_metadata_msg_sessiond;
		}

		health_code_update();

		if (!len) {
			/*
			 * There is nothing to receive. We have simply
			 * checked whether the channel can be found.
			 */
			ret_code = LTTCOMM_CONSUMERD_SUCCESS;
			goto end_push_metadata_msg_sessiond;
		}

		/* Tell session daemon we are ready to receive the metadata. */
		ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
		if (ret < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto error_push_metadata_fatal;
		}

		health_code_update();

		/* Wait for more data. */
		health_poll_entry();
		ret = lttng_consumer_poll_socket(consumer_sockpoll);
		health_poll_exit();
		if (ret) {
			goto error_push_metadata_fatal;
		}

		health_code_update();

		ret = lttng_ustconsumer_recv_metadata(sock, key, offset, len,
				version, found_channel, 0, 1);
		if (ret < 0) {
			/* Error receiving from sessiond. */
			goto error_push_metadata_fatal;
		} else {
			ret_code = (lttcomm_return_code) ret;
			goto end_push_metadata_msg_sessiond;
		}
end_push_metadata_msg_sessiond:
		goto end_msg_sessiond;
error_push_metadata_fatal:
		goto error_fatal;
	}
	case LTTNG_CONSUMER_SETUP_METADATA:
	{
		int ret;

		ret = setup_metadata(ctx, msg.u.setup_metadata.key);
		if (ret) {
			ret_code = (lttcomm_return_code) ret;
		}
		goto end_msg_sessiond;
	}
	case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
	{
		struct lttng_consumer_channel *found_channel;
		uint64_t key = msg.u.snapshot_channel.key;
		int ret_send;

		found_channel = consumer_find_channel(key);
		if (!found_channel) {
			DBG("UST snapshot channel not found for key %" PRIu64, key);
			ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
		} else {
			if (msg.u.snapshot_channel.metadata) {
				int ret_snapshot;

				ret_snapshot = snapshot_metadata(found_channel,
						key,
						msg.u.snapshot_channel.pathname,
						msg.u.snapshot_channel.relayd_id,
						ctx);
				if (ret_snapshot < 0) {
					ERR("Snapshot metadata failed");
					ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
				}
			} else {
				int ret_snapshot;

				ret_snapshot = snapshot_channel(found_channel,
						key,
						msg.u.snapshot_channel.pathname,
						msg.u.snapshot_channel.relayd_id,
						msg.u.snapshot_channel
								.nb_packets_per_stream,
						ctx);
				if (ret_snapshot < 0) {
					ERR("Snapshot channel failed");
					ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED;
				}
			}
		}
		health_code_update();
		ret_send = consumer_send_status_msg(sock, ret_code);
		if (ret_send < 0) {
			/* Somehow, the session daemon is not responding anymore. */
			goto end_nosignal;
		}
		health_code_update();
		break;
	}
	case LTTNG_CONSUMER_DISCARDED_EVENTS:
	{
		int ret = 0;
		uint64_t discarded_events;
		struct lttng_ht_iter iter;
		struct lttng_ht *ht;
		struct lttng_consumer_stream *stream;
		uint64_t id = msg.u.discarded_events.session_id;
		uint64_t key = msg.u.discarded_events.channel_key;

		DBG("UST consumer discarded events command for session id %"
				PRIu64, id);
		rcu_read_lock();
		pthread_mutex_lock(&the_consumer_data.lock);

		ht = the_consumer_data.stream_list_ht;

		/*
		 * We only need a reference to the channel, but channels are not
		 * directly indexed, so we just use the first matching stream to
		 * extract the information we need; we default to 0 if not found
		 * (no events are dropped if the channel is not yet in use).
		 */
		discarded_events = 0;
		cds_lfht_for_each_entry_duplicate(ht->ht,
				ht->hash_fct(&id, lttng_ht_seed),
				ht->match_fct, &id,
				&iter.iter, stream, node_session_id.node) {
			if (stream->chan->key == key) {
				discarded_events = stream->chan->discarded_events;
				break;
			}
		}
		pthread_mutex_unlock(&the_consumer_data.lock);
		rcu_read_unlock();

		DBG("UST consumer discarded events command for session id %"
				PRIu64 ", channel key %" PRIu64, id, key);

		health_code_update();

		/* Send back returned value to session daemon */
		ret = lttcomm_send_unix_sock(sock, &discarded_events, sizeof(discarded_events));
		if (ret < 0) {
			PERROR("send discarded events");
			goto error_fatal;
		}

		break;
	}
1909 case LTTNG_CONSUMER_LOST_PACKETS:
1910 {
1911 int ret;
1912 uint64_t lost_packets;
1913 struct lttng_ht_iter iter;
1914 struct lttng_ht *ht;
1915 struct lttng_consumer_stream *stream;
1916 uint64_t id = msg.u.lost_packets.session_id;
1917 uint64_t key = msg.u.lost_packets.channel_key;
1918
1919 DBG("UST consumer lost packets command for session id %"
1920 PRIu64, id);
1921 rcu_read_lock();
1922 pthread_mutex_lock(&the_consumer_data.lock);
1923
1924 ht = the_consumer_data.stream_list_ht;
1925
1926 /*
1927 		 * We only need a reference to the channel, but channels are not
1928 		 * directly indexed, so we use the first matching stream to
1929 		 * extract the information we need. We default to 0 if no stream
1930 		 * is found (no packets are lost if the channel is not yet in use).
1931 */
1932 lost_packets = 0;
1933 cds_lfht_for_each_entry_duplicate(ht->ht,
1934 ht->hash_fct(&id, lttng_ht_seed),
1935 ht->match_fct, &id,
1936 &iter.iter, stream, node_session_id.node) {
1937 if (stream->chan->key == key) {
1938 lost_packets = stream->chan->lost_packets;
1939 break;
1940 }
1941 }
1942 pthread_mutex_unlock(&the_consumer_data.lock);
1943 rcu_read_unlock();
1944
1945 DBG("UST consumer lost packets command for session id %"
1946 PRIu64 ", channel key %" PRIu64, id, key);
1947
1948 health_code_update();
1949
1950 /* Send back returned value to session daemon */
1951 ret = lttcomm_send_unix_sock(sock, &lost_packets,
1952 sizeof(lost_packets));
1953 if (ret < 0) {
1954 PERROR("send lost packets");
1955 goto error_fatal;
1956 }
1957
1958 break;
1959 }
1960 case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE:
1961 {
1962 int channel_monitor_pipe, ret_send,
1963 ret_set_channel_monitor_pipe;
1964 ssize_t ret_recv;
1965
1966 ret_code = LTTCOMM_CONSUMERD_SUCCESS;
1967 /* Successfully received the command's type. */
1968 ret_send = consumer_send_status_msg(sock, ret_code);
1969 if (ret_send < 0) {
1970 goto error_fatal;
1971 }
1972
1973 ret_recv = lttcomm_recv_fds_unix_sock(
1974 sock, &channel_monitor_pipe, 1);
1975 if (ret_recv != sizeof(channel_monitor_pipe)) {
1976 ERR("Failed to receive channel monitor pipe");
1977 goto error_fatal;
1978 }
1979
1980 DBG("Received channel monitor pipe (%d)", channel_monitor_pipe);
1981 ret_set_channel_monitor_pipe =
1982 consumer_timer_thread_set_channel_monitor_pipe(
1983 channel_monitor_pipe);
1984 if (!ret_set_channel_monitor_pipe) {
1985 int flags;
1986 int ret_fcntl;
1987
1988 ret_code = LTTCOMM_CONSUMERD_SUCCESS;
1989 /* Set the pipe as non-blocking. */
1990 ret_fcntl = fcntl(channel_monitor_pipe, F_GETFL, 0);
1991 if (ret_fcntl == -1) {
1992 PERROR("fcntl get flags of the channel monitoring pipe");
1993 goto error_fatal;
1994 }
1995 flags = ret_fcntl;
1996
1997 ret_fcntl = fcntl(channel_monitor_pipe, F_SETFL,
1998 flags | O_NONBLOCK);
1999 if (ret_fcntl == -1) {
2000 PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe");
2001 goto error_fatal;
2002 }
2003 DBG("Channel monitor pipe set as non-blocking");
2004 } else {
2005 ret_code = LTTCOMM_CONSUMERD_ALREADY_SET;
2006 }
2007 goto end_msg_sessiond;
2008 }
2009 case LTTNG_CONSUMER_ROTATE_CHANNEL:
2010 {
2011 struct lttng_consumer_channel *found_channel;
2012 uint64_t key = msg.u.rotate_channel.key;
2013 int ret_send_status;
2014
2015 found_channel = consumer_find_channel(key);
2016 if (!found_channel) {
2017 DBG("Channel %" PRIu64 " not found", key);
2018 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
2019 } else {
2020 int rotate_channel;
2021
2022 /*
2023 * Sample the rotate position of all the streams in
2024 * this channel.
2025 */
2026 rotate_channel = lttng_consumer_rotate_channel(
2027 found_channel, key,
2028 msg.u.rotate_channel.relayd_id);
2029 if (rotate_channel < 0) {
2030 ERR("Rotate channel failed");
2031 ret_code = LTTCOMM_CONSUMERD_ROTATION_FAIL;
2032 }
2033
2034 health_code_update();
2035 }
2036
2037 ret_send_status = consumer_send_status_msg(sock, ret_code);
2038 if (ret_send_status < 0) {
2039 /* Somehow, the session daemon is not responding anymore. */
2040 goto end_rotate_channel_nosignal;
2041 }
2042
2043 /*
2044 * Rotate the streams that are ready right now.
2045 * FIXME: this is a second consecutive iteration over the
2046 * streams in a channel, there is probably a better way to
2047 * handle this, but it needs to be after the
2048 * consumer_send_status_msg() call.
2049 */
2050 if (found_channel) {
2051 int ret_rotate_read_streams;
2052
2053 ret_rotate_read_streams =
2054 lttng_consumer_rotate_ready_streams(
2055 found_channel, key);
2056 if (ret_rotate_read_streams < 0) {
2057 				ERR("Rotating ready streams failed");
2058 }
2059 }
2060 break;
2061 end_rotate_channel_nosignal:
2062 goto end_nosignal;
2063 }
2064 case LTTNG_CONSUMER_CLEAR_CHANNEL:
2065 {
2066 struct lttng_consumer_channel *found_channel;
2067 uint64_t key = msg.u.clear_channel.key;
2068 int ret_send_status;
2069
2070 found_channel = consumer_find_channel(key);
2071 if (!found_channel) {
2072 DBG("Channel %" PRIu64 " not found", key);
2073 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
2074 } else {
2075 int ret_clear_channel;
2076
2077 ret_clear_channel = lttng_consumer_clear_channel(
2078 found_channel);
2079 if (ret_clear_channel) {
2080 ERR("Clear channel failed key %" PRIu64, key);
2081 ret_code = (lttcomm_return_code) ret_clear_channel;
2082 }
2083
2084 health_code_update();
2085 }
2086 ret_send_status = consumer_send_status_msg(sock, ret_code);
2087 if (ret_send_status < 0) {
2088 /* Somehow, the session daemon is not responding anymore. */
2089 goto end_nosignal;
2090 }
2091 break;
2092 }
2093 case LTTNG_CONSUMER_INIT:
2094 {
2095 int ret_send_status;
2096 lttng_uuid sessiond_uuid;
2097
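			/*
			 * The sessiond UUID is received as a raw byte array in the
			 * message; copy it into the lttng_uuid wrapper.
			 */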
2098 std::copy(std::begin(msg.u.init.sessiond_uuid), std::end(msg.u.init.sessiond_uuid),
2099 sessiond_uuid.begin());
2100 ret_code = lttng_consumer_init_command(ctx, sessiond_uuid);
2101 health_code_update();
2102 ret_send_status = consumer_send_status_msg(sock, ret_code);
2103 if (ret_send_status < 0) {
2104 /* Somehow, the session daemon is not responding anymore. */
2105 goto end_nosignal;
2106 }
2107 break;
2108 }
2109 case LTTNG_CONSUMER_CREATE_TRACE_CHUNK:
2110 {
2111 const struct lttng_credentials credentials = {
2112 .uid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.uid),
2113 .gid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.gid),
2114 };
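		/*
		 * A trace chunk is considered local when the sessiond did not
		 * provide a relayd id for it.
		 */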
2115 const bool is_local_trace =
2116 !msg.u.create_trace_chunk.relayd_id.is_set;
2117 const uint64_t relayd_id =
2118 msg.u.create_trace_chunk.relayd_id.value;
2119 const char *chunk_override_name =
2120 *msg.u.create_trace_chunk.override_name ?
2121 msg.u.create_trace_chunk.override_name :
2122 NULL;
2123 struct lttng_directory_handle *chunk_directory_handle = NULL;
2124
2125 /*
2126 * The session daemon will only provide a chunk directory file
2127 * descriptor for local traces.
2128 */
2129 if (is_local_trace) {
2130 int chunk_dirfd;
2131 int ret_send_status;
2132 ssize_t ret_recv;
2133
2134 			/* Acknowledge the reception of the command. */
2135 ret_send_status = consumer_send_status_msg(
2136 sock, LTTCOMM_CONSUMERD_SUCCESS);
2137 if (ret_send_status < 0) {
2138 /* Somehow, the session daemon is not responding anymore. */
2139 goto end_nosignal;
2140 }
2141
2142 /*
2143 * Receive trace chunk domain dirfd.
2144 */
2145 ret_recv = lttcomm_recv_fds_unix_sock(
2146 sock, &chunk_dirfd, 1);
2147 if (ret_recv != sizeof(chunk_dirfd)) {
2148 ERR("Failed to receive trace chunk domain directory file descriptor");
2149 goto error_fatal;
2150 }
2151
2152 DBG("Received trace chunk domain directory fd (%d)",
2153 chunk_dirfd);
2154 chunk_directory_handle = lttng_directory_handle_create_from_dirfd(
2155 chunk_dirfd);
2156 if (!chunk_directory_handle) {
2157 ERR("Failed to initialize chunk domain directory handle from directory file descriptor");
2158 if (close(chunk_dirfd)) {
2159 PERROR("Failed to close chunk directory file descriptor");
2160 }
2161 goto error_fatal;
2162 }
2163 }
2164
2165 ret_code = lttng_consumer_create_trace_chunk(
2166 !is_local_trace ? &relayd_id : NULL,
2167 msg.u.create_trace_chunk.session_id,
2168 msg.u.create_trace_chunk.chunk_id,
2169 (time_t) msg.u.create_trace_chunk
2170 .creation_timestamp,
2171 chunk_override_name,
2172 msg.u.create_trace_chunk.credentials.is_set ?
2173 &credentials :
2174 NULL,
2175 chunk_directory_handle);
2176 lttng_directory_handle_put(chunk_directory_handle);
2177 goto end_msg_sessiond;
2178 }
2179 case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK:
2180 {
2181 enum lttng_trace_chunk_command_type close_command =
2182 (lttng_trace_chunk_command_type)
2183 msg.u.close_trace_chunk.close_command.value;
2184 const uint64_t relayd_id =
2185 msg.u.close_trace_chunk.relayd_id.value;
2186 struct lttcomm_consumer_close_trace_chunk_reply reply;
2187 char closed_trace_chunk_path[LTTNG_PATH_MAX] = {};
2188 int ret;
2189
2190 ret_code = lttng_consumer_close_trace_chunk(
2191 msg.u.close_trace_chunk.relayd_id.is_set ?
2192 &relayd_id :
2193 NULL,
2194 msg.u.close_trace_chunk.session_id,
2195 msg.u.close_trace_chunk.chunk_id,
2196 (time_t) msg.u.close_trace_chunk.close_timestamp,
2197 msg.u.close_trace_chunk.close_command.is_set ?
2198 &close_command :
2199 NULL, closed_trace_chunk_path);
2200 reply.ret_code = ret_code;
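		/* path_length accounts for the terminating null byte. */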
2201 reply.path_length = strlen(closed_trace_chunk_path) + 1;
2202 ret = lttcomm_send_unix_sock(sock, &reply, sizeof(reply));
2203 if (ret != sizeof(reply)) {
2204 goto error_fatal;
2205 }
2206 ret = lttcomm_send_unix_sock(sock, closed_trace_chunk_path,
2207 reply.path_length);
2208 if (ret != reply.path_length) {
2209 goto error_fatal;
2210 }
2211 goto end_nosignal;
2212 }
2213 case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS:
2214 {
2215 const uint64_t relayd_id =
2216 msg.u.trace_chunk_exists.relayd_id.value;
2217
2218 ret_code = lttng_consumer_trace_chunk_exists(
2219 msg.u.trace_chunk_exists.relayd_id.is_set ?
2220 &relayd_id : NULL,
2221 msg.u.trace_chunk_exists.session_id,
2222 msg.u.trace_chunk_exists.chunk_id);
2223 goto end_msg_sessiond;
2224 }
2225 case LTTNG_CONSUMER_OPEN_CHANNEL_PACKETS:
2226 {
2227 const uint64_t key = msg.u.open_channel_packets.key;
2228 struct lttng_consumer_channel *found_channel =
2229 consumer_find_channel(key);
2230
2231 if (found_channel) {
2232 pthread_mutex_lock(&found_channel->lock);
2233 ret_code = lttng_consumer_open_channel_packets(
2234 found_channel);
2235 pthread_mutex_unlock(&found_channel->lock);
2236 } else {
2237 /*
2238 * The channel could have disappeared in per-pid
2239 * buffering mode.
2240 */
2241 DBG("Channel %" PRIu64 " not found", key);
2242 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
2243 }
2244
2245 health_code_update();
2246 goto end_msg_sessiond;
2247 }
2248 default:
2249 break;
2250 }
2251
2252 end_nosignal:
2253 /*
2254 * Return 1 to indicate success since the 0 value can be a socket
2255 * shutdown during the recv() or send() call.
2256 */
2257 ret_func = 1;
2258 goto end;
2259
2260 end_msg_sessiond:
2261 /*
2262 * The returned value here is not useful since either way we'll return 1 to
2263 * the caller because the session daemon socket management is done
2264 * elsewhere. Returning a negative code or 0 will shutdown the consumer.
2265 */
2266 {
2267 int ret_send_status;
2268
2269 ret_send_status = consumer_send_status_msg(sock, ret_code);
2270 if (ret_send_status < 0) {
2271 goto error_fatal;
2272 }
2273 }
2274
2275 ret_func = 1;
2276 goto end;
2277
2278 end_channel_error:
2279 if (channel) {
2280 consumer_del_channel(channel);
2281 }
2282 /* We have to send a status channel message indicating an error. */
2283 {
2284 int ret_send_status;
2285
2286 ret_send_status = consumer_send_status_channel(sock, NULL);
2287 if (ret_send_status < 0) {
2288 			/* Stop everything if session daemon cannot be notified. */
2289 goto error_fatal;
2290 }
2291 }
2292
2293 ret_func = 1;
2294 goto end;
2295
2296 error_fatal:
2297 /* This will issue a consumer stop. */
2298 ret_func = -1;
2299 goto end;
2300
2301 end:
2302 rcu_read_unlock();
2303 health_code_update();
2304 return ret_func;
2305 }
2306
2307 int lttng_ust_flush_buffer(struct lttng_consumer_stream *stream,
2308 int producer_active)
2309 {
2310 LTTNG_ASSERT(stream);
2311 LTTNG_ASSERT(stream->ustream);
2312
2313 return lttng_ust_ctl_flush_buffer(stream->ustream, producer_active);
2314 }
2315
2316 /*
2317 * Take a snapshot for a specific stream.
2318 *
2319 * Returns 0 on success, < 0 on error
2320 */
2321 int lttng_ustconsumer_take_snapshot(struct lttng_consumer_stream *stream)
2322 {
2323 LTTNG_ASSERT(stream);
2324 LTTNG_ASSERT(stream->ustream);
2325
2326 return lttng_ust_ctl_snapshot(stream->ustream);
2327 }
2328
2329 /*
2330 * Sample consumed and produced positions for a specific stream.
2331 *
2332 * Returns 0 on success, < 0 on error.
2333 */
2334 int lttng_ustconsumer_sample_snapshot_positions(
2335 struct lttng_consumer_stream *stream)
2336 {
2337 LTTNG_ASSERT(stream);
2338 LTTNG_ASSERT(stream->ustream);
2339
2340 return lttng_ust_ctl_snapshot_sample_positions(stream->ustream);
2341 }
2342
2343 /*
2344 * Get the produced position
2345 *
2346 * Returns 0 on success, < 0 on error
2347 */
2348 int lttng_ustconsumer_get_produced_snapshot(
2349 struct lttng_consumer_stream *stream, unsigned long *pos)
2350 {
2351 LTTNG_ASSERT(stream);
2352 LTTNG_ASSERT(stream->ustream);
2353 LTTNG_ASSERT(pos);
2354
2355 return lttng_ust_ctl_snapshot_get_produced(stream->ustream, pos);
2356 }
2357
2358 /*
2359 * Get the consumed position
2360 *
2361 * Returns 0 on success, < 0 on error
2362 */
2363 int lttng_ustconsumer_get_consumed_snapshot(
2364 struct lttng_consumer_stream *stream, unsigned long *pos)
2365 {
2366 LTTNG_ASSERT(stream);
2367 LTTNG_ASSERT(stream->ustream);
2368 LTTNG_ASSERT(pos);
2369
2370 return lttng_ust_ctl_snapshot_get_consumed(stream->ustream, pos);
2371 }
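
/*
 * Usage sketch: the two getters above only read back positions that were
 * previously sampled, so a caller pairs them with
 * lttng_ustconsumer_sample_snapshot_positions(), as done in
 * get_next_subbuffer_metadata() below:
 *
 *	unsigned long consumed_pos, produced_pos;
 *
 *	if (lttng_ustconsumer_sample_snapshot_positions(stream) == 0 &&
 *			lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos) == 0 &&
 *			lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos) == 0) {
 *		// Both positions originate from the same sample point.
 *	}
 */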
2372
2373 int lttng_ustconsumer_flush_buffer(struct lttng_consumer_stream *stream,
2374 int producer)
2375 {
2376 LTTNG_ASSERT(stream);
2377 LTTNG_ASSERT(stream->ustream);
2378
2379 return lttng_ust_ctl_flush_buffer(stream->ustream, producer);
2380 }
2381
2382 int lttng_ustconsumer_clear_buffer(struct lttng_consumer_stream *stream)
2383 {
2384 LTTNG_ASSERT(stream);
2385 LTTNG_ASSERT(stream->ustream);
2386
2387 return lttng_ust_ctl_clear_buffer(stream->ustream);
2388 }
2389
2390 int lttng_ustconsumer_get_current_timestamp(
2391 struct lttng_consumer_stream *stream, uint64_t *ts)
2392 {
2393 LTTNG_ASSERT(stream);
2394 LTTNG_ASSERT(stream->ustream);
2395 LTTNG_ASSERT(ts);
2396
2397 return lttng_ust_ctl_get_current_timestamp(stream->ustream, ts);
2398 }
2399
2400 int lttng_ustconsumer_get_sequence_number(
2401 struct lttng_consumer_stream *stream, uint64_t *seq)
2402 {
2403 LTTNG_ASSERT(stream);
2404 LTTNG_ASSERT(stream->ustream);
2405 LTTNG_ASSERT(seq);
2406
2407 return lttng_ust_ctl_get_sequence_number(stream->ustream, seq);
2408 }
2409
2410 /*
2411 * Called when the stream signals the consumer that it has hung up.
2412 */
2413 void lttng_ustconsumer_on_stream_hangup(struct lttng_consumer_stream *stream)
2414 {
2415 LTTNG_ASSERT(stream);
2416 LTTNG_ASSERT(stream->ustream);
2417
2418 pthread_mutex_lock(&stream->lock);
2419 if (!stream->quiescent) {
2420 if (lttng_ust_ctl_flush_buffer(stream->ustream, 0) < 0) {
2421 ERR("Failed to flush buffer on stream hang-up");
2422 } else {
2423 stream->quiescent = true;
2424 }
2425 }
2426 pthread_mutex_unlock(&stream->lock);
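	/*
	 * Signal that the hang-up flush has been attempted; the wake-up
	 * path checks this flag before draining the wait_fd.
	 */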
2427 stream->hangup_flush_done = 1;
2428 }
2429
2430 void lttng_ustconsumer_del_channel(struct lttng_consumer_channel *chan)
2431 {
2432 int i;
2433
2434 LTTNG_ASSERT(chan);
2435 LTTNG_ASSERT(chan->uchan);
2436 LTTNG_ASSERT(chan->buffer_credentials.is_set);
2437
2438 if (chan->switch_timer_enabled == 1) {
2439 consumer_timer_switch_stop(chan);
2440 }
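	/* Close each stream fd and, if a shm_path is set, unlink its backing file. */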
2441 for (i = 0; i < chan->nr_stream_fds; i++) {
2442 int ret;
2443
2444 ret = close(chan->stream_fds[i]);
2445 if (ret) {
2446 PERROR("close");
2447 }
2448 if (chan->shm_path[0]) {
2449 char shm_path[PATH_MAX];
2450
2451 ret = get_stream_shm_path(shm_path, chan->shm_path, i);
2452 if (ret) {
2453 ERR("Cannot get stream shm path");
2454 }
2455 ret = run_as_unlink(shm_path,
2456 lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR(
2457 chan->buffer_credentials)),
2458 lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR(
2459 chan->buffer_credentials)));
2460 if (ret) {
2461 PERROR("unlink %s", shm_path);
2462 }
2463 }
2464 }
2465 }
2466
2467 void lttng_ustconsumer_free_channel(struct lttng_consumer_channel *chan)
2468 {
2469 LTTNG_ASSERT(chan);
2470 LTTNG_ASSERT(chan->uchan);
2471 LTTNG_ASSERT(chan->buffer_credentials.is_set);
2472
2473 consumer_metadata_cache_destroy(chan);
2474 lttng_ust_ctl_destroy_channel(chan->uchan);
2475 /* Try to rmdir all directories under shm_path root. */
2476 if (chan->root_shm_path[0]) {
2477 (void) run_as_rmdir_recursive(chan->root_shm_path,
2478 lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR(
2479 chan->buffer_credentials)),
2480 lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR(
2481 chan->buffer_credentials)),
2482 LTTNG_DIRECTORY_HANDLE_SKIP_NON_EMPTY_FLAG);
2483 }
2484 free(chan->stream_fds);
2485 }
2486
2487 void lttng_ustconsumer_del_stream(struct lttng_consumer_stream *stream)
2488 {
2489 LTTNG_ASSERT(stream);
2490 LTTNG_ASSERT(stream->ustream);
2491
2492 if (stream->chan->switch_timer_enabled == 1) {
2493 consumer_timer_switch_stop(stream->chan);
2494 }
2495 lttng_ust_ctl_destroy_stream(stream->ustream);
2496 }
2497
2498 int lttng_ustconsumer_get_wakeup_fd(struct lttng_consumer_stream *stream)
2499 {
2500 LTTNG_ASSERT(stream);
2501 LTTNG_ASSERT(stream->ustream);
2502
2503 return lttng_ust_ctl_stream_get_wakeup_fd(stream->ustream);
2504 }
2505
2506 int lttng_ustconsumer_close_wakeup_fd(struct lttng_consumer_stream *stream)
2507 {
2508 LTTNG_ASSERT(stream);
2509 LTTNG_ASSERT(stream->ustream);
2510
2511 return lttng_ust_ctl_stream_close_wakeup_fd(stream->ustream);
2512 }
2513
2514 /*
2515 * Write up to one packet from the metadata cache to the channel.
2516 *
2517 * Returns the number of bytes pushed from the cache into the ring buffer, or a
2518 * negative value on error.
2519 */
2520 static
2521 int commit_one_metadata_packet(struct lttng_consumer_stream *stream)
2522 {
2523 ssize_t write_len;
2524 int ret;
2525
2526 pthread_mutex_lock(&stream->chan->metadata_cache->lock);
2527 if (stream->chan->metadata_cache->contents.size ==
2528 stream->ust_metadata_pushed) {
2529 /*
2530 * In the context of a user space metadata channel, a
2531 * change in version can be detected in two ways:
2532 * 1) During the pre-consume of the `read_subbuffer` loop,
2533 * 2) When populating the metadata ring buffer (i.e. here).
2534 *
2535 * This function is invoked when there is no metadata
2536 * available in the ring-buffer. If all data was consumed
2537 * up to the size of the metadata cache, there is no metadata
2538 * to insert in the ring-buffer.
2539 *
2540 * However, the metadata version could still have changed (a
2541 * regeneration without any new data will yield the same cache
2542 * size).
2543 *
2544 * The cache's version is checked for a version change and the
2545 * consumed position is reset if one occurred.
2546 *
2547 * This check is only necessary for the user space domain as
2548 * it has to manage the cache explicitly. If this reset was not
2549 * performed, no metadata would be consumed (and no reset would
2550 * occur as part of the pre-consume) until the metadata size
2551 * exceeded the cache size.
2552 */
2553 if (stream->metadata_version !=
2554 stream->chan->metadata_cache->version) {
2555 metadata_stream_reset_cache_consumed_position(stream);
2556 consumer_stream_metadata_set_version(stream,
2557 stream->chan->metadata_cache->version);
2558 } else {
2559 ret = 0;
2560 goto end;
2561 }
2562 }
2563
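	/*
	 * Push the unconsumed portion of the cache into the ring buffer;
	 * at most one packet is written and write_len reflects the number
	 * of bytes actually committed.
	 */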
2564 write_len = lttng_ust_ctl_write_one_packet_to_channel(stream->chan->uchan,
2565 &stream->chan->metadata_cache->contents.data[stream->ust_metadata_pushed],
2566 stream->chan->metadata_cache->contents.size -
2567 stream->ust_metadata_pushed);
2568 LTTNG_ASSERT(write_len != 0);
2569 if (write_len < 0) {
2570 ERR("Writing one metadata packet");
2571 ret = write_len;
2572 goto end;
2573 }
2574 stream->ust_metadata_pushed += write_len;
2575
2576 LTTNG_ASSERT(stream->chan->metadata_cache->contents.size >=
2577 stream->ust_metadata_pushed);
2578 ret = write_len;
2579
2580 /*
2581 * Switch packet (but don't open the next one) on every commit of
2582 * a metadata packet. Since the subbuffer is fully filled (with padding,
2583 * if needed), the stream is "quiescent" after this commit.
2584 */
2585 if (lttng_ust_ctl_flush_buffer(stream->ustream, 1)) {
2586 ERR("Failed to flush buffer while committing one metadata packet");
2587 ret = -EIO;
2588 } else {
2589 stream->quiescent = true;
2590 }
2591 end:
2592 pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
2593 return ret;
2594 }
2595
2596
2597 /*
2598  * Sync metadata: request it from the session daemon and take a snapshot so
2599  * that the metadata thread can consume it.
2600  *
2601  * The metadata stream lock is held on entry, but it must be released when
2602  * interacting with the sessiond, else we cause a deadlock with live
2603  * clients awaiting metadata to be pushed out.
2604 *
2605 * The RCU read side lock must be held by the caller.
2606 */
2607 enum sync_metadata_status lttng_ustconsumer_sync_metadata(
2608 struct lttng_consumer_local_data *ctx,
2609 struct lttng_consumer_stream *metadata_stream)
2610 {
2611 int ret;
2612 enum sync_metadata_status status;
2613 struct lttng_consumer_channel *metadata_channel;
2614
2615 LTTNG_ASSERT(ctx);
2616 LTTNG_ASSERT(metadata_stream);
2617 ASSERT_RCU_READ_LOCKED();
2618
2619 metadata_channel = metadata_stream->chan;
2620 pthread_mutex_unlock(&metadata_stream->lock);
2621 /*
2622 * Request metadata from the sessiond, but don't wait for the flush
2623 * because we locked the metadata thread.
2624 */
2625 ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 0);
2626 pthread_mutex_lock(&metadata_stream->lock);
2627 if (ret < 0) {
2628 status = SYNC_METADATA_STATUS_ERROR;
2629 goto end;
2630 }
2631
2632 /*
2633 * The metadata stream and channel can be deleted while the
2634 	 * metadata stream lock was released. The stream is checked
2635 	 * for deletion before it is used any further.
2636 *
2637 * Note that it is safe to access a logically-deleted stream since its
2638 * existence is still guaranteed by the RCU read side lock. However,
2639 * it should no longer be used. The close/deletion of the metadata
2640 * channel and stream already guarantees that all metadata has been
2641 * consumed. Therefore, there is nothing left to do in this function.
2642 */
2643 if (consumer_stream_is_deleted(metadata_stream)) {
2644 DBG("Metadata stream %" PRIu64 " was deleted during the metadata synchronization",
2645 metadata_stream->key);
2646 status = SYNC_METADATA_STATUS_NO_DATA;
2647 goto end;
2648 }
2649
2650 ret = commit_one_metadata_packet(metadata_stream);
2651 if (ret < 0) {
2652 status = SYNC_METADATA_STATUS_ERROR;
2653 goto end;
2654 } else if (ret > 0) {
2655 status = SYNC_METADATA_STATUS_NEW_DATA;
2656 } else /* ret == 0 */ {
2657 status = SYNC_METADATA_STATUS_NO_DATA;
2658 goto end;
2659 }
2660
2661 ret = lttng_ust_ctl_snapshot(metadata_stream->ustream);
2662 if (ret < 0) {
2663 ERR("Failed to take a snapshot of the metadata ring-buffer positions, ret = %d", ret);
2664 status = SYNC_METADATA_STATUS_ERROR;
2665 goto end;
2666 }
2667
2668 end:
2669 return status;
2670 }
2671
2672 /*
2673 * Return 0 on success else a negative value.
2674 */
2675 static int notify_if_more_data(struct lttng_consumer_stream *stream,
2676 struct lttng_consumer_local_data *ctx)
2677 {
2678 int ret;
2679 struct lttng_ust_ctl_consumer_stream *ustream;
2680
2681 LTTNG_ASSERT(stream);
2682 LTTNG_ASSERT(ctx);
2683
2684 ustream = stream->ustream;
2685
2686 /*
2687 	 * First, check whether a new sub-buffer is available before
2688 	 * reading the stream wait_fd.
2689 	 */
2690 	/* Try to get the next sub-buffer. */
2691 ret = lttng_ust_ctl_get_next_subbuf(ustream);
2692 if (ret) {
2693 /* No more data found, flag the stream. */
2694 stream->has_data = 0;
2695 ret = 0;
2696 goto end;
2697 }
2698
2699 ret = lttng_ust_ctl_put_subbuf(ustream);
2700 LTTNG_ASSERT(!ret);
2701
2702 /* This stream still has data. Flag it and wake up the data thread. */
2703 stream->has_data = 1;
2704
2705 if (stream->monitor && !stream->hangup_flush_done && !ctx->has_wakeup) {
2706 ssize_t writelen;
2707
2708 writelen = lttng_pipe_write(ctx->consumer_wakeup_pipe, "!", 1);
2709 if (writelen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
2710 ret = writelen;
2711 goto end;
2712 }
2713
2714 /* The wake up pipe has been notified. */
2715 ctx->has_wakeup = 1;
2716 }
2717 ret = 0;
2718
2719 end:
2720 return ret;
2721 }
2722
2723 static int consumer_stream_ust_on_wake_up(struct lttng_consumer_stream *stream)
2724 {
2725 int ret = 0;
2726
2727 /*
2728 * We can consume the 1 byte written into the wait_fd by
2729 * UST. Don't trigger error if we cannot read this one byte
2730 * (read returns 0), or if the error is EAGAIN or EWOULDBLOCK.
2731 *
2732 * This is only done when the stream is monitored by a thread,
2733 	 * before the post-hangup flush is done, and only if the stream
2734 	 * is not already flagged as having data: there might be nothing
2735 	 * to consume in the wait fd while data is still flagged as
2736 	 * available by the consumer wake up pipe.
2737 */
2738 if (stream->monitor && !stream->hangup_flush_done && !stream->has_data) {
2739 char dummy;
2740 ssize_t readlen;
2741
2742 readlen = lttng_read(stream->wait_fd, &dummy, 1);
2743 if (readlen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
2744 ret = readlen;
2745 }
2746 }
2747
2748 return ret;
2749 }
2750
2751 static int extract_common_subbuffer_info(struct lttng_consumer_stream *stream,
2752 struct stream_subbuffer *subbuf)
2753 {
2754 int ret;
2755
2756 ret = lttng_ust_ctl_get_subbuf_size(
2757 stream->ustream, &subbuf->info.data.subbuf_size);
2758 if (ret) {
2759 goto end;
2760 }
2761
2762 ret = lttng_ust_ctl_get_padded_subbuf_size(
2763 stream->ustream, &subbuf->info.data.padded_subbuf_size);
2764 if (ret) {
2765 goto end;
2766 }
2767
2768 end:
2769 return ret;
2770 }
2771
2772 static int extract_metadata_subbuffer_info(struct lttng_consumer_stream *stream,
2773 struct stream_subbuffer *subbuf)
2774 {
2775 int ret;
2776
2777 ret = extract_common_subbuffer_info(stream, subbuf);
2778 if (ret) {
2779 goto end;
2780 }
2781
2782 subbuf->info.metadata.version = stream->metadata_version;
2783
2784 end:
2785 return ret;
2786 }
2787
2788 static int extract_data_subbuffer_info(struct lttng_consumer_stream *stream,
2789 struct stream_subbuffer *subbuf)
2790 {
2791 int ret;
2792
2793 ret = extract_common_subbuffer_info(stream, subbuf);
2794 if (ret) {
2795 goto end;
2796 }
2797
2798 ret = lttng_ust_ctl_get_packet_size(
2799 stream->ustream, &subbuf->info.data.packet_size);
2800 if (ret < 0) {
2801 PERROR("Failed to get sub-buffer packet size");
2802 goto end;
2803 }
2804
2805 ret = lttng_ust_ctl_get_content_size(
2806 stream->ustream, &subbuf->info.data.content_size);
2807 if (ret < 0) {
2808 PERROR("Failed to get sub-buffer content size");
2809 goto end;
2810 }
2811
2812 ret = lttng_ust_ctl_get_timestamp_begin(
2813 stream->ustream, &subbuf->info.data.timestamp_begin);
2814 if (ret < 0) {
2815 PERROR("Failed to get sub-buffer begin timestamp");
2816 goto end;
2817 }
2818
2819 ret = lttng_ust_ctl_get_timestamp_end(
2820 stream->ustream, &subbuf->info.data.timestamp_end);
2821 if (ret < 0) {
2822 PERROR("Failed to get sub-buffer end timestamp");
2823 goto end;
2824 }
2825
2826 ret = lttng_ust_ctl_get_events_discarded(
2827 stream->ustream, &subbuf->info.data.events_discarded);
2828 if (ret) {
2829 PERROR("Failed to get sub-buffer events discarded count");
2830 goto end;
2831 }
2832
2833 ret = lttng_ust_ctl_get_sequence_number(stream->ustream,
2834 &subbuf->info.data.sequence_number.value);
2835 if (ret) {
2836 /* May not be supported by older LTTng-modules. */
2837 if (ret != -ENOTTY) {
2838 PERROR("Failed to get sub-buffer sequence number");
2839 goto end;
2840 }
2841 } else {
2842 subbuf->info.data.sequence_number.is_set = true;
2843 }
2844
2845 ret = lttng_ust_ctl_get_stream_id(
2846 stream->ustream, &subbuf->info.data.stream_id);
2847 if (ret < 0) {
2848 PERROR("Failed to get stream id");
2849 goto end;
2850 }
2851
2852 ret = lttng_ust_ctl_get_instance_id(stream->ustream,
2853 &subbuf->info.data.stream_instance_id.value);
2854 if (ret) {
2855 /* May not be supported by older LTTng-modules. */
2856 if (ret != -ENOTTY) {
2857 PERROR("Failed to get stream instance id");
2858 goto end;
2859 }
2860 } else {
2861 subbuf->info.data.stream_instance_id.is_set = true;
2862 }
2863 end:
2864 return ret;
2865 }
2866
2867 static int get_next_subbuffer_common(struct lttng_consumer_stream *stream,
2868 struct stream_subbuffer *subbuffer)
2869 {
2870 int ret;
2871 const char *addr;
2872
2873 ret = stream->read_subbuffer_ops.extract_subbuffer_info(
2874 stream, subbuffer);
2875 if (ret) {
2876 goto end;
2877 }
2878
2879 ret = get_current_subbuf_addr(stream, &addr);
2880 if (ret) {
2881 goto end;
2882 }
2883
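	/* Expose the current sub-buffer as a view over its padded size. */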
2884 subbuffer->buffer.buffer = lttng_buffer_view_init(
2885 addr, 0, subbuffer->info.data.padded_subbuf_size);
2886 LTTNG_ASSERT(subbuffer->buffer.buffer.data != NULL);
2887 end:
2888 return ret;
2889 }
2890
2891 static enum get_next_subbuffer_status get_next_subbuffer(
2892 struct lttng_consumer_stream *stream,
2893 struct stream_subbuffer *subbuffer)
2894 {
2895 int ret;
2896 enum get_next_subbuffer_status status;
2897
2898 ret = lttng_ust_ctl_get_next_subbuf(stream->ustream);
2899 switch (ret) {
2900 case 0:
2901 status = GET_NEXT_SUBBUFFER_STATUS_OK;
2902 break;
2903 case -ENODATA:
2904 case -EAGAIN:
2905 /*
2906 * The caller only expects -ENODATA when there is no data to
2907 		 * read, but the tracer returns -EAGAIN when there is
2908 * currently no data for a non-finalized stream, and -ENODATA
2909 * when there is no data for a finalized stream. Those can be
2910 * combined into a -ENODATA return value.
2911 */
2912 status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA;
2913 goto end;
2914 default:
2915 status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
2916 goto end;
2917 }
2918
2919 ret = get_next_subbuffer_common(stream, subbuffer);
2920 if (ret) {
2921 status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
2922 goto end;
2923 }
2924 end:
2925 return status;
2926 }
2927
2928 static enum get_next_subbuffer_status get_next_subbuffer_metadata(
2929 struct lttng_consumer_stream *stream,
2930 struct stream_subbuffer *subbuffer)
2931 {
2932 int ret;
2933 bool cache_empty;
2934 bool got_subbuffer;
2935 bool coherent;
2936 bool buffer_empty;
2937 unsigned long consumed_pos, produced_pos;
2938 enum get_next_subbuffer_status status;
2939
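	/*
	 * Try to acquire a sub-buffer; when none is available, commit a
	 * packet from the metadata cache into the ring buffer and retry
	 * until a sub-buffer is acquired or the cache runs empty.
	 */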
2940 do {
2941 ret = lttng_ust_ctl_get_next_subbuf(stream->ustream);
2942 if (ret == 0) {
2943 got_subbuffer = true;
2944 } else {
2945 got_subbuffer = false;
2946 if (ret != -EAGAIN) {
2947 /* Fatal error. */
2948 status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
2949 goto end;
2950 }
2951 }
2952
2953 /*
2954 * Determine if the cache is empty and ensure that a sub-buffer
2955 * is made available if the cache is not empty.
2956 */
2957 if (!got_subbuffer) {
2958 ret = commit_one_metadata_packet(stream);
2959 if (ret < 0 && ret != -ENOBUFS) {
2960 status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
2961 goto end;
2962 } else if (ret == 0) {
2963 /* Not an error, the cache is empty. */
2964 cache_empty = true;
2965 status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA;
2966 goto end;
2967 } else {
2968 cache_empty = false;
2969 }
2970 } else {
2971 pthread_mutex_lock(&stream->chan->metadata_cache->lock);
2972 cache_empty = stream->chan->metadata_cache->contents.size ==
2973 stream->ust_metadata_pushed;
2974 pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
2975 }
2976 } while (!got_subbuffer);
2977
2978 /* Populate sub-buffer infos and view. */
2979 ret = get_next_subbuffer_common(stream, subbuffer);
2980 if (ret) {
2981 status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
2982 goto end;
2983 }
2984
2985 ret = lttng_ustconsumer_sample_snapshot_positions(stream);
2986 if (ret < 0) {
2987 /*
2988 * -EAGAIN is not expected since we got a sub-buffer and haven't
2989 * pushed the consumption position yet (on put_next).
2990 */
2991 PERROR("Failed to take a snapshot of metadata buffer positions");
2992 status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
2993 goto end;
2994 }
2995
2996 ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
2997 if (ret) {
2998 PERROR("Failed to get metadata consumed position");
2999 status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
3000 goto end;
3001 }
3002
3003 ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
3004 if (ret) {
3005 PERROR("Failed to get metadata produced position");
3006 status = GET_NEXT_SUBBUFFER_STATUS_ERROR;
3007 goto end;
3008 }
3009
3010 	/* Last sub-buffer of the ring buffer? */
3011 buffer_empty = (consumed_pos + stream->max_sb_size) == produced_pos;
3012
3013 /*
3014 * The sessiond registry lock ensures that coherent units of metadata
3015 * are pushed to the consumer daemon at once. Hence, if a sub-buffer is
3016 	 * acquired, the cache is empty, and it is the only sub-buffer
3017 * available, it is safe to assume that it is "coherent".
3018 */
3019 coherent = got_subbuffer && cache_empty && buffer_empty;
3020
3021 LTTNG_OPTIONAL_SET(&subbuffer->info.metadata.coherent, coherent);
3022 status = GET_NEXT_SUBBUFFER_STATUS_OK;
3023 end:
3024 return status;
3025 }
3026
3027 static int put_next_subbuffer(struct lttng_consumer_stream *stream,
3028 struct stream_subbuffer *subbuffer __attribute__((unused)))
3029 {
3030 const int ret = lttng_ust_ctl_put_next_subbuf(stream->ustream);
3031
3032 LTTNG_ASSERT(ret == 0);
3033 return ret;
3034 }
3035
3036 static int signal_metadata(struct lttng_consumer_stream *stream,
3037 struct lttng_consumer_local_data *ctx __attribute__((unused)))
3038 {
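	/*
	 * Wake up any thread waiting on the metadata rendez-vous point;
	 * this is wired as the on_sleep hook for live metadata streams.
	 */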
3039 ASSERT_LOCKED(stream->metadata_rdv_lock);
3040 return pthread_cond_broadcast(&stream->metadata_rdv) ? -errno : 0;
3041 }
3042
3043 static int lttng_ustconsumer_set_stream_ops(
3044 struct lttng_consumer_stream *stream)
3045 {
3046 int ret = 0;
3047
3048 stream->read_subbuffer_ops.on_wake_up = consumer_stream_ust_on_wake_up;
3049 if (stream->metadata_flag) {
3050 stream->read_subbuffer_ops.get_next_subbuffer =
3051 get_next_subbuffer_metadata;
3052 stream->read_subbuffer_ops.extract_subbuffer_info =
3053 extract_metadata_subbuffer_info;
3054 stream->read_subbuffer_ops.reset_metadata =
3055 metadata_stream_reset_cache_consumed_position;
3056 if (stream->chan->is_live) {
3057 stream->read_subbuffer_ops.on_sleep = signal_metadata;
3058 ret = consumer_stream_enable_metadata_bucketization(
3059 stream);
3060 if (ret) {
3061 goto end;
3062 }
3063 }
3064 } else {
3065 stream->read_subbuffer_ops.get_next_subbuffer =
3066 get_next_subbuffer;
3067 stream->read_subbuffer_ops.extract_subbuffer_info =
3068 extract_data_subbuffer_info;
3069 stream->read_subbuffer_ops.on_sleep = notify_if_more_data;
3070 if (stream->chan->is_live) {
3071 stream->read_subbuffer_ops.send_live_beacon =
3072 consumer_flush_ust_index;
3073 }
3074 }
3075
3076 stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer;
3077 end:
3078 return ret;
3079 }
3080
3081 /*
3082 * Called when a stream is created.
3083 *
3084 * Return 0 on success or else a negative value.
3085 */
3086 int lttng_ustconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
3087 {
3088 int ret;
3089
3090 LTTNG_ASSERT(stream);
3091
3092 /*
3093 * Don't create anything if this is set for streaming or if there is
3094 * no current trace chunk on the parent channel.
3095 */
3096 if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor &&
3097 stream->chan->trace_chunk) {
3098 ret = consumer_stream_create_output_files(stream, true);
3099 if (ret) {
3100 goto error;
3101 }
3102 }
3103
3104 lttng_ustconsumer_set_stream_ops(stream);
3105 ret = 0;
3106
3107 error:
3108 return ret;
3109 }
3110
3111 /*
3112 * Check if data is still being extracted from the buffers for a specific
3113  * stream. The consumer data lock and the stream lock MUST be acquired
3114  * before calling this function.
3115  *
3116  * Return 1 if the traced data is still being read, else 0, meaning that
3117  * the data is available for trace viewer reading.
3118 */
3119 int lttng_ustconsumer_data_pending(struct lttng_consumer_stream *stream)
3120 {
3121 int ret;
3122
3123 LTTNG_ASSERT(stream);
3124 LTTNG_ASSERT(stream->ustream);
3125 ASSERT_LOCKED(stream->lock);
3126
3127 DBG("UST consumer checking data pending");
3128
3129 if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
3130 ret = 0;
3131 goto end;
3132 }
3133
3134 if (stream->chan->type == CONSUMER_CHANNEL_TYPE_METADATA) {
3135 uint64_t contiguous, pushed;
3136
3137 /* Ease our life a bit. */
3138 pthread_mutex_lock(&stream->chan->metadata_cache->lock);
3139 contiguous = stream->chan->metadata_cache->contents.size;
3140 pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
3141 pushed = stream->ust_metadata_pushed;
3142
3143 /*
3144 * We can simply check whether all contiguously available data
3145 * has been pushed to the ring buffer, since the push operation
3146 * is performed within get_next_subbuf(), and because both
3147 * get_next_subbuf() and put_next_subbuf() are issued atomically
3148 * thanks to the stream lock within
3149 * lttng_ustconsumer_read_subbuffer(). This basically means that
3150 * whetnever ust_metadata_pushed is incremented, the associated
3151 	 * whenever ust_metadata_pushed is incremented, the associated
3152 */
3153 DBG("UST consumer metadata pending check: contiguous %" PRIu64 " vs pushed %" PRIu64,
3154 contiguous, pushed);
3155 LTTNG_ASSERT(((int64_t) (contiguous - pushed)) >= 0);
3156 if ((contiguous != pushed) ||
3157 (((int64_t) contiguous - pushed) > 0 || contiguous == 0)) {
3158 ret = 1; /* Data is pending */
3159 goto end;
3160 }
3161 } else {
3162 ret = lttng_ust_ctl_get_next_subbuf(stream->ustream);
3163 if (ret == 0) {
3164 /*
3165 * There is still data so let's put back this
3166 * subbuffer.
3167 */
3168 ret = lttng_ust_ctl_put_subbuf(stream->ustream);
3169 LTTNG_ASSERT(ret == 0);
3170 ret = 1; /* Data is pending */
3171 goto end;
3172 }
3173 }
3174
3175 /* Data is NOT pending so ready to be read. */
3176 ret = 0;
3177
3178 end:
3179 return ret;
3180 }
3181
3182 /*
3183 * Stop a given metadata channel timer if enabled and close the wait fd which
3184 * is the poll pipe of the metadata stream.
3185 *
3186 * This MUST be called with the metadata channel lock acquired.
3187 */
3188 void lttng_ustconsumer_close_metadata(struct lttng_consumer_channel *metadata)
3189 {
3190 int ret;
3191
3192 LTTNG_ASSERT(metadata);
3193 LTTNG_ASSERT(metadata->type == CONSUMER_CHANNEL_TYPE_METADATA);
3194
3195 DBG("Closing metadata channel key %" PRIu64, metadata->key);
3196
3197 if (metadata->switch_timer_enabled == 1) {
3198 consumer_timer_switch_stop(metadata);
3199 }
3200
3201 if (!metadata->metadata_stream) {
3202 goto end;
3203 }
3204
3205 /*
3206 	 * Close the write side so that the thread monitoring the stream,
3207 	 * if any, wakes up and cleans up the metadata stream.
3208 */
3209 if (metadata->metadata_stream->ust_metadata_poll_pipe[1] >= 0) {
3210 ret = close(metadata->metadata_stream->ust_metadata_poll_pipe[1]);
3211 if (ret < 0) {
3212 PERROR("closing metadata pipe write side");
3213 }
3214 metadata->metadata_stream->ust_metadata_poll_pipe[1] = -1;
3215 }
3216
3217 end:
3218 return;
3219 }
3220
3221 /*
3222 * Close every metadata stream wait fd of the metadata hash table. This
3223  * function MUST be used very carefully so as not to run into a race between the
3224 * metadata thread handling streams and this function closing their wait fd.
3225 *
3226  * For UST, this is used when the session daemon hangs up. It is the metadata
3227  * producer, so calling this is safe because we are assured that no state change
3228 * can occur in the metadata thread for the streams in the hash table.
3229 */
3230 void lttng_ustconsumer_close_all_metadata(struct lttng_ht *metadata_ht)
3231 {
3232 struct lttng_ht_iter iter;
3233 struct lttng_consumer_stream *stream;
3234
3235 LTTNG_ASSERT(metadata_ht);
3236 LTTNG_ASSERT(metadata_ht->ht);
3237
3238 DBG("UST consumer closing all metadata streams");
3239
3240 rcu_read_lock();
3241 cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream,
3242 node.node) {
3243
3244 health_code_update();
3245
3246 pthread_mutex_lock(&stream->chan->lock);
3247 lttng_ustconsumer_close_metadata(stream->chan);
3248 pthread_mutex_unlock(&stream->chan->lock);
3249
3250 }
3251 rcu_read_unlock();
3252 }
3253
3254 void lttng_ustconsumer_close_stream_wakeup(struct lttng_consumer_stream *stream)
3255 {
3256 int ret;
3257
3258 ret = lttng_ust_ctl_stream_close_wakeup_fd(stream->ustream);
3259 if (ret < 0) {
3260 ERR("Unable to close wakeup fd");
3261 }
3262 }
3263
3264 /*
3265 * Please refer to consumer-timer.c before adding any lock within this
3266 * function or any of its callees. Timers have a very strict locking
3267 * semantic with respect to teardown. Failure to respect this semantic
3268 * introduces deadlocks.
3269 *
3270 * DON'T hold the metadata lock when calling this function, else this
3271 * can cause deadlock involving consumer awaiting for metadata to be
3272 * pushed out due to concurrent interaction with the session daemon.
3273 */
3274 int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
3275 struct lttng_consumer_channel *channel, int timer, int wait)
3276 {
3277 struct lttcomm_metadata_request_msg request;
3278 struct lttcomm_consumer_msg msg;
3279 enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
3280 uint64_t len, key, offset, version;
3281 int ret;
3282
3283 LTTNG_ASSERT(channel);
3284 LTTNG_ASSERT(channel->metadata_cache);
3285
3286 memset(&request, 0, sizeof(request));
3287
3288 	/* Send the metadata request to the sessiond. */
3289 switch (the_consumer_data.type) {
3290 case LTTNG_CONSUMER64_UST:
3291 request.bits_per_long = 64;
3292 break;
3293 case LTTNG_CONSUMER32_UST:
3294 request.bits_per_long = 32;
3295 break;
3296 default:
3297 request.bits_per_long = 0;
3298 break;
3299 }
3300
3301 request.session_id = channel->session_id;
3302 request.session_id_per_pid = channel->session_id_per_pid;
3303 /*
3304 * Request the application UID here so the metadata of that application can
3305 * be sent back. The channel UID corresponds to the user UID of the session
3306 * used for the rights on the stream file(s).
3307 */
3308 request.uid = channel->ust_app_uid;
3309 request.key = channel->key;
3310
3311 DBG("Sending metadata request to sessiond, session id %" PRIu64
3312 ", per-pid %" PRIu64 ", app UID %u and channel key %" PRIu64,
3313 request.session_id, request.session_id_per_pid, request.uid,
3314 request.key);
3315
3316 pthread_mutex_lock(&ctx->metadata_socket_lock);
3317
3318 health_code_update();
3319
3320 ret = lttcomm_send_unix_sock(ctx->consumer_metadata_socket, &request,
3321 sizeof(request));
3322 if (ret < 0) {
3323 ERR("Asking metadata to sessiond");
3324 goto end;
3325 }
3326
3327 health_code_update();
3328
3329 /* Receive the metadata from sessiond */
3330 ret = lttcomm_recv_unix_sock(ctx->consumer_metadata_socket, &msg,
3331 sizeof(msg));
3332 if (ret != sizeof(msg)) {
3333 DBG("Consumer received unexpected message size %d (expects %zu)",
3334 ret, sizeof(msg));
3335 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
3336 /*
3337 		 * The ret value might be 0, meaning an orderly shutdown, but this
3338 		 * is ok since the caller handles this.
3339 */
3340 goto end;
3341 }
3342
3343 health_code_update();
3344
3345 if (msg.cmd_type == LTTNG_ERR_UND) {
3346 /* No registry found */
3347 (void) consumer_send_status_msg(ctx->consumer_metadata_socket,
3348 ret_code);
3349 ret = 0;
3350 goto end;
3351 } else if (msg.cmd_type != LTTNG_CONSUMER_PUSH_METADATA) {
3352 ERR("Unexpected cmd_type received %d", msg.cmd_type);
3353 ret = -1;
3354 goto end;
3355 }
3356
3357 len = msg.u.push_metadata.len;
3358 key = msg.u.push_metadata.key;
3359 offset = msg.u.push_metadata.target_offset;
3360 version = msg.u.push_metadata.version;
3361
3362 LTTNG_ASSERT(key == channel->key);
3363 if (len == 0) {
3364 DBG("No new metadata to receive for key %" PRIu64, key);
3365 }
3366
3367 health_code_update();
3368
3369 /* Tell session daemon we are ready to receive the metadata. */
3370 ret = consumer_send_status_msg(ctx->consumer_metadata_socket,
3371 LTTCOMM_CONSUMERD_SUCCESS);
3372 if (ret < 0 || len == 0) {
3373 /*
3374 * Somehow, the session daemon is not responding anymore or there is
3375 * nothing to receive.
3376 */
3377 goto end;
3378 }
3379
3380 health_code_update();
3381
3382 ret = lttng_ustconsumer_recv_metadata(ctx->consumer_metadata_socket,
3383 key, offset, len, version, channel, timer, wait);
3384 if (ret >= 0) {
3385 /*
3386 		 * Only send the status msg if the sessiond is alive, meaning a
3387 		 * non-negative ret code.
3388 */
3389 (void) consumer_send_status_msg(ctx->consumer_metadata_socket, ret);
3390 }
3391 ret = 0;
3392
3393 end:
3394 health_code_update();
3395
3396 pthread_mutex_unlock(&ctx->metadata_socket_lock);
3397 return ret;
3398 }
3399
3400 /*
3401  * Wrapper around the ustctl call that returns the stream id.
3402 */
3403 int lttng_ustconsumer_get_stream_id(struct lttng_consumer_stream *stream,
3404 uint64_t *stream_id)
3405 {
3406 LTTNG_ASSERT(stream);
3407 LTTNG_ASSERT(stream_id);
3408
3409 return lttng_ust_ctl_get_stream_id(stream->ustream, stream_id);
3410 }
3411
3412 void lttng_ustconsumer_sigbus_handle(void *addr)
3413 {
3414 lttng_ust_ctl_sigbus_handle(addr);
3415 }