Fix: per-uid flush and ust registry locking
lttng-tools.git: src/common/ust-consumer/ust-consumer.c
1/*
2 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19#define _GNU_SOURCE
20#define _LGPL_SOURCE
21#include <assert.h>
22#include <lttng/ust-ctl.h>
23#include <poll.h>
24#include <pthread.h>
25#include <stdlib.h>
26#include <string.h>
27#include <sys/mman.h>
28#include <sys/socket.h>
29#include <sys/stat.h>
30#include <sys/types.h>
31#include <inttypes.h>
32#include <unistd.h>
33#include <urcu/list.h>
34#include <signal.h>
35
36#include <bin/lttng-consumerd/health-consumerd.h>
37#include <common/common.h>
38#include <common/sessiond-comm/sessiond-comm.h>
39#include <common/relayd/relayd.h>
40#include <common/compat/fcntl.h>
41#include <common/compat/endian.h>
42#include <common/consumer-metadata-cache.h>
43#include <common/consumer-stream.h>
44#include <common/consumer-timer.h>
45#include <common/utils.h>
46#include <common/index/index.h>
47
48#include "ust-consumer.h"
49
50extern struct lttng_consumer_global_data consumer_data;
51extern int consumer_poll_timeout;
52extern volatile int consumer_quit;
53
54/*
55 * Free the channel object and all streams associated with it. This MUST be
56 * used only if the channel has _NEVER_ been added to the global channel
57 * hash table.
58 */
ffe60014 59static void destroy_channel(struct lttng_consumer_channel *channel)
3bd1e081 60{
ffe60014
DG
61 struct lttng_consumer_stream *stream, *stmp;
62
63 assert(channel);
64
65 DBG("UST consumer cleaning stream list");
66
67 cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
68 send_node) {
9ce5646a
MD
69
70 health_code_update();
71
ffe60014
DG
72 cds_list_del(&stream->send_node);
73 ustctl_destroy_stream(stream->ustream);
74 free(stream);
75 }
76
77 /*
78 * If the channel object is available, meaning it was created before the
79 * streams, delete it.
80 */
81 if (channel->uchan) {
82 lttng_ustconsumer_del_channel(channel);
83 }
84 free(channel);
85}
3bd1e081
MD
86
87/*
ffe60014 88 * Add channel to internal consumer state.
3bd1e081 89 *
ffe60014 90 * Returns 0 on success or else a negative value.
3bd1e081 91 */
ffe60014
DG
92static int add_channel(struct lttng_consumer_channel *channel,
93 struct lttng_consumer_local_data *ctx)
3bd1e081
MD
94{
95 int ret = 0;
96
ffe60014
DG
97 assert(channel);
98 assert(ctx);
99
100 if (ctx->on_recv_channel != NULL) {
101 ret = ctx->on_recv_channel(channel);
102 if (ret == 0) {
d8ef542d 103 ret = consumer_add_channel(channel, ctx);
ffe60014
DG
104 } else if (ret < 0) {
105 /* Most likely an ENOMEM. */
106 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
107 goto error;
108 }
109 } else {
d8ef542d 110 ret = consumer_add_channel(channel, ctx);
3bd1e081
MD
111 }
112
d88aee68 113 DBG("UST consumer channel added (key: %" PRIu64 ")", channel->key);
ffe60014
DG
114
115error:
3bd1e081
MD
116 return ret;
117}
118
119/*
ffe60014
DG
120 * Allocate and return a consumer channel object.
121 */
122static struct lttng_consumer_channel *allocate_channel(uint64_t session_id,
123 const char *pathname, const char *name, uid_t uid, gid_t gid,
da009f2c 124 uint64_t relayd_id, uint64_t key, enum lttng_event_output output,
2bba9e53 125 uint64_t tracefile_size, uint64_t tracefile_count,
ecc48a90
JD
126 uint64_t session_id_per_pid, unsigned int monitor,
127 unsigned int live_timer_interval)
ffe60014
DG
128{
129 assert(pathname);
130 assert(name);
131
1950109e
JD
132 return consumer_allocate_channel(key, session_id, pathname, name, uid,
133 gid, relayd_id, output, tracefile_size,
ecc48a90 134 tracefile_count, session_id_per_pid, monitor, live_timer_interval);
ffe60014
DG
135}
136
137/*
138 * Allocate and return a consumer stream object. If _alloc_ret is not NULL,
139 * the error value (when applicable) is set in it; otherwise it is left untouched.
140 *
141 * Return NULL on error else the newly allocated stream object.
3bd1e081 142 */
ffe60014
DG
143static struct lttng_consumer_stream *allocate_stream(int cpu, int key,
144 struct lttng_consumer_channel *channel,
145 struct lttng_consumer_local_data *ctx, int *_alloc_ret)
146{
147 int alloc_ret;
148 struct lttng_consumer_stream *stream = NULL;
149
150 assert(channel);
151 assert(ctx);
152
153 stream = consumer_allocate_stream(channel->key,
154 key,
155 LTTNG_CONSUMER_ACTIVE_STREAM,
156 channel->name,
157 channel->uid,
158 channel->gid,
159 channel->relayd_id,
160 channel->session_id,
161 cpu,
162 &alloc_ret,
4891ece8
DG
163 channel->type,
164 channel->monitor);
ffe60014
DG
165 if (stream == NULL) {
166 switch (alloc_ret) {
167 case -ENOENT:
168 /*
169 * We could not find the channel. Can happen if cpu hotplug
170 * happens while tearing down.
171 */
172 DBG3("Could not find channel");
173 break;
174 case -ENOMEM:
175 case -EINVAL:
176 default:
177 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
178 break;
179 }
180 goto error;
181 }
182
183 stream->chan = channel;
184
185error:
186 if (_alloc_ret) {
187 *_alloc_ret = alloc_ret;
188 }
189 return stream;
190}
191
192/*
193 * Send the given stream pointer to the corresponding thread.
194 *
195 * Returns 0 on success else a negative value.
196 */
197static int send_stream_to_thread(struct lttng_consumer_stream *stream,
198 struct lttng_consumer_local_data *ctx)
199{
dae10966
DG
200 int ret;
201 struct lttng_pipe *stream_pipe;
ffe60014
DG
202
203 /* Get the right pipe where the stream will be sent. */
204 if (stream->metadata_flag) {
5ab66908
MD
205 ret = consumer_add_metadata_stream(stream);
206 if (ret) {
207 ERR("Consumer add metadata stream %" PRIu64 " failed.",
208 stream->key);
209 goto error;
210 }
dae10966 211 stream_pipe = ctx->consumer_metadata_pipe;
ffe60014 212 } else {
5ab66908
MD
213 ret = consumer_add_data_stream(stream);
214 if (ret) {
215 ERR("Consumer add stream %" PRIu64 " failed.",
216 stream->key);
217 goto error;
218 }
dae10966 219 stream_pipe = ctx->consumer_data_pipe;
ffe60014
DG
220 }
221
5ab66908
MD
222 /*
223 * From this point on, the stream's ownership has been moved away from
224 * the channel and becomes globally visible.
225 */
226 stream->globally_visible = 1;
227
dae10966 228 ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream));
ffe60014 229 if (ret < 0) {
dae10966
DG
230 ERR("Consumer write %s stream to pipe %d",
231 stream->metadata_flag ? "metadata" : "data",
232 lttng_pipe_get_writefd(stream_pipe));
5ab66908
MD
233 if (stream->metadata_flag) {
234 consumer_del_stream_for_metadata(stream);
235 } else {
236 consumer_del_stream_for_data(stream);
237 }
ffe60014 238 }
5ab66908 239error:
ffe60014
DG
240 return ret;
241}
242
d88aee68
DG
243/*
244 * Create streams for the given channel using liblttng-ust-ctl.
245 *
246 * Return 0 on success else a negative value.
247 */
ffe60014
DG
248static int create_ust_streams(struct lttng_consumer_channel *channel,
249 struct lttng_consumer_local_data *ctx)
250{
251 int ret, cpu = 0;
252 struct ustctl_consumer_stream *ustream;
253 struct lttng_consumer_stream *stream;
254
255 assert(channel);
256 assert(ctx);
257
258 /*
259 * Loop while a stream is available from ustctl. When NULL is returned,
260 * we've reached the end of the possible streams for the channel.
261 */
262 while ((ustream = ustctl_create_stream(channel->uchan, cpu))) {
263 int wait_fd;
04ef1097 264 int ust_metadata_pipe[2];
ffe60014 265
9ce5646a
MD
266 health_code_update();
267
04ef1097
MD
268 if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && channel->monitor) {
269 ret = utils_create_pipe_cloexec_nonblock(ust_metadata_pipe);
270 if (ret < 0) {
271 ERR("Create ust metadata poll pipe");
272 goto error;
273 }
274 wait_fd = ust_metadata_pipe[0];
275 } else {
276 wait_fd = ustctl_stream_get_wait_fd(ustream);
277 }
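	/*
	 * Note: for a monitored metadata channel, the stream's wait fd is the read
	 * end of a local pipe rather than the ring-buffer wait fd, so the metadata
	 * poll thread can be woken up independently (e.g. when new metadata is
	 * pushed into the cache).
	 */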
ffe60014
DG
278
279 /* Allocate consumer stream object. */
280 stream = allocate_stream(cpu, wait_fd, channel, ctx, &ret);
281 if (!stream) {
282 goto error_alloc;
283 }
284 stream->ustream = ustream;
285 /*
286 * Store it so we can avoid multiple function calls afterwards, since
287 * this value is used heavily in the stream threads. This is UST
288 * specific, which is why it's done after allocation.
289 */
290 stream->wait_fd = wait_fd;
291
b31398bb
DG
292 /*
293 * Increment channel refcount since the channel reference has now been
294 * assigned in the allocation process above.
295 */
10a50311
JD
296 if (stream->chan->monitor) {
297 uatomic_inc(&stream->chan->refcount);
298 }
b31398bb 299
ffe60014
DG
300 /*
301 * Order is important; this is why a list is used. On error, the caller
302 * should clean this list.
303 */
304 cds_list_add_tail(&stream->send_node, &channel->streams.head);
305
306 ret = ustctl_get_max_subbuf_size(stream->ustream,
307 &stream->max_sb_size);
308 if (ret < 0) {
309 ERR("ustctl_get_max_subbuf_size failed for stream %s",
310 stream->name);
311 goto error;
312 }
313
314 /* Do actions once stream has been received. */
315 if (ctx->on_recv_stream) {
316 ret = ctx->on_recv_stream(stream);
317 if (ret < 0) {
318 goto error;
319 }
320 }
321
d88aee68 322 DBG("UST consumer add stream %s (key: %" PRIu64 ") with relayd id %" PRIu64,
ffe60014
DG
323 stream->name, stream->key, stream->relayd_stream_id);
324
325 /* Set next CPU stream. */
326 channel->streams.count = ++cpu;
d88aee68
DG
327
328 /* Keep stream reference when creating metadata. */
329 if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) {
330 channel->metadata_stream = stream;
04ef1097
MD
331 stream->ust_metadata_poll_pipe[0] = ust_metadata_pipe[0];
332 stream->ust_metadata_poll_pipe[1] = ust_metadata_pipe[1];
d88aee68 333 }
ffe60014
DG
334 }
335
336 return 0;
337
338error:
339error_alloc:
340 return ret;
341}
342
343/*
344 * Create an UST channel with the given attributes and send it to the session
345 * daemon using the ust ctl API.
346 *
347 * Return 0 on success or else a negative value.
348 */
349static int create_ust_channel(struct ustctl_consumer_channel_attr *attr,
350 struct ustctl_consumer_channel **chanp)
351{
352 int ret;
353 struct ustctl_consumer_channel *channel;
354
355 assert(attr);
356 assert(chanp);
357
358 DBG3("Creating channel to ustctl with attr: [overwrite: %d, "
359 "subbuf_size: %" PRIu64 ", num_subbuf: %" PRIu64 ", "
360 "switch_timer_interval: %u, read_timer_interval: %u, "
361 "output: %d, type: %d", attr->overwrite, attr->subbuf_size,
362 attr->num_subbuf, attr->switch_timer_interval,
363 attr->read_timer_interval, attr->output, attr->type);
364
365 channel = ustctl_create_channel(attr);
366 if (!channel) {
367 ret = -1;
368 goto error_create;
369 }
370
371 *chanp = channel;
372
373 return 0;
374
375error_create:
376 return ret;
377}
378
d88aee68
DG
379/*
380 * Send a single given stream to the session daemon using the sock.
381 *
382 * Return 0 on success else a negative value.
383 */
ffe60014
DG
384static int send_sessiond_stream(int sock, struct lttng_consumer_stream *stream)
385{
386 int ret;
387
388 assert(stream);
389 assert(sock >= 0);
390
3eb914c0 391 DBG("UST consumer sending stream %" PRIu64 " to sessiond", stream->key);
ffe60014
DG
392
393 /* Send stream to session daemon. */
394 ret = ustctl_send_stream_to_sessiond(sock, stream->ustream);
395 if (ret < 0) {
396 goto error;
397 }
398
ffe60014
DG
399error:
400 return ret;
401}
402
403/*
404 * Send channel to sessiond.
405 *
d88aee68 406 * Return 0 on success or else a negative value.
ffe60014
DG
407 */
408static int send_sessiond_channel(int sock,
409 struct lttng_consumer_channel *channel,
410 struct lttng_consumer_local_data *ctx, int *relayd_error)
411{
0c759fc9 412 int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
ffe60014 413 struct lttng_consumer_stream *stream;
a4baae1b 414 uint64_t net_seq_idx = -1ULL;
ffe60014
DG
415
416 assert(channel);
417 assert(ctx);
418 assert(sock >= 0);
419
420 DBG("UST consumer sending channel %s to sessiond", channel->name);
421
62285ea4
DG
422 if (channel->relayd_id != (uint64_t) -1ULL) {
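		/*
		 * A relayd_id other than -1ULL means this channel streams over the
		 * network; register every stream with the relayd before sending the
		 * channel to the session daemon.
		 */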
423 cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
9ce5646a
MD
424
425 health_code_update();
426
62285ea4
DG
427 /* Try to send the stream to the relayd if one is available. */
428 ret = consumer_send_relayd_stream(stream, stream->chan->pathname);
429 if (ret < 0) {
430 /*
431 * Flag that the relayd was the problem here, probably due to a
432 * communication error on the socket.
433 */
434 if (relayd_error) {
435 *relayd_error = 1;
436 }
725d28b2 437 ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
ffe60014 438 }
a4baae1b
JD
439 if (net_seq_idx == -1ULL) {
440 net_seq_idx = stream->net_seq_idx;
441 }
442 }
f2a444f1 443 }
ffe60014 444
f2a444f1
DG
445 /* Inform sessiond that we are about to send channel and streams. */
446 ret = consumer_send_status_msg(sock, ret_code);
0c759fc9 447 if (ret < 0 || ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
f2a444f1
DG
448 /*
449 * Either the session daemon is not responding or the relayd died so we
450 * stop now.
451 */
452 goto error;
453 }
454
455 /* Send channel to sessiond. */
456 ret = ustctl_send_channel_to_sessiond(sock, channel->uchan);
457 if (ret < 0) {
458 goto error;
459 }
460
461 ret = ustctl_channel_close_wakeup_fd(channel->uchan);
462 if (ret < 0) {
463 goto error;
464 }
465
466 /* The channel was sent successfully to the sessiond at this point. */
467 cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
9ce5646a
MD
468
469 health_code_update();
470
ffe60014
DG
471 /* Send stream to session daemon. */
472 ret = send_sessiond_stream(sock, stream);
473 if (ret < 0) {
474 goto error;
475 }
476 }
477
478 /* Tell sessiond there is no more stream. */
479 ret = ustctl_send_stream_to_sessiond(sock, NULL);
480 if (ret < 0) {
481 goto error;
482 }
483
484 DBG("UST consumer NULL stream sent to sessiond");
485
486 return 0;
487
488error:
0c759fc9 489 if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) {
f2a444f1
DG
490 ret = -1;
491 }
ffe60014
DG
492 return ret;
493}
494
495/*
496 * Create a channel and its streams and add the channel to the internal consumer
497 * state. The created streams must ONLY be sent once the GET_CHANNEL command is
498 * received.
499 *
500 * Return 0 on success or else a negative value is returned and the channel
501 * MUST be destroyed by consumer_del_channel().
502 */
503static int ask_channel(struct lttng_consumer_local_data *ctx, int sock,
504 struct lttng_consumer_channel *channel,
505 struct ustctl_consumer_channel_attr *attr)
3bd1e081
MD
506{
507 int ret;
508
ffe60014
DG
509 assert(ctx);
510 assert(channel);
511 assert(attr);
512
513 /*
514 * This value is still used by the kernel consumer since, for the kernel,
515 * the stream ownership is not IN the consumer, so we need the number of
516 * streams left to initialize in order to know when to delete the channel
517 * (see consumer.c).
518 *
519 * As for the user space tracer, the consumer creates and sends the
520 * streams to the session daemon, which only sends them to the application
521 * once every stream of a channel has been received, making this value
522 * useless because the streams will be added to the poll thread before the
523 * application receives them. This ensures that a stream cannot hang up
524 * during initialization of a channel.
525 */
526 channel->nb_init_stream_left = 0;
527
528 /* The reply msg status is handled in the following call. */
529 ret = create_ust_channel(attr, &channel->uchan);
530 if (ret < 0) {
10a50311 531 goto end;
3bd1e081
MD
532 }
533
d8ef542d
MD
534 channel->wait_fd = ustctl_channel_get_wait_fd(channel->uchan);
535
10a50311
JD
536 /*
537 * For the snapshots (no monitor), we create the metadata streams
538 * on demand, not during the channel creation.
539 */
540 if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && !channel->monitor) {
541 ret = 0;
542 goto end;
543 }
544
ffe60014
DG
545 /* Open all streams for this channel. */
546 ret = create_ust_streams(channel, ctx);
547 if (ret < 0) {
10a50311 548 goto end;
ffe60014
DG
549 }
550
10a50311 551end:
3bd1e081
MD
552 return ret;
553}
554
d88aee68
DG
555/*
556 * Send all streams of a channel to the right thread handling them.
557 *
558 * On error, return a negative value else 0 on success.
559 */
560static int send_streams_to_thread(struct lttng_consumer_channel *channel,
561 struct lttng_consumer_local_data *ctx)
562{
563 int ret = 0;
564 struct lttng_consumer_stream *stream, *stmp;
565
566 assert(channel);
567 assert(ctx);
568
569 /* Send streams to the corresponding thread. */
570 cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
571 send_node) {
9ce5646a
MD
572
573 health_code_update();
574
d88aee68
DG
575 /* Sending the stream to the thread. */
576 ret = send_stream_to_thread(stream, ctx);
577 if (ret < 0) {
578 /*
579 * If we are unable to send the stream to the thread, there is
580 * a big problem so just stop everything.
581 */
5ab66908
MD
582 /* Remove node from the channel stream list. */
583 cds_list_del(&stream->send_node);
d88aee68
DG
584 goto error;
585 }
586
587 /* Remove node from the channel stream list. */
588 cds_list_del(&stream->send_node);
4891ece8 589
d88aee68
DG
590 }
591
592error:
593 return ret;
594}
595
7972aab2
DG
596/*
597 * Flush channel's streams using the given key to retrieve the channel.
598 *
599 * Return 0 on success else an LTTng error code.
600 */
601static int flush_channel(uint64_t chan_key)
602{
603 int ret = 0;
604 struct lttng_consumer_channel *channel;
605 struct lttng_consumer_stream *stream;
606 struct lttng_ht *ht;
607 struct lttng_ht_iter iter;
608
8fd623e0 609 DBG("UST consumer flush channel key %" PRIu64, chan_key);
7972aab2 610
a500c257 611 rcu_read_lock();
7972aab2
DG
612 channel = consumer_find_channel(chan_key);
613 if (!channel) {
8fd623e0 614 ERR("UST consumer flush channel %" PRIu64 " not found", chan_key);
7972aab2
DG
615 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
616 goto error;
617 }
618
619 ht = consumer_data.stream_per_chan_id_ht;
620
621 /* For each stream of the channel id, flush it. */
7972aab2
DG
622 cds_lfht_for_each_entry_duplicate(ht->ht,
623 ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
624 &channel->key, &iter.iter, stream, node_channel_id.node) {
9ce5646a
MD
625
626 health_code_update();
627
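		/* Flush so that data sitting in the current sub-buffer can be consumed. */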
b8086166 628 ustctl_flush_buffer(stream->ustream, 1);
7972aab2 629 }
7972aab2 630error:
a500c257 631 rcu_read_unlock();
7972aab2
DG
632 return ret;
633}
634
d88aee68
DG
635/*
636 * Close metadata stream wakeup_fd using the given key to retrieve the channel.
a500c257 637 * RCU read side lock MUST be acquired before calling this function.
d88aee68
DG
638 *
639 * Return 0 on success else an LTTng error code.
640 */
641static int close_metadata(uint64_t chan_key)
642{
ea88ca2a 643 int ret = 0;
d88aee68
DG
644 struct lttng_consumer_channel *channel;
645
8fd623e0 646 DBG("UST consumer close metadata key %" PRIu64, chan_key);
d88aee68
DG
647
648 channel = consumer_find_channel(chan_key);
649 if (!channel) {
84cc9aa0
DG
650 /*
651 * This is possible if the metadata thread has issued a delete because
652 * the endpoint of the stream hung up. There is no way the
653 * session daemon can know about it, thus use a DBG instead of an actual
654 * error.
655 */
656 DBG("UST consumer close metadata %" PRIu64 " not found", chan_key);
d88aee68
DG
657 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
658 goto error;
659 }
660
ea88ca2a 661 pthread_mutex_lock(&consumer_data.lock);
a9838785 662 pthread_mutex_lock(&channel->lock);
73811ecc
DG
663
664 if (cds_lfht_is_node_deleted(&channel->node.node)) {
665 goto error_unlock;
666 }
667
6d574024 668 lttng_ustconsumer_close_metadata(channel);
d88aee68 669
ea88ca2a 670error_unlock:
a9838785 671 pthread_mutex_unlock(&channel->lock);
ea88ca2a 672 pthread_mutex_unlock(&consumer_data.lock);
d88aee68
DG
673error:
674 return ret;
675}
676
677/*
678 * RCU read side lock MUST be acquired before calling this function.
679 *
680 * Return 0 on success else an LTTng error code.
681 */
682static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key)
683{
684 int ret;
685 struct lttng_consumer_channel *metadata;
686
8fd623e0 687 DBG("UST consumer setup metadata key %" PRIu64, key);
d88aee68
DG
688
689 metadata = consumer_find_channel(key);
690 if (!metadata) {
691 ERR("UST consumer push metadata %" PRIu64 " not found", key);
692 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
10a50311
JD
693 goto end;
694 }
695
696 /*
697 * In no monitor mode, the metadata channel has no stream(s) so skip the
698 * ownership transfer to the metadata thread.
699 */
700 if (!metadata->monitor) {
701 DBG("Metadata channel in no monitor");
702 ret = 0;
703 goto end;
d88aee68
DG
704 }
705
706 /*
707 * Send metadata stream to relayd if one is available. Availability is
708 * known if the stream is still in the list of the channel.
709 */
710 if (cds_list_empty(&metadata->streams.head)) {
711 ERR("Metadata channel key %" PRIu64 ", no stream available.", key);
712 ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
f5a0c9cf 713 goto error_no_stream;
d88aee68
DG
714 }
715
716 /* Send metadata stream to relayd if needed. */
62285ea4
DG
717 if (metadata->metadata_stream->net_seq_idx != (uint64_t) -1ULL) {
718 ret = consumer_send_relayd_stream(metadata->metadata_stream,
719 metadata->pathname);
720 if (ret < 0) {
721 ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
722 goto error;
723 }
601262d6
JD
724 ret = consumer_send_relayd_streams_sent(
725 metadata->metadata_stream->net_seq_idx);
726 if (ret < 0) {
727 ret = LTTCOMM_CONSUMERD_RELAYD_FAIL;
728 goto error;
729 }
d88aee68
DG
730 }
731
732 ret = send_streams_to_thread(metadata, ctx);
733 if (ret < 0) {
734 /*
735 * If we are unable to send the stream to the thread, there is
736 * a big problem so just stop everything.
737 */
738 ret = LTTCOMM_CONSUMERD_FATAL;
739 goto error;
740 }
741 /* List MUST be empty after or else it could be reused. */
742 assert(cds_list_empty(&metadata->streams.head));
743
10a50311
JD
744 ret = 0;
745 goto end;
d88aee68
DG
746
747error:
f2a444f1
DG
748 /*
749 * Delete metadata channel on error. At this point, the metadata stream can
750 * NOT be monitored by the metadata thread thus having the guarantee that
751 * the stream is still in the local stream list of the channel. This call
752 * will make sure to clean that list.
753 */
f5a0c9cf 754 consumer_stream_destroy(metadata->metadata_stream, NULL);
212d67a2
DG
755 cds_list_del(&metadata->metadata_stream->send_node);
756 metadata->metadata_stream = NULL;
f5a0c9cf 757error_no_stream:
10a50311
JD
758end:
759 return ret;
760}
761
762/*
763 * Snapshot the whole metadata.
764 *
765 * Returns 0 on success, < 0 on error
766 */
767static int snapshot_metadata(uint64_t key, char *path, uint64_t relayd_id,
768 struct lttng_consumer_local_data *ctx)
769{
770 int ret = 0;
10a50311
JD
771 struct lttng_consumer_channel *metadata_channel;
772 struct lttng_consumer_stream *metadata_stream;
773
774 assert(path);
775 assert(ctx);
776
777 DBG("UST consumer snapshot metadata with key %" PRIu64 " at path %s",
778 key, path);
779
780 rcu_read_lock();
781
782 metadata_channel = consumer_find_channel(key);
783 if (!metadata_channel) {
6a00837f
MD
784 ERR("UST snapshot metadata channel not found for key %" PRIu64,
785 key);
10a50311
JD
786 ret = -1;
787 goto error;
788 }
789 assert(!metadata_channel->monitor);
790
9ce5646a
MD
791 health_code_update();
792
10a50311
JD
793 /*
794 * Ask the sessiond if we have new metadata waiting and update the
795 * consumer metadata cache.
796 */
94d49140 797 ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 1);
10a50311
JD
798 if (ret < 0) {
799 goto error;
800 }
801
9ce5646a
MD
802 health_code_update();
803
10a50311
JD
804 /*
805 * The metadata stream is NOT created in no monitor mode when the channel
806 * is created on a sessiond ask channel command.
807 */
808 ret = create_ust_streams(metadata_channel, ctx);
809 if (ret < 0) {
810 goto error;
811 }
812
813 metadata_stream = metadata_channel->metadata_stream;
814 assert(metadata_stream);
815
816 if (relayd_id != (uint64_t) -1ULL) {
817 metadata_stream->net_seq_idx = relayd_id;
818 ret = consumer_send_relayd_stream(metadata_stream, path);
819 if (ret < 0) {
820 goto error_stream;
821 }
822 } else {
823 ret = utils_create_stream_file(path, metadata_stream->name,
824 metadata_stream->chan->tracefile_size,
825 metadata_stream->tracefile_count_current,
309167d2 826 metadata_stream->uid, metadata_stream->gid, NULL);
10a50311
JD
827 if (ret < 0) {
828 goto error_stream;
829 }
830 metadata_stream->out_fd = ret;
831 metadata_stream->tracefile_size_current = 0;
832 }
833
04ef1097 834 do {
9ce5646a
MD
835 health_code_update();
836
10a50311
JD
837 ret = lttng_consumer_read_subbuffer(metadata_stream, ctx);
838 if (ret < 0) {
94d49140 839 goto error_stream;
10a50311 840 }
04ef1097 841 } while (ret > 0);
10a50311 842
10a50311
JD
843error_stream:
844 /*
845 * Clean up the stream completely because the next snapshot will use a new
846 * metadata stream.
847 */
10a50311 848 consumer_stream_destroy(metadata_stream, NULL);
212d67a2 849 cds_list_del(&metadata_stream->send_node);
10a50311
JD
850 metadata_channel->metadata_stream = NULL;
851
852error:
853 rcu_read_unlock();
854 return ret;
855}
856
857/*
858 * Take a snapshot of all the stream of a channel.
859 *
860 * Returns 0 on success, < 0 on error
861 */
862static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
5c786ded 863 uint64_t max_stream_size, struct lttng_consumer_local_data *ctx)
10a50311
JD
864{
865 int ret;
866 unsigned use_relayd = 0;
867 unsigned long consumed_pos, produced_pos;
868 struct lttng_consumer_channel *channel;
869 struct lttng_consumer_stream *stream;
870
871 assert(path);
872 assert(ctx);
873
874 rcu_read_lock();
875
876 if (relayd_id != (uint64_t) -1ULL) {
877 use_relayd = 1;
878 }
879
880 channel = consumer_find_channel(key);
881 if (!channel) {
6a00837f 882 ERR("UST snapshot channel not found for key %" PRIu64, key);
10a50311
JD
883 ret = -1;
884 goto error;
885 }
886 assert(!channel->monitor);
6a00837f 887 DBG("UST consumer snapshot channel %" PRIu64, key);
10a50311
JD
888
889 cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
9ce5646a
MD
890
891 health_code_update();
892
10a50311
JD
893 /* Lock stream because we are about to change its state. */
894 pthread_mutex_lock(&stream->lock);
895 stream->net_seq_idx = relayd_id;
896
897 if (use_relayd) {
898 ret = consumer_send_relayd_stream(stream, path);
899 if (ret < 0) {
900 goto error_unlock;
901 }
902 } else {
903 ret = utils_create_stream_file(path, stream->name,
904 stream->chan->tracefile_size,
905 stream->tracefile_count_current,
309167d2 906 stream->uid, stream->gid, NULL);
10a50311
JD
907 if (ret < 0) {
908 goto error_unlock;
909 }
910 stream->out_fd = ret;
911 stream->tracefile_size_current = 0;
912
913 DBG("UST consumer snapshot stream %s/%s (%" PRIu64 ")", path,
914 stream->name, stream->key);
915 }
a4baae1b
JD
916 if (relayd_id != -1ULL) {
917 ret = consumer_send_relayd_streams_sent(relayd_id);
918 if (ret < 0) {
919 goto error_unlock;
920 }
921 }
10a50311
JD
922
923 ustctl_flush_buffer(stream->ustream, 1);
924
925 ret = lttng_ustconsumer_take_snapshot(stream);
926 if (ret < 0) {
927 ERR("Taking UST snapshot");
928 goto error_unlock;
929 }
930
931 ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
932 if (ret < 0) {
933 ERR("Produced UST snapshot position");
934 goto error_unlock;
935 }
936
937 ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
938 if (ret < 0) {
939 ERR("Consumerd UST snapshot position");
940 goto error_unlock;
941 }
942
5c786ded
JD
943 /*
944 * The original value is sent back if the max stream size is larger than
945 * the possible size of the snapshot. Also, we assume that the session
946 * daemon should never send a maximum stream size that is lower than the
947 * subbuffer size.
948 */
949 consumed_pos = consumer_get_consumed_maxsize(consumed_pos,
950 produced_pos, max_stream_size);
951
10a50311
JD
952 while (consumed_pos < produced_pos) {
953 ssize_t read_len;
954 unsigned long len, padded_len;
955
9ce5646a
MD
956 health_code_update();
957
10a50311
JD
958 DBG("UST consumer taking snapshot at pos %lu", consumed_pos);
959
960 ret = ustctl_get_subbuf(stream->ustream, &consumed_pos);
961 if (ret < 0) {
962 if (ret != -EAGAIN) {
963 PERROR("ustctl_get_subbuf snapshot");
964 goto error_close_stream;
965 }
966 DBG("UST consumer get subbuf failed. Skipping it.");
967 consumed_pos += stream->max_sb_size;
968 continue;
969 }
970
971 ret = ustctl_get_subbuf_size(stream->ustream, &len);
972 if (ret < 0) {
973 ERR("Snapshot ustctl_get_subbuf_size");
974 goto error_put_subbuf;
975 }
976
977 ret = ustctl_get_padded_subbuf_size(stream->ustream, &padded_len);
978 if (ret < 0) {
979 ERR("Snapshot ustctl_get_padded_subbuf_size");
980 goto error_put_subbuf;
981 }
982
983 read_len = lttng_consumer_on_read_subbuffer_mmap(ctx, stream, len,
309167d2 984 padded_len - len, NULL);
10a50311
JD
985 if (use_relayd) {
986 if (read_len != len) {
56591bac 987 ret = -EPERM;
10a50311
JD
988 goto error_put_subbuf;
989 }
990 } else {
991 if (read_len != padded_len) {
56591bac 992 ret = -EPERM;
10a50311
JD
993 goto error_put_subbuf;
994 }
995 }
996
997 ret = ustctl_put_subbuf(stream->ustream);
998 if (ret < 0) {
999 ERR("Snapshot ustctl_put_subbuf");
1000 goto error_close_stream;
1001 }
1002 consumed_pos += stream->max_sb_size;
1003 }
1004
1005 /* Simply close the stream so we can use it on the next snapshot. */
1006 consumer_stream_close(stream);
1007 pthread_mutex_unlock(&stream->lock);
1008 }
1009
1010 rcu_read_unlock();
1011 return 0;
1012
1013error_put_subbuf:
1014 if (ustctl_put_subbuf(stream->ustream) < 0) {
1015 ERR("Snapshot ustctl_put_subbuf");
1016 }
1017error_close_stream:
1018 consumer_stream_close(stream);
1019error_unlock:
1020 pthread_mutex_unlock(&stream->lock);
1021error:
1022 rcu_read_unlock();
d88aee68
DG
1023 return ret;
1024}
1025
331744e3
JD
1026/*
1027 * Receive the metadata updates from the sessiond.
1028 */
1029int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
5e41ebe1 1030 uint64_t len, struct lttng_consumer_channel *channel,
94d49140 1031 int timer, int wait)
331744e3 1032{
0c759fc9 1033 int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
331744e3
JD
1034 char *metadata_str;
1035
8fd623e0 1036 DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key, len);
331744e3
JD
1037
1038 metadata_str = zmalloc(len * sizeof(char));
1039 if (!metadata_str) {
1040 PERROR("zmalloc metadata string");
1041 ret_code = LTTCOMM_CONSUMERD_ENOMEM;
1042 goto end;
1043 }
1044
9ce5646a
MD
1045 health_code_update();
1046
331744e3
JD
1047 /* Receive metadata string. */
1048 ret = lttcomm_recv_unix_sock(sock, metadata_str, len);
1049 if (ret < 0) {
1050 /* Session daemon is dead so return gracefully. */
1051 ret_code = ret;
1052 goto end_free;
1053 }
1054
9ce5646a
MD
1055 health_code_update();
1056
331744e3
JD
1057 pthread_mutex_lock(&channel->metadata_cache->lock);
1058 ret = consumer_metadata_cache_write(channel, offset, len, metadata_str);
1059 if (ret < 0) {
1060 /* Unable to handle metadata. Notify session daemon. */
1061 ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
a32bd775
DG
1062 /*
1063 * Skip metadata flush on write error since the offset and len might
1064 * not have been updated which could create an infinite loop below when
1065 * waiting for the metadata cache to be flushed.
1066 */
1067 pthread_mutex_unlock(&channel->metadata_cache->lock);
a32bd775 1068 goto end_free;
331744e3
JD
1069 }
1070 pthread_mutex_unlock(&channel->metadata_cache->lock);
1071
94d49140
JD
1072 if (!wait) {
1073 goto end_free;
1074 }
5e41ebe1 1075 while (consumer_metadata_cache_flushed(channel, offset + len, timer)) {
331744e3 1076 DBG("Waiting for metadata to be flushed");
9ce5646a
MD
1077
1078 health_code_update();
1079
331744e3
JD
1080 usleep(DEFAULT_METADATA_AVAILABILITY_WAIT_TIME);
1081 }
1082
1083end_free:
1084 free(metadata_str);
1085end:
1086 return ret_code;
1087}
1088
4cbc1a04
DG
1089/*
1090 * Receive command from session daemon and process it.
1091 *
1092 * Return 1 on success else a negative value or 0.
1093 */
3bd1e081
MD
1094int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
1095 int sock, struct pollfd *consumer_sockpoll)
1096{
1097 ssize_t ret;
0c759fc9 1098 enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
3bd1e081 1099 struct lttcomm_consumer_msg msg;
ffe60014 1100 struct lttng_consumer_channel *channel = NULL;
3bd1e081 1101
9ce5646a
MD
1102 health_code_update();
1103
3bd1e081
MD
1104 ret = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg));
1105 if (ret != sizeof(msg)) {
173af62f
DG
1106 DBG("Consumer received unexpected message size %zd (expects %zu)",
1107 ret, sizeof(msg));
3be74084
DG
1108 /*
1109 * The ret value might be 0, meaning an orderly shutdown, but this is ok
1110 * since the caller handles this.
1111 */
489f70e9 1112 if (ret > 0) {
c6857fcf 1113 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
489f70e9
MD
1114 ret = -1;
1115 }
3bd1e081
MD
1116 return ret;
1117 }
9ce5646a
MD
1118
1119 health_code_update();
1120
84382d49
MD
1121 /* deprecated */
1122 assert(msg.cmd_type != LTTNG_CONSUMER_STOP);
3bd1e081 1123
9ce5646a
MD
1124 health_code_update();
1125
3f8e211f 1126 /* relayd needs RCU read-side lock */
b0b335c8
MD
1127 rcu_read_lock();
1128
3bd1e081 1129 switch (msg.cmd_type) {
00e2e675
DG
1130 case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
1131 {
1132 /* Session daemon status messages are handled in the following call. */
7735ef9e
DG
1133 ret = consumer_add_relayd_socket(msg.u.relayd_sock.net_index,
1134 msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll,
d3e2ba59
JD
1135 &msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id,
1136 msg.u.relayd_sock.relayd_session_id);
00e2e675
DG
1137 goto end_nosignal;
1138 }
173af62f
DG
1139 case LTTNG_CONSUMER_DESTROY_RELAYD:
1140 {
a6ba4fe1 1141 uint64_t index = msg.u.destroy_relayd.net_seq_idx;
173af62f
DG
1142 struct consumer_relayd_sock_pair *relayd;
1143
a6ba4fe1 1144 DBG("UST consumer destroying relayd %" PRIu64, index);
173af62f
DG
1145
1146 /* Get relayd reference if exists. */
a6ba4fe1 1147 relayd = consumer_find_relayd(index);
173af62f 1148 if (relayd == NULL) {
3448e266 1149 DBG("Unable to find relayd %" PRIu64, index);
e462382a 1150 ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL;
173af62f
DG
1151 }
1152
a6ba4fe1
DG
1153 /*
1154 * Each relayd socket pair has a refcount of streams attached to it
1155 * which tells if the relayd is still active or not depending on the
1156 * refcount value.
1157 *
1158 * This will set the destroy flag of the relayd object and destroy it
1159 * if the refcount reaches zero when called.
1160 *
1161 * The destroy can happen either here or when a stream fd hangs up.
1162 */
f50f23d9
DG
1163 if (relayd) {
1164 consumer_flag_relayd_for_destroy(relayd);
1165 }
1166
d88aee68 1167 goto end_msg_sessiond;
173af62f 1168 }
3bd1e081
MD
1169 case LTTNG_CONSUMER_UPDATE_STREAM:
1170 {
3f8e211f 1171 rcu_read_unlock();
7ad0a0cb 1172 return -ENOSYS;
3bd1e081 1173 }
6d805429 1174 case LTTNG_CONSUMER_DATA_PENDING:
53632229 1175 {
3be74084 1176 int ret, is_data_pending;
6d805429 1177 uint64_t id = msg.u.data_pending.session_id;
ca22feea 1178
6d805429 1179 DBG("UST consumer data pending command for id %" PRIu64, id);
ca22feea 1180
3be74084 1181 is_data_pending = consumer_data_pending(id);
ca22feea
DG
1182
1183 /* Send back returned value to session daemon */
3be74084
DG
1184 ret = lttcomm_send_unix_sock(sock, &is_data_pending,
1185 sizeof(is_data_pending));
ca22feea 1186 if (ret < 0) {
3be74084 1187 DBG("Error when sending the data pending ret code: %d", ret);
489f70e9 1188 goto error_fatal;
ca22feea 1189 }
f50f23d9
DG
1190
1191 /*
1192 * No need to send back a status message since the data pending
1193 * returned value is the response.
1194 */
ca22feea 1195 break;
53632229 1196 }
ffe60014
DG
1197 case LTTNG_CONSUMER_ASK_CHANNEL_CREATION:
1198 {
1199 int ret;
1200 struct ustctl_consumer_channel_attr attr;
1201
1202 /* Create a plain object and reserve a channel key. */
1203 channel = allocate_channel(msg.u.ask_channel.session_id,
1204 msg.u.ask_channel.pathname, msg.u.ask_channel.name,
1205 msg.u.ask_channel.uid, msg.u.ask_channel.gid,
1206 msg.u.ask_channel.relayd_id, msg.u.ask_channel.key,
1624d5b7
JD
1207 (enum lttng_event_output) msg.u.ask_channel.output,
1208 msg.u.ask_channel.tracefile_size,
2bba9e53 1209 msg.u.ask_channel.tracefile_count,
1950109e 1210 msg.u.ask_channel.session_id_per_pid,
ecc48a90
JD
1211 msg.u.ask_channel.monitor,
1212 msg.u.ask_channel.live_timer_interval);
ffe60014
DG
1213 if (!channel) {
1214 goto end_channel_error;
1215 }
1216
567eb353
DG
1217 /*
1218 * Assign UST application UID to the channel. This value is ignored for
1219 * per-PID buffers. This is UST specific, which is why it is set after
1220 * the allocation.
1221 */
1222 channel->ust_app_uid = msg.u.ask_channel.ust_app_uid;
1223
ffe60014
DG
1224 /* Build channel attributes from received message. */
1225 attr.subbuf_size = msg.u.ask_channel.subbuf_size;
1226 attr.num_subbuf = msg.u.ask_channel.num_subbuf;
1227 attr.overwrite = msg.u.ask_channel.overwrite;
1228 attr.switch_timer_interval = msg.u.ask_channel.switch_timer_interval;
1229 attr.read_timer_interval = msg.u.ask_channel.read_timer_interval;
7972aab2 1230 attr.chan_id = msg.u.ask_channel.chan_id;
ffe60014
DG
1231 memcpy(attr.uuid, msg.u.ask_channel.uuid, sizeof(attr.uuid));
1232
0c759fc9
DG
1233 /* Match channel buffer type to the UST abi. */
1234 switch (msg.u.ask_channel.output) {
1235 case LTTNG_EVENT_MMAP:
1236 default:
1237 attr.output = LTTNG_UST_MMAP;
1238 break;
1239 }
1240
ffe60014
DG
1241 /* Translate and save channel type. */
1242 switch (msg.u.ask_channel.type) {
1243 case LTTNG_UST_CHAN_PER_CPU:
1244 channel->type = CONSUMER_CHANNEL_TYPE_DATA;
1245 attr.type = LTTNG_UST_CHAN_PER_CPU;
8633d6e3
MD
1246 /*
1247 * Set refcount to 1 for owner. Below, we will
1248 * pass ownership to the
1249 * consumer_thread_channel_poll() thread.
1250 */
1251 channel->refcount = 1;
ffe60014
DG
1252 break;
1253 case LTTNG_UST_CHAN_METADATA:
1254 channel->type = CONSUMER_CHANNEL_TYPE_METADATA;
1255 attr.type = LTTNG_UST_CHAN_METADATA;
1256 break;
1257 default:
1258 assert(0);
1259 goto error_fatal;
1260 };
1261
9ce5646a
MD
1262 health_code_update();
1263
ffe60014
DG
1264 ret = ask_channel(ctx, sock, channel, &attr);
1265 if (ret < 0) {
1266 goto end_channel_error;
1267 }
1268
fc643247
MD
1269 if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
1270 ret = consumer_metadata_cache_allocate(channel);
1271 if (ret < 0) {
1272 ERR("Allocating metadata cache");
1273 goto end_channel_error;
1274 }
1275 consumer_timer_switch_start(channel, attr.switch_timer_interval);
1276 attr.switch_timer_interval = 0;
94d49140
JD
1277 } else {
1278 consumer_timer_live_start(channel,
1279 msg.u.ask_channel.live_timer_interval);
fc643247
MD
1280 }
1281
9ce5646a
MD
1282 health_code_update();
1283
ffe60014
DG
1284 /*
1285 * Add the channel to the internal state AFTER all streams were created
1286 * and successfully sent to session daemon. This way, all streams must
1287 * be ready before this channel is visible to the threads.
fc643247
MD
1288 * If add_channel succeeds, ownership of the channel is
1289 * passed to consumer_thread_channel_poll().
ffe60014
DG
1290 */
1291 ret = add_channel(channel, ctx);
1292 if (ret < 0) {
ea88ca2a
MD
1293 if (msg.u.ask_channel.type == LTTNG_UST_CHAN_METADATA) {
1294 if (channel->switch_timer_enabled == 1) {
1295 consumer_timer_switch_stop(channel);
1296 }
1297 consumer_metadata_cache_destroy(channel);
1298 }
d3e2ba59
JD
1299 if (channel->live_timer_enabled == 1) {
1300 consumer_timer_live_stop(channel);
1301 }
ffe60014
DG
1302 goto end_channel_error;
1303 }
1304
9ce5646a
MD
1305 health_code_update();
1306
ffe60014
DG
1307 /*
1308 * Channel and streams are now created. Inform the session daemon that
1309 * everything went well and should wait to receive the channel and
1310 * streams with ustctl API.
1311 */
1312 ret = consumer_send_status_channel(sock, channel);
1313 if (ret < 0) {
1314 /*
489f70e9 1315 * There is probably a problem on the socket.
ffe60014 1316 */
489f70e9 1317 goto error_fatal;
ffe60014
DG
1318 }
1319
1320 break;
1321 }
1322 case LTTNG_CONSUMER_GET_CHANNEL:
1323 {
1324 int ret, relayd_err = 0;
d88aee68 1325 uint64_t key = msg.u.get_channel.key;
ffe60014 1326 struct lttng_consumer_channel *channel;
ffe60014
DG
1327
1328 channel = consumer_find_channel(key);
1329 if (!channel) {
8fd623e0 1330 ERR("UST consumer get channel key %" PRIu64 " not found", key);
e462382a 1331 ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND;
ffe60014
DG
1332 goto end_msg_sessiond;
1333 }
1334
9ce5646a
MD
1335 health_code_update();
1336
ffe60014
DG
1337 /* Send everything to sessiond. */
1338 ret = send_sessiond_channel(sock, channel, ctx, &relayd_err);
1339 if (ret < 0) {
1340 if (relayd_err) {
1341 /*
1342 * We were unable to send the stream to the relayd, so avoid
1343 * sending back a fatal error to the thread since this is OK
1344 * and the consumer can continue its work. The above call
1345 * has sent the error status message to the sessiond.
ffe60014 1346 */
f2a444f1 1347 goto end_nosignal;
ffe60014
DG
1348 }
1349 /*
1350 * The communication was broken, hence there is a bad state between
1351 * the consumer and sessiond so stop everything.
1352 */
1353 goto error_fatal;
1354 }
1355
9ce5646a
MD
1356 health_code_update();
1357
10a50311
JD
1358 /*
1359 * In no monitor mode, the streams ownership is kept inside the channel
1360 * so don't send them to the data thread.
1361 */
1362 if (!channel->monitor) {
1363 goto end_msg_sessiond;
1364 }
1365
d88aee68
DG
1366 ret = send_streams_to_thread(channel, ctx);
1367 if (ret < 0) {
1368 /*
1369 * If we are unable to send the stream to the thread, there is
1370 * a big problem so just stop everything.
1371 */
1372 goto error_fatal;
ffe60014 1373 }
ffe60014
DG
1374 /* List MUST be empty after or else it could be reused. */
1375 assert(cds_list_empty(&channel->streams.head));
d88aee68
DG
1376 goto end_msg_sessiond;
1377 }
1378 case LTTNG_CONSUMER_DESTROY_CHANNEL:
1379 {
1380 uint64_t key = msg.u.destroy_channel.key;
d88aee68 1381
a0cbdd2e
MD
1382 /*
1383 * Only called if streams have not been sent to stream
1384 * manager thread. However, channel has been sent to
1385 * channel manager thread.
1386 */
1387 notify_thread_del_channel(ctx, key);
d88aee68 1388 goto end_msg_sessiond;
ffe60014 1389 }
d88aee68
DG
1390 case LTTNG_CONSUMER_CLOSE_METADATA:
1391 {
1392 int ret;
1393
1394 ret = close_metadata(msg.u.close_metadata.key);
1395 if (ret != 0) {
1396 ret_code = ret;
1397 }
1398
1399 goto end_msg_sessiond;
1400 }
7972aab2
DG
1401 case LTTNG_CONSUMER_FLUSH_CHANNEL:
1402 {
1403 int ret;
1404
1405 ret = flush_channel(msg.u.flush_channel.key);
1406 if (ret != 0) {
1407 ret_code = ret;
1408 }
1409
1410 goto end_msg_sessiond;
1411 }
d88aee68 1412 case LTTNG_CONSUMER_PUSH_METADATA:
ffe60014
DG
1413 {
1414 int ret;
d88aee68 1415 uint64_t len = msg.u.push_metadata.len;
d88aee68 1416 uint64_t key = msg.u.push_metadata.key;
331744e3 1417 uint64_t offset = msg.u.push_metadata.target_offset;
ffe60014
DG
1418 struct lttng_consumer_channel *channel;
1419
8fd623e0
DG
1420 DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key,
1421 len);
ffe60014
DG
1422
1423 channel = consumer_find_channel(key);
1424 if (!channel) {
000baf6a
DG
1425 /*
1426 * This is possible if the metadata creation on the consumer side
1427 * is in flight vis-a-vis a concurrent push metadata from the
1428 * session daemon. Simply return that the channel failed and the
1429 * session daemon will handle that message correctly considering
1430 * that this race is acceptable thus the DBG() statement here.
1431 */
1432 DBG("UST consumer push metadata %" PRIu64 " not found", key);
1433 ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
4a2eb0ca 1434 goto end_msg_sessiond;
d88aee68
DG
1435 }
1436
9ce5646a
MD
1437 health_code_update();
1438
d88aee68 1439 /* Tell session daemon we are ready to receive the metadata. */
0c759fc9 1440 ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
ffe60014
DG
1441 if (ret < 0) {
1442 /* Somehow, the session daemon is not responding anymore. */
d88aee68
DG
1443 goto error_fatal;
1444 }
1445
9ce5646a
MD
1446 health_code_update();
1447
d88aee68 1448 /* Wait for more data. */
9ce5646a
MD
1449 health_poll_entry();
1450 ret = lttng_consumer_poll_socket(consumer_sockpoll);
1451 health_poll_exit();
84382d49 1452 if (ret) {
489f70e9 1453 goto error_fatal;
d88aee68
DG
1454 }
1455
9ce5646a
MD
1456 health_code_update();
1457
331744e3 1458 ret = lttng_ustconsumer_recv_metadata(sock, key, offset,
94d49140 1459 len, channel, 0, 1);
d88aee68 1460 if (ret < 0) {
331744e3 1461 /* error receiving from sessiond */
489f70e9 1462 goto error_fatal;
331744e3
JD
1463 } else {
1464 ret_code = ret;
d88aee68
DG
1465 goto end_msg_sessiond;
1466 }
d88aee68
DG
1467 }
1468 case LTTNG_CONSUMER_SETUP_METADATA:
1469 {
1470 int ret;
1471
1472 ret = setup_metadata(ctx, msg.u.setup_metadata.key);
1473 if (ret) {
1474 ret_code = ret;
1475 }
1476 goto end_msg_sessiond;
ffe60014 1477 }
6dc3064a
DG
1478 case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
1479 {
10a50311
JD
1480 if (msg.u.snapshot_channel.metadata) {
1481 ret = snapshot_metadata(msg.u.snapshot_channel.key,
1482 msg.u.snapshot_channel.pathname,
1483 msg.u.snapshot_channel.relayd_id,
1484 ctx);
1485 if (ret < 0) {
1486 ERR("Snapshot metadata failed");
e462382a 1487 ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
10a50311
JD
1488 }
1489 } else {
1490 ret = snapshot_channel(msg.u.snapshot_channel.key,
1491 msg.u.snapshot_channel.pathname,
1492 msg.u.snapshot_channel.relayd_id,
5c786ded 1493 msg.u.snapshot_channel.max_stream_size,
10a50311
JD
1494 ctx);
1495 if (ret < 0) {
1496 ERR("Snapshot channel failed");
e462382a 1497 ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
10a50311
JD
1498 }
1499 }
1500
9ce5646a 1501 health_code_update();
6dc3064a
DG
1502 ret = consumer_send_status_msg(sock, ret_code);
1503 if (ret < 0) {
1504 /* Somehow, the session daemon is not responding anymore. */
1505 goto end_nosignal;
1506 }
9ce5646a 1507 health_code_update();
6dc3064a
DG
1508 break;
1509 }
3bd1e081
MD
1510 default:
1511 break;
1512 }
3f8e211f 1513
3bd1e081 1514end_nosignal:
b0b335c8 1515 rcu_read_unlock();
4cbc1a04 1516
9ce5646a
MD
1517 health_code_update();
1518
4cbc1a04
DG
1519 /*
1520 * Return 1 to indicate success since the 0 value can be a socket
1521 * shutdown during the recv() or send() call.
1522 */
1523 return 1;
ffe60014
DG
1524
1525end_msg_sessiond:
1526 /*
1527 * The returned value here is not useful since either way we'll return 1 to
1528 * the caller because the session daemon socket management is done
1529 * elsewhere. Returning a negative code or 0 will shut down the consumer.
1530 */
489f70e9
MD
1531 ret = consumer_send_status_msg(sock, ret_code);
1532 if (ret < 0) {
1533 goto error_fatal;
1534 }
ffe60014 1535 rcu_read_unlock();
9ce5646a
MD
1536
1537 health_code_update();
1538
ffe60014
DG
1539 return 1;
1540end_channel_error:
1541 if (channel) {
1542 /*
1543 * Free channel here since no one has a reference to it. We don't
1544 * free after that because a stream can store this pointer.
1545 */
1546 destroy_channel(channel);
1547 }
1548 /* We have to send a status channel message indicating an error. */
1549 ret = consumer_send_status_channel(sock, NULL);
1550 if (ret < 0) {
1551 /* Stop everything if session daemon can not be notified. */
1552 goto error_fatal;
1553 }
1554 rcu_read_unlock();
9ce5646a
MD
1555
1556 health_code_update();
1557
ffe60014
DG
1558 return 1;
1559error_fatal:
1560 rcu_read_unlock();
1561 /* This will issue a consumer stop. */
1562 return -1;
3bd1e081
MD
1563}
1564
ffe60014
DG
1565/*
1566 * Wrapper over the mmap() read offset from ust-ctl library. Since this can be
1567 * compiled out, we isolate it in this library.
1568 */
1569int lttng_ustctl_get_mmap_read_offset(struct lttng_consumer_stream *stream,
1570 unsigned long *off)
3bd1e081 1571{
ffe60014
DG
1572 assert(stream);
1573 assert(stream->ustream);
b5c5fc29 1574
ffe60014 1575 return ustctl_get_mmap_read_offset(stream->ustream, off);
3bd1e081
MD
1576}
1577
ffe60014
DG
1578/*
1579 * Wrapper over the mmap() base address from the ust-ctl library. Since this can be
1580 * compiled out, we isolate it in this library.
1581 */
1582void *lttng_ustctl_get_mmap_base(struct lttng_consumer_stream *stream)
d056b477 1583{
ffe60014
DG
1584 assert(stream);
1585 assert(stream->ustream);
1586
1587 return ustctl_get_mmap_base(stream->ustream);
d056b477
MD
1588}
1589
ffe60014
DG
1590/*
1591 * Take a snapshot for a specific fd
1592 *
1593 * Returns 0 on success, < 0 on error
1594 */
1595int lttng_ustconsumer_take_snapshot(struct lttng_consumer_stream *stream)
3bd1e081 1596{
ffe60014
DG
1597 assert(stream);
1598 assert(stream->ustream);
1599
1600 return ustctl_snapshot(stream->ustream);
3bd1e081
MD
1601}
1602
ffe60014
DG
1603/*
1604 * Get the produced position
1605 *
1606 * Returns 0 on success, < 0 on error
1607 */
1608int lttng_ustconsumer_get_produced_snapshot(
1609 struct lttng_consumer_stream *stream, unsigned long *pos)
3bd1e081 1610{
ffe60014
DG
1611 assert(stream);
1612 assert(stream->ustream);
1613 assert(pos);
7a57cf92 1614
ffe60014
DG
1615 return ustctl_snapshot_get_produced(stream->ustream, pos);
1616}
7a57cf92 1617
10a50311
JD
1618/*
1619 * Get the consumed position
1620 *
1621 * Returns 0 on success, < 0 on error
1622 */
1623int lttng_ustconsumer_get_consumed_snapshot(
1624 struct lttng_consumer_stream *stream, unsigned long *pos)
1625{
1626 assert(stream);
1627 assert(stream->ustream);
1628 assert(pos);
1629
1630 return ustctl_snapshot_get_consumed(stream->ustream, pos);
1631}
1632
84a182ce
DG
1633void lttng_ustconsumer_flush_buffer(struct lttng_consumer_stream *stream,
1634 int producer)
1635{
1636 assert(stream);
1637 assert(stream->ustream);
1638
1639 ustctl_flush_buffer(stream->ustream, producer);
1640}
1641
1642int lttng_ustconsumer_get_current_timestamp(
1643 struct lttng_consumer_stream *stream, uint64_t *ts)
1644{
1645 assert(stream);
1646 assert(stream->ustream);
1647 assert(ts);
1648
1649 return ustctl_get_current_timestamp(stream->ustream, ts);
1650}
1651
ffe60014
DG
1652/*
1653 * Called when the stream signals the consumer that it has hung up.
1654 */
1655void lttng_ustconsumer_on_stream_hangup(struct lttng_consumer_stream *stream)
1656{
1657 assert(stream);
1658 assert(stream->ustream);
2c1dd183 1659
ffe60014
DG
1660 ustctl_flush_buffer(stream->ustream, 0);
1661 stream->hangup_flush_done = 1;
1662}
ee77a7b0 1663
ffe60014
DG
1664void lttng_ustconsumer_del_channel(struct lttng_consumer_channel *chan)
1665{
1666 assert(chan);
1667 assert(chan->uchan);
e316aad5 1668
ea88ca2a
MD
1669 if (chan->switch_timer_enabled == 1) {
1670 consumer_timer_switch_stop(chan);
1671 }
1672 consumer_metadata_cache_destroy(chan);
ffe60014 1673 ustctl_destroy_channel(chan->uchan);
3bd1e081
MD
1674}
1675
1676void lttng_ustconsumer_del_stream(struct lttng_consumer_stream *stream)
1677{
ffe60014
DG
1678 assert(stream);
1679 assert(stream->ustream);
d41f73b7 1680
ea88ca2a
MD
1681 if (stream->chan->switch_timer_enabled == 1) {
1682 consumer_timer_switch_stop(stream->chan);
1683 }
ffe60014
DG
1684 ustctl_destroy_stream(stream->ustream);
1685}
d41f73b7 1686
6d574024
DG
1687int lttng_ustconsumer_get_wakeup_fd(struct lttng_consumer_stream *stream)
1688{
1689 assert(stream);
1690 assert(stream->ustream);
1691
1692 return ustctl_stream_get_wakeup_fd(stream->ustream);
1693}
1694
1695int lttng_ustconsumer_close_wakeup_fd(struct lttng_consumer_stream *stream)
1696{
1697 assert(stream);
1698 assert(stream->ustream);
1699
1700 return ustctl_stream_close_wakeup_fd(stream->ustream);
1701}
1702
309167d2
JD
1703/*
1704 * Populate index values of a UST stream. Values are set in big endian order.
1705 *
1706 * Return 0 on success or else a negative value.
1707 */
50adc264 1708static int get_index_values(struct ctf_packet_index *index,
309167d2
JD
1709 struct ustctl_consumer_stream *ustream)
1710{
1711 int ret;
1712
1713 ret = ustctl_get_timestamp_begin(ustream, &index->timestamp_begin);
1714 if (ret < 0) {
1715 PERROR("ustctl_get_timestamp_begin");
1716 goto error;
1717 }
1718 index->timestamp_begin = htobe64(index->timestamp_begin);
1719
1720 ret = ustctl_get_timestamp_end(ustream, &index->timestamp_end);
1721 if (ret < 0) {
1722 PERROR("ustctl_get_timestamp_end");
1723 goto error;
1724 }
1725 index->timestamp_end = htobe64(index->timestamp_end);
1726
1727 ret = ustctl_get_events_discarded(ustream, &index->events_discarded);
1728 if (ret < 0) {
1729 PERROR("ustctl_get_events_discarded");
1730 goto error;
1731 }
1732 index->events_discarded = htobe64(index->events_discarded);
1733
1734 ret = ustctl_get_content_size(ustream, &index->content_size);
1735 if (ret < 0) {
1736 PERROR("ustctl_get_content_size");
1737 goto error;
1738 }
1739 index->content_size = htobe64(index->content_size);
1740
1741 ret = ustctl_get_packet_size(ustream, &index->packet_size);
1742 if (ret < 0) {
1743 PERROR("ustctl_get_packet_size");
1744 goto error;
1745 }
1746 index->packet_size = htobe64(index->packet_size);
1747
1748 ret = ustctl_get_stream_id(ustream, &index->stream_id);
1749 if (ret < 0) {
1750 PERROR("ustctl_get_stream_id");
1751 goto error;
1752 }
1753 index->stream_id = htobe64(index->stream_id);
1754
1755error:
1756 return ret;
1757}
1758
94d49140
JD
1759/*
1760 * Write up to one packet from the metadata cache to the channel.
1761 *
1762 * Returns the number of bytes pushed from the cache, or a negative value
1763 * on error.
1764 */
1765static
1766int commit_one_metadata_packet(struct lttng_consumer_stream *stream)
1767{
1768 ssize_t write_len;
1769 int ret;
1770
1771 pthread_mutex_lock(&stream->chan->metadata_cache->lock);
1772 if (stream->chan->metadata_cache->contiguous
1773 == stream->ust_metadata_pushed) {
1774 ret = 0;
1775 goto end;
1776 }
1777
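	/*
	 * Push the region of the cache between what has already been written to
	 * the ring buffer (ust_metadata_pushed) and the end of the contiguous
	 * metadata.
	 */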
1778 write_len = ustctl_write_one_packet_to_channel(stream->chan->uchan,
1779 &stream->chan->metadata_cache->data[stream->ust_metadata_pushed],
1780 stream->chan->metadata_cache->contiguous
1781 - stream->ust_metadata_pushed);
1782 assert(write_len != 0);
1783 if (write_len < 0) {
1784 ERR("Writing one metadata packet");
1785 ret = -1;
1786 goto end;
1787 }
1788 stream->ust_metadata_pushed += write_len;
1789
1790 assert(stream->chan->metadata_cache->contiguous >=
1791 stream->ust_metadata_pushed);
1792 ret = write_len;
1793
1794end:
1795 pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
1796 return ret;
1797}
1798
309167d2 1799
94d49140
JD
1800/*
1801 * Sync metadata, meaning request it from the session daemon and take a
1802 * snapshot so that the metadata thread can consume it.
1803 *
1804 * Metadata stream lock MUST be acquired.
1805 *
1806 * Return 0 if new metadata is available, EAGAIN if the metadata stream
1807 * is empty or a negative value on error.
1808 */
1809int lttng_ustconsumer_sync_metadata(struct lttng_consumer_local_data *ctx,
1810 struct lttng_consumer_stream *metadata)
1811{
1812 int ret;
1813 int retry = 0;
1814
1815 assert(ctx);
1816 assert(metadata);
1817
1818 /*
1819 * Request metadata from the sessiond, but don't wait for the flush
1820 * because we locked the metadata thread.
1821 */
1822 ret = lttng_ustconsumer_request_metadata(ctx, metadata->chan, 0, 0);
1823 if (ret < 0) {
1824 goto end;
1825 }
1826
1827 ret = commit_one_metadata_packet(metadata);
1828 if (ret <= 0) {
1829 goto end;
1830 } else if (ret > 0) {
1831 retry = 1;
1832 }
1833
1834 ustctl_flush_buffer(metadata->ustream, 1);
1835 ret = ustctl_snapshot(metadata->ustream);
1836 if (ret < 0) {
1837 if (errno != EAGAIN) {
1838 ERR("Sync metadata, taking UST snapshot");
1839 goto end;
1840 }
1841 DBG("No new metadata when syncing them.");
1842 /* No new metadata, exit. */
1843 ret = ENODATA;
1844 goto end;
1845 }
1846
1847 /*
1848 * After this flush, we still need to extract metadata.
1849 */
1850 if (retry) {
1851 ret = EAGAIN;
1852 }
1853
1854end:
1855 return ret;
1856}
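/*
 * Hypothetical sketch (for illustration only): dispatching on the return
 * codes documented above. The metadata stream lock is assumed to be held by
 * the caller, as required by lttng_ustconsumer_sync_metadata().
 */
static int sync_metadata_once_sketch(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *metadata)
{
	int ret;

	ret = lttng_ustconsumer_sync_metadata(ctx, metadata);
	if (ret < 0) {
		return ret;	/* Hard error. */
	}
	if (ret == EAGAIN) {
		return 1;	/* Metadata left to extract, caller should retry. */
	}
	if (ret == ENODATA) {
		return 0;	/* Nothing new this time around. */
	}
	return 0;		/* New metadata snapshotted and ready to be read. */
}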
1857
02b3d176
DG
1858/*
1859 * Return 0 on success else a negative value.
1860 */
1861static int notify_if_more_data(struct lttng_consumer_stream *stream,
1862 struct lttng_consumer_local_data *ctx)
1863{
1864 int ret;
1865 struct ustctl_consumer_stream *ustream;
1866
1867 assert(stream);
1868 assert(ctx);
1869
1870 ustream = stream->ustream;
1871
1872 /*
1873 * First, we are going to check if there is a new subbuffer available
1874 * before reading the stream wait_fd.
1875 */
1876 /* Get the next subbuffer */
1877 ret = ustctl_get_next_subbuf(ustream);
1878 if (ret) {
1879 /* No more data found, flag the stream. */
1880 stream->has_data = 0;
1881 ret = 0;
1882 goto end;
1883 }
1884
5420e5db 1885 ret = ustctl_put_subbuf(ustream);
02b3d176
DG
1886 assert(!ret);
1887
1888 /* This stream still has data. Flag it and wake up the data thread. */
1889 stream->has_data = 1;
1890
1891 if (stream->monitor && !stream->hangup_flush_done && !ctx->has_wakeup) {
1892 ssize_t writelen;
1893
1894 writelen = lttng_pipe_write(ctx->consumer_wakeup_pipe, "!", 1);
1895 if (writelen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
1896 ret = writelen;
1897 goto end;
1898 }
1899
1900 /* The wake up pipe has been notified. */
1901 ctx->has_wakeup = 1;
1902 }
1903 ret = 0;
1904
1905end:
1906 return ret;
1907}
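/*
 * Hypothetical sketch (for illustration only): why both the stream wait_fd
 * and the consumer wakeup pipe matter. A data loop would wait on both fds;
 * notify_if_more_data() is what makes the second wakeup source fire when a
 * subbuffer was produced but the wait_fd byte has already been consumed.
 * The fd parameters are placeholders, not fields from this file.
 */
static void wait_for_data_sketch(int stream_wait_fd, int wakeup_pipe_read_fd)
{
	struct pollfd fds[2];

	fds[0].fd = stream_wait_fd;
	fds[0].events = POLLIN;
	fds[1].fd = wakeup_pipe_read_fd;
	fds[1].events = POLLIN;

	/* Block until UST signals the wait_fd or the consumer wakes itself up. */
	(void) poll(fds, 2, -1);
}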
1908
94d49140
JD
1909/*
1910 * Read subbuffer from the given stream.
1911 *
1912 * Stream lock MUST be acquired.
1913 *
1914 * Return 0 on success else a negative value.
1915 */
d41f73b7
MD
1916int lttng_ustconsumer_read_subbuffer(struct lttng_consumer_stream *stream,
1917 struct lttng_consumer_local_data *ctx)
1918{
1d4dfdef 1919 unsigned long len, subbuf_size, padding;
1c20f0e2 1920 int err, write_index = 1;
d41f73b7 1921 long ret = 0;
ffe60014 1922 struct ustctl_consumer_stream *ustream;
50adc264 1923 struct ctf_packet_index index;
ffe60014
DG
1924
1925 assert(stream);
1926 assert(stream->ustream);
1927 assert(ctx);
d41f73b7 1928
3eb914c0 1929 DBG("In UST read_subbuffer (wait_fd: %d, name: %s)", stream->wait_fd,
ffe60014
DG
1930 stream->name);
1931
1932 /* Ease our life for what's next. */
1933 ustream = stream->ustream;
d41f73b7 1934
6cd525e8 1935 /*
02b3d176
DG
1936 * We can consume the 1 byte written into the wait_fd by UST. Don't trigger
1937 * an error if we cannot read this one byte (read returns 0), or if the
1938 * error is EAGAIN or EWOULDBLOCK.
1939 *
1940 * This is only done when the stream is monitored by a thread, before the
1941 * hangup flush is done, and if the stream is not flagged as having data,
1942 * since there might be nothing to consume in the wait fd while data is
1943 * still flagged as available by the consumer wake up pipe.
6cd525e8 1944 */
02b3d176
DG
1945 if (stream->monitor && !stream->hangup_flush_done && !stream->has_data) {
1946 char dummy;
c617c0c6
MD
1947 ssize_t readlen;
1948
6cd525e8
MD
1949 readlen = lttng_read(stream->wait_fd, &dummy, 1);
1950 if (readlen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) {
effcf122
MD
1951 ret = readlen;
1952 goto end;
1953 }
d41f73b7
MD
1954 }
1955
04ef1097 1956retry:
d41f73b7 1957 /* Get the next subbuffer */
ffe60014 1958 err = ustctl_get_next_subbuf(ustream);
d41f73b7 1959 if (err != 0) {
04ef1097
MD
1960 /*
1961 * Populate metadata info if the existing info has
1962 * already been read.
1963 */
1964 if (stream->metadata_flag) {
94d49140
JD
1965 ret = commit_one_metadata_packet(stream);
1966 if (ret <= 0) {
04ef1097
MD
1967 goto end;
1968 }
04ef1097
MD
1969 ustctl_flush_buffer(stream->ustream, 1);
1970 goto retry;
1971 }
1972
1d4dfdef 1973 ret = err; /* ustctl_get_next_subbuf returns negative, caller expects positive. */
d41f73b7
MD
1974 /*
1975 * This is a debug message even for a single-threaded consumer,
1976 * because poll() has more relaxed criteria than get_subbuf,
1977 * so get_subbuf may fail for short race windows where poll()
1978 * would issue wakeups.
1979 */
1980 DBG("Reserving sub buffer failed (everything is normal, "
ffe60014 1981 "it is due to concurrency) [ret: %d]", err);
d41f73b7
MD
1982 goto end;
1983 }
ffe60014 1984 assert(stream->chan->output == CONSUMER_CHANNEL_MMAP);
309167d2 1985
1c20f0e2 1986 if (!stream->metadata_flag) {
309167d2
JD
1987 index.offset = htobe64(stream->out_fd_offset);
1988 ret = get_index_values(&index, ustream);
1989 if (ret < 0) {
1990 goto end;
1991 }
1c20f0e2
JD
1992 } else {
1993 write_index = 0;
309167d2
JD
1994 }
1995
1d4dfdef 1996 /* Get the full padded subbuffer size */
ffe60014 1997 err = ustctl_get_padded_subbuf_size(ustream, &len);
effcf122 1998 assert(err == 0);
1d4dfdef
DG
1999
2000 /* Get subbuffer data size (without padding) */
ffe60014 2001 err = ustctl_get_subbuf_size(ustream, &subbuf_size);
1d4dfdef
DG
2002 assert(err == 0);
2003
2004 /* Make sure we don't get a subbuffer size bigger than the padded */
2005 assert(len >= subbuf_size);
2006
2007 padding = len - subbuf_size;
d41f73b7 2008 /* write the subbuffer to the tracefile */
309167d2 2009 ret = lttng_consumer_on_read_subbuffer_mmap(ctx, stream, subbuf_size, padding, &index);
91dfef6e
DG
2010 /*
2011 * The mmap operation should write subbuf_size amount of data when network
2012 * streaming or the full padding (len) size when we are _not_ streaming.
2013 */
d88aee68
DG
2014 if ((ret != subbuf_size && stream->net_seq_idx != (uint64_t) -1ULL) ||
2015 (ret != len && stream->net_seq_idx == (uint64_t) -1ULL)) {
d41f73b7 2016 /*
91dfef6e 2017 * Display the error but continue processing to try to release the
c5c45efa
DG
2018 * subbuffer. This is a DBG statement since an unexpected kill or
2019 * signal, the application getting unregistered, the relayd closing, or
2020 * anything else that affects the buffer lifetime can trigger this error.
2021 * So, for the sake of the user, don't print this error since it can
2022 * happen and it is OK with the code flow.
d41f73b7 2023 */
c5c45efa 2024 DBG("Error writing to tracefile "
8fd623e0 2025 "(ret: %ld != len: %lu != subbuf_size: %lu)",
91dfef6e 2026 ret, len, subbuf_size);
309167d2 2027 write_index = 0;
d41f73b7 2028 }
ffe60014 2029 err = ustctl_put_next_subbuf(ustream);
effcf122 2030 assert(err == 0);
331744e3 2031
02b3d176
DG
2032 /*
2033 * This will consume the byte on the wait_fd if and only if there is no
2034 * next subbuffer to be acquired.
2035 */
2036 if (!stream->metadata_flag) {
2037 ret = notify_if_more_data(stream, ctx);
2038 if (ret < 0) {
2039 goto end;
2040 }
2041 }
2042
309167d2 2043 /* Write index if needed. */
1c20f0e2
JD
2044 if (!write_index) {
2045 goto end;
2046 }
2047
94d49140
JD
2048 if (stream->chan->live_timer_interval && !stream->metadata_flag) {
2049 /*
2050 * In live, block until all the metadata is sent.
2051 */
2052 err = consumer_stream_sync_metadata(ctx, stream->session_id);
2053 if (err < 0) {
2054 goto end;
2055 }
2056 }
2057
1c20f0e2
JD
2058 assert(!stream->metadata_flag);
2059 err = consumer_stream_write_index(stream, &index);
2060 if (err < 0) {
2061 goto end;
309167d2
JD
2062 }
2063
d41f73b7
MD
2064end:
2065 return ret;
2066}
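/*
 * Hypothetical sketch (for illustration only): the stream lock MUST be held
 * around lttng_ustconsumer_read_subbuffer(), as documented above. This
 * assumes the lock in question is the 'lock' mutex of struct
 * lttng_consumer_stream.
 */
static int read_one_subbuffer_sketch(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	int ret;

	pthread_mutex_lock(&stream->lock);
	ret = lttng_ustconsumer_read_subbuffer(stream, ctx);
	pthread_mutex_unlock(&stream->lock);

	return ret;
}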
2067
ffe60014
DG
2068/*
2069 * Called when a stream is created.
fe4477ee
JD
2070 *
2071 * Return 0 on success or else a negative value.
ffe60014 2072 */
d41f73b7
MD
2073int lttng_ustconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
2074{
fe4477ee
JD
2075 int ret;
2076
10a50311
JD
2077 assert(stream);
2078
fe4477ee 2079 /* Don't create anything if this is set for streaming. */
10a50311 2080 if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor) {
fe4477ee
JD
2081 ret = utils_create_stream_file(stream->chan->pathname, stream->name,
2082 stream->chan->tracefile_size, stream->tracefile_count_current,
309167d2 2083 stream->uid, stream->gid, NULL);
fe4477ee
JD
2084 if (ret < 0) {
2085 goto error;
2086 }
2087 stream->out_fd = ret;
2088 stream->tracefile_size_current = 0;
309167d2
JD
2089
2090 if (!stream->metadata_flag) {
2091 ret = index_create_file(stream->chan->pathname,
2092 stream->name, stream->uid, stream->gid,
2093 stream->chan->tracefile_size,
2094 stream->tracefile_count_current);
2095 if (ret < 0) {
2096 goto error;
2097 }
2098 stream->index_fd = ret;
2099 }
fe4477ee
JD
2100 }
2101 ret = 0;
2102
2103error:
2104 return ret;
d41f73b7 2105}
ca22feea
DG
2106
2107/*
2108 * Check if data is still being extracted from the buffers for a specific
4e9a4686
DG
2109 * stream. The consumer data lock and the stream lock MUST be acquired
2110 * before calling this function.
ca22feea 2111 *
6d805429 2112 * Return 1 if the traced data is still being read, else 0, meaning that the
ca22feea
DG
2113 * data is available for trace viewer reading.
2114 */
6d805429 2115int lttng_ustconsumer_data_pending(struct lttng_consumer_stream *stream)
ca22feea
DG
2116{
2117 int ret;
2118
2119 assert(stream);
ffe60014 2120 assert(stream->ustream);
ca22feea 2121
6d805429 2122 DBG("UST consumer checking data pending");
c8f59ee5 2123
ca6b395f
MD
2124 if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
2125 ret = 0;
2126 goto end;
2127 }
2128
04ef1097 2129 if (stream->chan->type == CONSUMER_CHANNEL_TYPE_METADATA) {
e6ee4eab
DG
2130 uint64_t contiguous, pushed;
2131
2132 /* Ease our life a bit. */
2133 contiguous = stream->chan->metadata_cache->contiguous;
2134 pushed = stream->ust_metadata_pushed;
2135
04ef1097
MD
2136 /*
2137 * We can simply check whether all contiguously available data
2138 * has been pushed to the ring buffer, since the push operation
2139 * is performed within get_next_subbuf(), and because both
2140 * get_next_subbuf() and put_next_subbuf() are issued atomically
2141 * thanks to the stream lock within
2142 * lttng_ustconsumer_read_subbuffer(). This basically means that
2143 * whenever ust_metadata_pushed is incremented, the associated
2144 * metadata has been consumed from the metadata stream.
2145 */
2146 DBG("UST consumer metadata pending check: contiguous %" PRIu64 " vs pushed %" PRIu64,
e6ee4eab 2147 contiguous, pushed);
aa01b94c 2148 assert(((int64_t) (contiguous - pushed)) >= 0);
e6ee4eab 2149 if ((contiguous != pushed) ||
6acdf328 2150 (((int64_t) contiguous - pushed) > 0 || contiguous == 0)) {
04ef1097
MD
2151 ret = 1; /* Data is pending */
2152 goto end;
2153 }
2154 } else {
2155 ret = ustctl_get_next_subbuf(stream->ustream);
2156 if (ret == 0) {
2157 /*
2158 * There is still data so let's put back this
2159 * subbuffer.
2160 */
2161 ret = ustctl_put_subbuf(stream->ustream);
2162 assert(ret == 0);
2163 ret = 1; /* Data is pending */
2164 goto end;
2165 }
ca22feea
DG
2166 }
2167
6d805429
DG
2168 /* Data is NOT pending so ready to be read. */
2169 ret = 0;
ca22feea 2170
6efae65e
DG
2171end:
2172 return ret;
ca22feea 2173}
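/*
 * Hypothetical sketch (for illustration only): the locking contract
 * documented above. It assumes the locks in question are consumer_data.lock
 * and the per-stream 'lock' mutex; those names are assumptions made for the
 * sake of the example.
 */
static int check_data_pending_sketch(struct lttng_consumer_stream *stream)
{
	int pending;

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->lock);

	pending = lttng_ustconsumer_data_pending(stream);

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	return pending;
}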
d88aee68 2174
6d574024
DG
2175/*
2176 * Stop a given metadata channel timer if enabled and close the wait fd which
2177 * is the poll pipe of the metadata stream.
2178 *
2179 * This MUST be called with the metadata channel acquired.
2180 */
2181void lttng_ustconsumer_close_metadata(struct lttng_consumer_channel *metadata)
2182{
2183 int ret;
2184
2185 assert(metadata);
2186 assert(metadata->type == CONSUMER_CHANNEL_TYPE_METADATA);
2187
2188 DBG("Closing metadata channel key %" PRIu64, metadata->key);
2189
2190 if (metadata->switch_timer_enabled == 1) {
2191 consumer_timer_switch_stop(metadata);
2192 }
2193
2194 if (!metadata->metadata_stream) {
2195 goto end;
2196 }
2197
2198 /*
2199 * Close the write side so that the thread monitoring the stream, if any,
2200 * wakes up and cleans the metadata stream.
2201 */
2202 if (metadata->metadata_stream->ust_metadata_poll_pipe[1] >= 0) {
2203 ret = close(metadata->metadata_stream->ust_metadata_poll_pipe[1]);
2204 if (ret < 0) {
2205 PERROR("closing metadata pipe write side");
2206 }
2207 metadata->metadata_stream->ust_metadata_poll_pipe[1] = -1;
2208 }
2209
2210end:
2211 return;
2212}
2213
d88aee68
DG
2214/*
2215 * Close every metadata stream wait fd of the metadata hash table. This
2216 * function MUST be used very carefully so as not to run into a race between the
2217 * metadata thread handling streams and this function closing their wait fd.
2218 *
2219 * For UST, this is used when the session daemon hangs up. It is the metadata
2220 * producer, so calling this is safe because we are assured that no state change
2221 * can occur in the metadata thread for the streams in the hash table.
2222 */
6d574024 2223void lttng_ustconsumer_close_all_metadata(struct lttng_ht *metadata_ht)
d88aee68 2224{
d88aee68
DG
2225 struct lttng_ht_iter iter;
2226 struct lttng_consumer_stream *stream;
2227
2228 assert(metadata_ht);
2229 assert(metadata_ht->ht);
2230
2231 DBG("UST consumer closing all metadata streams");
2232
2233 rcu_read_lock();
2234 cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream,
2235 node.node) {
9ce5646a
MD
2236
2237 health_code_update();
2238
be2b50c7 2239 pthread_mutex_lock(&stream->chan->lock);
6d574024 2240 lttng_ustconsumer_close_metadata(stream->chan);
be2b50c7
DG
2241 pthread_mutex_unlock(&stream->chan->lock);
2242
d88aee68
DG
2243 }
2244 rcu_read_unlock();
2245}
d8ef542d
MD
2246
2247void lttng_ustconsumer_close_stream_wakeup(struct lttng_consumer_stream *stream)
2248{
2249 int ret;
2250
2251 ret = ustctl_stream_close_wakeup_fd(stream->ustream);
2252 if (ret < 0) {
2253 ERR("Unable to close wakeup fd");
2254 }
2255}
331744e3 2256
f666ae70
MD
2257/*
2258 * Please refer to consumer-timer.c before adding any lock within this
2259 * function or any of its callees. Timers have a very strict locking
2260 * semantic with respect to teardown. Failure to respect this semantic
2261 * introduces deadlocks.
2262 */
331744e3 2263int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
94d49140 2264 struct lttng_consumer_channel *channel, int timer, int wait)
331744e3
JD
2265{
2266 struct lttcomm_metadata_request_msg request;
2267 struct lttcomm_consumer_msg msg;
0c759fc9 2268 enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
331744e3
JD
2269 uint64_t len, key, offset;
2270 int ret;
2271
2272 assert(channel);
2273 assert(channel->metadata_cache);
2274
53efb85a
MD
2275 memset(&request, 0, sizeof(request));
2276
331744e3
JD
2277 /* send the metadata request to sessiond */
2278 switch (consumer_data.type) {
2279 case LTTNG_CONSUMER64_UST:
2280 request.bits_per_long = 64;
2281 break;
2282 case LTTNG_CONSUMER32_UST:
2283 request.bits_per_long = 32;
2284 break;
2285 default:
2286 request.bits_per_long = 0;
2287 break;
2288 }
2289
2290 request.session_id = channel->session_id;
1950109e 2291 request.session_id_per_pid = channel->session_id_per_pid;
567eb353
DG
2292 /*
2293 * Request the application UID here so the metadata of that application can
2294 * be sent back. The channel UID corresponds to the user UID of the session
2295 * used for the rights on the stream file(s).
2296 */
2297 request.uid = channel->ust_app_uid;
331744e3 2298 request.key = channel->key;
567eb353 2299
1950109e 2300 DBG("Sending metadata request to sessiond, session id %" PRIu64
567eb353
DG
2301 ", per-pid %" PRIu64 ", app UID %u and channel key %" PRIu64,
2302 request.session_id, request.session_id_per_pid, request.uid,
2303 request.key);
331744e3 2304
75d83e50 2305 pthread_mutex_lock(&ctx->metadata_socket_lock);
9ce5646a
MD
2306
2307 health_code_update();
2308
331744e3
JD
2309 ret = lttcomm_send_unix_sock(ctx->consumer_metadata_socket, &request,
2310 sizeof(request));
2311 if (ret < 0) {
2312 ERR("Asking metadata to sessiond");
2313 goto end;
2314 }
2315
9ce5646a
MD
2316 health_code_update();
2317
331744e3
JD
2318 /* Receive the metadata from sessiond */
2319 ret = lttcomm_recv_unix_sock(ctx->consumer_metadata_socket, &msg,
2320 sizeof(msg));
2321 if (ret != sizeof(msg)) {
8fd623e0 2322 DBG("Consumer received unexpected message size %d (expects %zu)",
331744e3
JD
2323 ret, sizeof(msg));
2324 lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
2325 /*
2326 * The ret value might be 0, meaning an orderly shutdown, but this is ok
2327 * since the caller handles this.
2328 */
2329 goto end;
2330 }
2331
9ce5646a
MD
2332 health_code_update();
2333
331744e3
JD
2334 if (msg.cmd_type == LTTNG_ERR_UND) {
2335 /* No registry found */
2336 (void) consumer_send_status_msg(ctx->consumer_metadata_socket,
2337 ret_code);
2338 ret = 0;
2339 goto end;
2340 } else if (msg.cmd_type != LTTNG_CONSUMER_PUSH_METADATA) {
2341 ERR("Unexpected cmd_type received %d", msg.cmd_type);
2342 ret = -1;
2343 goto end;
2344 }
2345
2346 len = msg.u.push_metadata.len;
2347 key = msg.u.push_metadata.key;
2348 offset = msg.u.push_metadata.target_offset;
2349
2350 assert(key == channel->key);
2351 if (len == 0) {
2352 DBG("No new metadata to receive for key %" PRIu64, key);
2353 }
2354
9ce5646a
MD
2355 health_code_update();
2356
331744e3
JD
2357 /* Tell session daemon we are ready to receive the metadata. */
2358 ret = consumer_send_status_msg(ctx->consumer_metadata_socket,
0c759fc9 2359 LTTCOMM_CONSUMERD_SUCCESS);
331744e3
JD
2360 if (ret < 0 || len == 0) {
2361 /*
2362 * Somehow, the session daemon is not responding anymore or there is
2363 * nothing to receive.
2364 */
2365 goto end;
2366 }
2367
9ce5646a
MD
2368 health_code_update();
2369
1eb682be 2370 ret = lttng_ustconsumer_recv_metadata(ctx->consumer_metadata_socket,
94d49140 2371 key, offset, len, channel, timer, wait);
1eb682be 2372 if (ret >= 0) {
f2a444f1
DG
2373 /*
2374 * Only send the status msg if the sessiond is alive meaning a positive
2375 * ret code.
2376 */
1eb682be 2377 (void) consumer_send_status_msg(ctx->consumer_metadata_socket, ret);
f2a444f1 2378 }
331744e3
JD
2379 ret = 0;
2380
2381end:
9ce5646a
MD
2382 health_code_update();
2383
75d83e50 2384 pthread_mutex_unlock(&ctx->metadata_socket_lock);
331744e3
JD
2385 return ret;
2386}
70190e1c
DG
2387
2388/*
2389 * Return the result of the ustctl call getting the stream id.
2390 */
2391int lttng_ustconsumer_get_stream_id(struct lttng_consumer_stream *stream,
2392 uint64_t *stream_id)
2393{
2394 assert(stream);
2395 assert(stream_id);
2396
2397 return ustctl_get_stream_id(stream->ustream, stream_id);
2398}
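/*
 * Hypothetical sketch (for illustration only): using the wrapper above to
 * fill a big-endian CTF index field, mirroring what get_index_values() does
 * directly with ustctl_get_stream_id().
 */
static int fill_index_stream_id_sketch(struct lttng_consumer_stream *stream,
		struct ctf_packet_index *index)
{
	int ret;
	uint64_t stream_id;

	ret = lttng_ustconsumer_get_stream_id(stream, &stream_id);
	if (ret < 0) {
		return ret;
	}
	/* CTF packet index fields are stored big-endian. */
	index->stream_id = htobe64(stream_id);
	return 0;
}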