/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng lib ring buffer client template.
 */

#include <stddef.h>
#include <stdint.h>

#include <ust-events-internal.h>
#include "ust-bitfield.h"
#include "ust-compat.h"
#include "clock.h"
#include "lttng-tracer.h"
#include "../libringbuffer/frontend_types.h"

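/*
 * This template is meant to be included by a per-mode client source file,
 * which must define RING_BUFFER_MODE_TEMPLATE, RING_BUFFER_MODE_TEMPLATE_STRING,
 * RING_BUFFER_MODE_TEMPLATE_INIT, RING_BUFFER_MODE_TEMPLATE_EXIT,
 * LTTNG_CLIENT_TYPE and LTTNG_CLIENT_WAKEUP before inclusion: those macros are
 * used below but intentionally not defined here.
 */
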
#define LTTNG_COMPACT_EVENT_BITS	5
#define LTTNG_COMPACT_TSC_BITS		27

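/*
 * The compact event header packs the event ID and the timestamp into a single
 * 32-bit word: LTTNG_COMPACT_EVENT_BITS (5) bits of event ID followed by
 * LTTNG_COMPACT_TSC_BITS (27) low-order timestamp bits. Event ID 31 (all ones
 * on 5 bits) is reserved as the escape value selecting the extended header;
 * the large header type similarly reserves ID 65535.
 */
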
enum app_ctx_mode {
        APP_CTX_DISABLED,
        APP_CTX_ENABLED,
};

/*
 * Keep the natural field alignment for _each field_ within this structure if
 * you ever add/remove a field from this header. Packed attribute is not used
 * because gcc generates poor code on at least powerpc and mips. Don't ever
 * let gcc add padding between the structure elements.
 */

struct packet_header {
        /* Trace packet header */
        uint32_t magic;                 /*
                                         * Trace magic number.
                                         * Contains endianness information.
                                         */
        uint8_t uuid[LTTNG_UST_UUID_LEN];
        uint32_t stream_id;
        uint64_t stream_instance_id;

        struct {
                /* Stream packet context */
                uint64_t timestamp_begin;       /* Cycle count at subbuffer start */
                uint64_t timestamp_end;         /* Cycle count at subbuffer end */
                uint64_t content_size;          /* Size of data in subbuffer */
                uint64_t packet_size;           /* Subbuffer size (includes padding) */
                uint64_t packet_seq_num;        /* Packet sequence number */
                unsigned long events_discarded; /*
                                                 * Events lost in this subbuffer since
                                                 * the beginning of the trace.
                                                 * (may overflow)
                                                 */
                uint32_t cpu_id;                /* CPU id associated with stream */
                uint8_t header_end;             /* End of header */
        } ctx;
};

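/*
 * Note on units: content_size and packet_size are expressed in bits and are
 * filled in at sub-buffer switch time by client_buffer_end(), while
 * timestamp_begin and timestamp_end hold the trace clock sampled at sub-buffer
 * begin and end. header_end only marks the end of the header: its offset is
 * what client_packet_header_size() reports.
 */
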
struct lttng_client_ctx {
        size_t packet_context_len;
        size_t event_context_len;
};

static inline uint64_t lib_ring_buffer_clock_read(struct channel *chan)
{
        return trace_clock_read64();
}

static inline
size_t ctx_get_aligned_size(size_t offset, struct lttng_ctx *ctx,
                size_t ctx_len)
{
        size_t orig_offset = offset;

        if (caa_likely(!ctx))
                return 0;
        offset += lib_ring_buffer_align(offset, ctx->largest_align);
        offset += ctx_len;
        return offset - orig_offset;
}

static inline
void ctx_get_struct_size(struct lttng_ctx *ctx, size_t *ctx_len,
                enum app_ctx_mode mode)
{
        int i;
        size_t offset = 0;

        if (caa_likely(!ctx)) {
                *ctx_len = 0;
                return;
        }
        for (i = 0; i < ctx->nr_fields; i++) {
                if (mode == APP_CTX_ENABLED) {
                        offset += ctx->fields[i].get_size(&ctx->fields[i], offset);
                } else {
                        if (lttng_context_is_app(ctx->fields[i].event_field.name)) {
                                /*
                                 * Before UST 2.8, we cannot use the
                                 * application context, because we
                                 * cannot trust that the handler used
                                 * for get_size is the same used for
                                 * ctx_record, which would result in
                                 * corrupted traces when tracing
                                 * concurrently with application context
                                 * register/unregister.
                                 */
                                offset += lttng_ust_dummy_get_size(&ctx->fields[i], offset);
                        } else {
                                offset += ctx->fields[i].get_size(&ctx->fields[i], offset);
                        }
                }
        }
        *ctx_len = offset;
}

static inline
void ctx_record(struct lttng_ust_lib_ring_buffer_ctx *bufctx,
                struct lttng_channel *chan,
                struct lttng_ctx *ctx,
                enum app_ctx_mode mode)
{
        int i;

        if (caa_likely(!ctx))
                return;
        lib_ring_buffer_align_ctx(bufctx, ctx->largest_align);
        for (i = 0; i < ctx->nr_fields; i++) {
                if (mode == APP_CTX_ENABLED) {
                        ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
                } else {
                        if (lttng_context_is_app(ctx->fields[i].event_field.name)) {
                                /*
                                 * Before UST 2.8, we cannot use the
                                 * application context, because we
                                 * cannot trust that the handler used
                                 * for get_size is the same used for
                                 * ctx_record, which would result in
                                 * corrupted traces when tracing
                                 * concurrently with application context
                                 * register/unregister.
                                 */
                                lttng_ust_dummy_record(&ctx->fields[i], bufctx, chan);
                        } else {
                                ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
                        }
                }
        }
}

/*
 * record_header_size - Calculate the header size and padding necessary.
 * @config: ring buffer instance configuration
 * @chan: channel
 * @offset: offset in the write buffer
 * @pre_header_padding: padding to add before the header (output)
 * @ctx: reservation context
 * @client_ctx: client context, holding the pre-computed context field sizes
 *
 * Returns the event header size (including padding).
 *
 * The payload must itself determine its own alignment from the biggest type it
 * contains.
 */
static __inline__
size_t record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
                struct channel *chan, size_t offset,
                size_t *pre_header_padding,
                struct lttng_ust_lib_ring_buffer_ctx *ctx,
                struct lttng_client_ctx *client_ctx)
{
        struct lttng_channel *lttng_chan = channel_get_private(chan);
        struct lttng_event *event = ctx->priv;
        struct lttng_stack_ctx *lttng_ctx = ctx->priv2;
        size_t orig_offset = offset;
        size_t padding;

        switch (lttng_chan->header_type) {
        case 1: /* compact */
                padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
                offset += padding;
                if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
                        offset += sizeof(uint32_t);     /* id and timestamp */
                } else {
                        /* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
                        offset += (LTTNG_COMPACT_EVENT_BITS + CHAR_BIT - 1) / CHAR_BIT;
                        /* Align extended struct on largest member */
                        offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
                        offset += sizeof(uint32_t);     /* id */
                        offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
                        offset += sizeof(uint64_t);     /* timestamp */
                }
                break;
        case 2: /* large */
                padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
                offset += padding;
                offset += sizeof(uint16_t);
                if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
                        offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
                        offset += sizeof(uint32_t);     /* timestamp */
                } else {
                        /* Align extended struct on largest member */
                        offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
                        offset += sizeof(uint32_t);     /* id */
                        offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
                        offset += sizeof(uint64_t);     /* timestamp */
                }
                break;
        default:
                padding = 0;
                WARN_ON_ONCE(1);
        }
        if (lttng_ctx) {
                /* 2.8+ probe ABI. */
                offset += ctx_get_aligned_size(offset, lttng_ctx->chan_ctx,
                                client_ctx->packet_context_len);
                offset += ctx_get_aligned_size(offset, lttng_ctx->event_ctx,
                                client_ctx->event_context_len);
        } else {
                /* Pre 2.8 probe ABI. */
                offset += ctx_get_aligned_size(offset, lttng_chan->ctx,
                                client_ctx->packet_context_len);
                offset += ctx_get_aligned_size(offset, event->ctx,
                                client_ctx->event_context_len);
        }
        *pre_header_padding = padding;
        return offset - orig_offset;
}

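/*
 * Example, compact header type with no context fields attached: if the
 * reservation offset is already 32-bit aligned and neither
 * RING_BUFFER_RFLAG_FULL_TSC nor LTTNG_RFLAG_EXTENDED is set, the header is a
 * single uint32_t (4 bytes, no padding) holding the 5-bit event ID and the 27
 * low-order timestamp bits. With either flag set, the extended layout is used
 * instead: a 1-byte escape ID, padding up to 8-byte alignment, a 32-bit event
 * ID, more padding, then a full 64-bit timestamp.
 */
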
#include "../libringbuffer/api.h"
#include "lttng-rb-clients.h"

static
void lttng_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
                struct lttng_ust_lib_ring_buffer_ctx *ctx,
                uint32_t event_id);

/*
 * lttng_write_event_header
 *
 * Writes the event header to the offset (already aligned on 32-bits).
 *
 * @config: ring buffer instance configuration
 * @ctx: reservation context
 * @event_id: event ID
 */
static __inline__
void lttng_write_event_header(const struct lttng_ust_lib_ring_buffer_config *config,
                struct lttng_ust_lib_ring_buffer_ctx *ctx,
                uint32_t event_id)
{
        struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
        struct lttng_event *event = ctx->priv;
        struct lttng_stack_ctx *lttng_ctx = ctx->priv2;

        if (caa_unlikely(ctx->rflags))
                goto slow_path;

        switch (lttng_chan->header_type) {
        case 1: /* compact */
        {
                uint32_t id_time = 0;

                bt_bitfield_write(&id_time, uint32_t,
                                0,
                                LTTNG_COMPACT_EVENT_BITS,
                                event_id);
                bt_bitfield_write(&id_time, uint32_t,
                                LTTNG_COMPACT_EVENT_BITS,
                                LTTNG_COMPACT_TSC_BITS,
                                ctx->tsc);
                lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
                break;
        }
        case 2: /* large */
        {
                uint32_t timestamp = (uint32_t) ctx->tsc;
                uint16_t id = event_id;

                lib_ring_buffer_write(config, ctx, &id, sizeof(id));
                lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
                lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
                break;
        }
        default:
                WARN_ON_ONCE(1);
        }

        if (lttng_ctx) {
                /* 2.8+ probe ABI. */
                ctx_record(ctx, lttng_chan, lttng_ctx->chan_ctx, APP_CTX_ENABLED);
                ctx_record(ctx, lttng_chan, lttng_ctx->event_ctx, APP_CTX_ENABLED);
        } else {
                /* Pre 2.8 probe ABI. */
                ctx_record(ctx, lttng_chan, lttng_chan->ctx, APP_CTX_DISABLED);
                ctx_record(ctx, lttng_chan, event->ctx, APP_CTX_DISABLED);
        }
        lib_ring_buffer_align_ctx(ctx, ctx->largest_align);

        return;

slow_path:
        lttng_write_event_header_slow(config, ctx, event_id);
}

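/*
 * Fast/slow path split: any non-zero ctx->rflags (RING_BUFFER_RFLAG_FULL_TSC
 * and/or LTTNG_RFLAG_EXTENDED) sends the header write through the slow path
 * below, which emits the extended header layout when either flag is set.
 */
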
static
void lttng_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
                struct lttng_ust_lib_ring_buffer_ctx *ctx,
                uint32_t event_id)
{
        struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
        struct lttng_event *event = ctx->priv;
        struct lttng_stack_ctx *lttng_ctx = ctx->priv2;

        switch (lttng_chan->header_type) {
        case 1: /* compact */
                if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
                        uint32_t id_time = 0;

                        bt_bitfield_write(&id_time, uint32_t,
                                        0,
                                        LTTNG_COMPACT_EVENT_BITS,
                                        event_id);
                        bt_bitfield_write(&id_time, uint32_t,
                                        LTTNG_COMPACT_EVENT_BITS,
                                        LTTNG_COMPACT_TSC_BITS,
                                        ctx->tsc);
                        lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
                } else {
                        uint8_t id = 0;
                        uint64_t timestamp = ctx->tsc;

                        bt_bitfield_write(&id, uint8_t,
                                        0,
                                        LTTNG_COMPACT_EVENT_BITS,
                                        31);
                        lib_ring_buffer_write(config, ctx, &id, sizeof(id));
                        /* Align extended struct on largest member */
                        lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
                        lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
                        lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
                        lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
                }
                break;
        case 2: /* large */
        {
                if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
                        uint32_t timestamp = (uint32_t) ctx->tsc;
                        uint16_t id = event_id;

                        lib_ring_buffer_write(config, ctx, &id, sizeof(id));
                        lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
                        lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
                } else {
                        uint16_t id = 65535;
                        uint64_t timestamp = ctx->tsc;

                        lib_ring_buffer_write(config, ctx, &id, sizeof(id));
                        /* Align extended struct on largest member */
                        lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
                        lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
                        lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
                        lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
                }
                break;
        }
        default:
                WARN_ON_ONCE(1);
        }
        if (lttng_ctx) {
                /* 2.8+ probe ABI. */
                ctx_record(ctx, lttng_chan, lttng_ctx->chan_ctx, APP_CTX_ENABLED);
                ctx_record(ctx, lttng_chan, lttng_ctx->event_ctx, APP_CTX_ENABLED);
        } else {
                /* Pre 2.8 probe ABI. */
                ctx_record(ctx, lttng_chan, lttng_chan->ctx, APP_CTX_DISABLED);
                ctx_record(ctx, lttng_chan, event->ctx, APP_CTX_DISABLED);
        }
        lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}

static const struct lttng_ust_lib_ring_buffer_config client_config;

static uint64_t client_ring_buffer_clock_read(struct channel *chan)
{
        return lib_ring_buffer_clock_read(chan);
}

static
size_t client_record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
                struct channel *chan, size_t offset,
                size_t *pre_header_padding,
                struct lttng_ust_lib_ring_buffer_ctx *ctx,
                void *client_ctx)
{
        return record_header_size(config, chan, offset,
                        pre_header_padding, ctx, client_ctx);
}

/**
 * client_packet_header_size - called on buffer-switch to a new sub-buffer
 *
 * Return header size without padding after the structure. Don't use packed
 * structure because gcc generates inefficient code on some architectures
 * (powerpc, mips..)
 */
static size_t client_packet_header_size(void)
{
        return offsetof(struct packet_header, ctx.header_end);
}

static void client_buffer_begin(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
                unsigned int subbuf_idx,
                struct lttng_ust_shm_handle *handle)
{
        struct channel *chan = shmp(handle, buf->backend.chan);
        struct packet_header *header =
                (struct packet_header *)
                        lib_ring_buffer_offset_address(&buf->backend,
                                subbuf_idx * chan->backend.subbuf_size,
                                handle);
        struct lttng_channel *lttng_chan = channel_get_private(chan);
        uint64_t cnt = shmp_index(handle, buf->backend.buf_cnt, subbuf_idx)->seq_cnt;

        assert(header);
        if (!header)
                return;
        header->magic = CTF_MAGIC_NUMBER;
        memcpy(header->uuid, lttng_chan->uuid, sizeof(lttng_chan->uuid));
        header->stream_id = lttng_chan->id;
        header->stream_instance_id = buf->backend.cpu;
        header->ctx.timestamp_begin = tsc;
        header->ctx.timestamp_end = 0;
        header->ctx.content_size = ~0ULL; /* for debugging */
        header->ctx.packet_size = ~0ULL;
        header->ctx.packet_seq_num = chan->backend.num_subbuf * cnt + subbuf_idx;
        header->ctx.events_discarded = 0;
        header->ctx.cpu_id = buf->backend.cpu;
}

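/*
 * packet_seq_num is derived from the per-sub-buffer sequence count: with
 * num_subbuf sub-buffers, num_subbuf * seq_cnt + subbuf_idx yields a
 * monotonically increasing packet sequence number for the whole buffer,
 * typically used by consumers to detect lost packets.
 */
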
/*
 * offset is assumed to never be 0 here: never deliver a completely empty
 * subbuffer. data_size is between 1 and subbuf_size.
 */
static void client_buffer_end(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
                unsigned int subbuf_idx, unsigned long data_size,
                struct lttng_ust_shm_handle *handle)
{
        struct channel *chan = shmp(handle, buf->backend.chan);
        struct packet_header *header =
                (struct packet_header *)
                        lib_ring_buffer_offset_address(&buf->backend,
                                subbuf_idx * chan->backend.subbuf_size,
                                handle);
        unsigned long records_lost = 0;

        assert(header);
        if (!header)
                return;
        header->ctx.timestamp_end = tsc;
        header->ctx.content_size =
                (uint64_t) data_size * CHAR_BIT;                /* in bits */
        header->ctx.packet_size =
                (uint64_t) LTTNG_UST_PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */

        records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
        records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
        records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
        header->ctx.events_discarded = records_lost;
}

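/*
 * events_discarded, as exposed in the packet context, is the sum of the ring
 * buffer's records_lost_full, records_lost_wrap and records_lost_big counters
 * sampled at sub-buffer end.
 */
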
static int client_buffer_create(struct lttng_ust_lib_ring_buffer *buf, void *priv,
                int cpu, const char *name, struct lttng_ust_shm_handle *handle)
{
        return 0;
}

static void client_buffer_finalize(struct lttng_ust_lib_ring_buffer *buf, void *priv, int cpu, struct lttng_ust_shm_handle *handle)
{
}

static void client_content_size_field(const struct lttng_ust_lib_ring_buffer_config *config,
                size_t *offset, size_t *length)
{
        *offset = offsetof(struct packet_header, ctx.content_size);
        *length = sizeof(((struct packet_header *) NULL)->ctx.content_size);
}

static void client_packet_size_field(const struct lttng_ust_lib_ring_buffer_config *config,
                size_t *offset, size_t *length)
{
        *offset = offsetof(struct packet_header, ctx.packet_size);
        *length = sizeof(((struct packet_header *) NULL)->ctx.packet_size);
}

static struct packet_header *client_packet_header(struct lttng_ust_lib_ring_buffer *buf,
                struct lttng_ust_shm_handle *handle)
{
        return lib_ring_buffer_read_offset_address(&buf->backend, 0, handle);
}

static int client_timestamp_begin(struct lttng_ust_lib_ring_buffer *buf,
                struct lttng_ust_shm_handle *handle,
                uint64_t *timestamp_begin)
{
        struct packet_header *header;

        header = client_packet_header(buf, handle);
        if (!header)
                return -1;
        *timestamp_begin = header->ctx.timestamp_begin;
        return 0;
}

static int client_timestamp_end(struct lttng_ust_lib_ring_buffer *buf,
                struct lttng_ust_shm_handle *handle,
                uint64_t *timestamp_end)
{
        struct packet_header *header;

        header = client_packet_header(buf, handle);
        if (!header)
                return -1;
        *timestamp_end = header->ctx.timestamp_end;
        return 0;
}

static int client_events_discarded(struct lttng_ust_lib_ring_buffer *buf,
                struct lttng_ust_shm_handle *handle,
                uint64_t *events_discarded)
{
        struct packet_header *header;

        header = client_packet_header(buf, handle);
        if (!header)
                return -1;
        *events_discarded = header->ctx.events_discarded;
        return 0;
}

static int client_content_size(struct lttng_ust_lib_ring_buffer *buf,
                struct lttng_ust_shm_handle *handle,
                uint64_t *content_size)
{
        struct packet_header *header;

        header = client_packet_header(buf, handle);
        if (!header)
                return -1;
        *content_size = header->ctx.content_size;
        return 0;
}

static int client_packet_size(struct lttng_ust_lib_ring_buffer *buf,
                struct lttng_ust_shm_handle *handle,
                uint64_t *packet_size)
{
        struct packet_header *header;

        header = client_packet_header(buf, handle);
        if (!header)
                return -1;
        *packet_size = header->ctx.packet_size;
        return 0;
}

static int client_stream_id(struct lttng_ust_lib_ring_buffer *buf,
                struct lttng_ust_shm_handle *handle,
                uint64_t *stream_id)
{
        struct channel *chan = shmp(handle, buf->backend.chan);
        struct lttng_channel *lttng_chan = channel_get_private(chan);

        *stream_id = lttng_chan->id;

        return 0;
}

static int client_current_timestamp(struct lttng_ust_lib_ring_buffer *buf,
                struct lttng_ust_shm_handle *handle,
                uint64_t *ts)
{
        struct channel *chan;

        chan = shmp(handle, handle->chan);
        *ts = client_ring_buffer_clock_read(chan);

        return 0;
}

static int client_sequence_number(struct lttng_ust_lib_ring_buffer *buf,
                struct lttng_ust_shm_handle *handle,
                uint64_t *seq)
{
        struct packet_header *header;

        header = client_packet_header(buf, handle);
        if (!header)
                return -1;
        *seq = header->ctx.packet_seq_num;
        return 0;
}

static int client_instance_id(struct lttng_ust_lib_ring_buffer *buf,
                struct lttng_ust_shm_handle *handle,
                uint64_t *id)
{
        *id = buf->backend.cpu;

        return 0;
}

static const
struct lttng_ust_client_lib_ring_buffer_client_cb client_cb = {
        .parent = {
                .ring_buffer_clock_read = client_ring_buffer_clock_read,
                .record_header_size = client_record_header_size,
                .subbuffer_header_size = client_packet_header_size,
                .buffer_begin = client_buffer_begin,
                .buffer_end = client_buffer_end,
                .buffer_create = client_buffer_create,
                .buffer_finalize = client_buffer_finalize,
                .content_size_field = client_content_size_field,
                .packet_size_field = client_packet_size_field,
        },
        .timestamp_begin = client_timestamp_begin,
        .timestamp_end = client_timestamp_end,
        .events_discarded = client_events_discarded,
        .content_size = client_content_size,
        .packet_size = client_packet_size,
        .stream_id = client_stream_id,
        .current_timestamp = client_current_timestamp,
        .sequence_number = client_sequence_number,
        .instance_id = client_instance_id,
};

static const struct lttng_ust_lib_ring_buffer_config client_config = {
        .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
        .cb.record_header_size = client_record_header_size,
        .cb.subbuffer_header_size = client_packet_header_size,
        .cb.buffer_begin = client_buffer_begin,
        .cb.buffer_end = client_buffer_end,
        .cb.buffer_create = client_buffer_create,
        .cb.buffer_finalize = client_buffer_finalize,
        .cb.content_size_field = client_content_size_field,
        .cb.packet_size_field = client_packet_size_field,

        .tsc_bits = LTTNG_COMPACT_TSC_BITS,
        .alloc = RING_BUFFER_ALLOC_PER_CPU,
        .sync = RING_BUFFER_SYNC_GLOBAL,
        .mode = RING_BUFFER_MODE_TEMPLATE,
        .backend = RING_BUFFER_PAGE,
        .output = RING_BUFFER_MMAP,
        .oops = RING_BUFFER_OOPS_CONSISTENCY,
        .ipi = RING_BUFFER_NO_IPI_BARRIER,
        .wakeup = LTTNG_CLIENT_WAKEUP,
        .client_type = LTTNG_CLIENT_TYPE,

        .cb_ptr = &client_cb.parent,
};

static
struct lttng_channel *_channel_create(const char *name,
                void *buf_addr,
                size_t subbuf_size, size_t num_subbuf,
                unsigned int switch_timer_interval,
                unsigned int read_timer_interval,
                unsigned char *uuid,
                uint32_t chan_id,
                const int *stream_fds, int nr_stream_fds,
                int64_t blocking_timeout)
{
        struct lttng_channel chan_priv_init;
        struct lttng_ust_shm_handle *handle;
        struct lttng_channel *lttng_chan;
        void *priv;

        memset(&chan_priv_init, 0, sizeof(chan_priv_init));
        memcpy(chan_priv_init.uuid, uuid, LTTNG_UST_UUID_LEN);
        chan_priv_init.id = chan_id;
        handle = channel_create(&client_config, name,
                        &priv, __alignof__(struct lttng_channel),
                        sizeof(struct lttng_channel),
                        &chan_priv_init,
                        buf_addr, subbuf_size, num_subbuf,
                        switch_timer_interval, read_timer_interval,
                        stream_fds, nr_stream_fds, blocking_timeout);
        if (!handle)
                return NULL;
        lttng_chan = priv;
        lttng_chan->handle = handle;
        lttng_chan->chan = shmp(handle, handle->chan);
        return lttng_chan;
}

static
void lttng_channel_destroy(struct lttng_channel *chan)
{
        channel_destroy(chan->chan, chan->handle, 1);
}

static
int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx,
                uint32_t event_id)
{
        struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
        struct lttng_event *event = ctx->priv;
        struct lttng_stack_ctx *lttng_ctx = ctx->priv2;
        struct lttng_client_ctx client_ctx;
        int ret, cpu;

        /* Compute internal size of context structures. */

        if (lttng_ctx) {
                /* 2.8+ probe ABI. */
                ctx_get_struct_size(lttng_ctx->chan_ctx, &client_ctx.packet_context_len,
                                APP_CTX_ENABLED);
                ctx_get_struct_size(lttng_ctx->event_ctx, &client_ctx.event_context_len,
                                APP_CTX_ENABLED);
        } else {
                /* Pre 2.8 probe ABI. */
                ctx_get_struct_size(lttng_chan->ctx, &client_ctx.packet_context_len,
                                APP_CTX_DISABLED);
                ctx_get_struct_size(event->ctx, &client_ctx.event_context_len,
                                APP_CTX_DISABLED);
        }

        cpu = lib_ring_buffer_get_cpu(&client_config);
        if (cpu < 0)
                return -EPERM;
        ctx->cpu = cpu;

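        /*
         * IDs above 30 (compact) or 65534 (large) do not fit in the default
         * header: 31 and 65535 are the reserved escape values written by the
         * slow path, so LTTNG_RFLAG_EXTENDED is set here to request the
         * extended header layout.
         */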
        switch (lttng_chan->header_type) {
        case 1: /* compact */
                if (event_id > 30)
                        ctx->rflags |= LTTNG_RFLAG_EXTENDED;
                break;
        case 2: /* large */
                if (event_id > 65534)
                        ctx->rflags |= LTTNG_RFLAG_EXTENDED;
                break;
        default:
                WARN_ON_ONCE(1);
        }

        ret = lib_ring_buffer_reserve(&client_config, ctx, &client_ctx);
        if (caa_unlikely(ret))
                goto put;
        if (caa_likely(ctx->ctx_len
                        >= sizeof(struct lttng_ust_lib_ring_buffer_ctx))) {
                if (lib_ring_buffer_backend_get_pages(&client_config, ctx,
                                &ctx->backend_pages)) {
                        ret = -EPERM;
                        goto put;
                }
        }
        lttng_write_event_header(&client_config, ctx, event_id);
        return 0;
put:
        lib_ring_buffer_put_cpu(&client_config);
        return ret;
}

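/*
 * Rough call sequence on the probe side (sketch, not a literal API listing):
 * a probe initializes a struct lttng_ust_lib_ring_buffer_ctx for the event
 * payload, then goes through the transport ops installed below:
 *
 *      chan->ops->event_reserve(&ctx, event_id);   // reserve + write header
 *      chan->ops->event_write(&ctx, payload, len); // serialize fields
 *      chan->ops->event_commit(&ctx);              // commit the record
 */
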
static
void lttng_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
        lib_ring_buffer_commit(&client_config, ctx);
        lib_ring_buffer_put_cpu(&client_config);
}

static
void lttng_event_write(struct lttng_ust_lib_ring_buffer_ctx *ctx, const void *src,
                size_t len)
{
        lib_ring_buffer_write(&client_config, ctx, src, len);
}

static
void lttng_event_strcpy(struct lttng_ust_lib_ring_buffer_ctx *ctx, const char *src,
                size_t len)
{
        lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
}

#if 0
static
wait_queue_head_t *lttng_get_reader_wait_queue(struct channel *chan)
{
        return &chan->read_wait;
}

static
wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
{
        return &chan->hp_wait;
}
#endif //0

static
int lttng_is_finalized(struct channel *chan)
{
        return lib_ring_buffer_channel_is_finalized(chan);
}

static
int lttng_is_disabled(struct channel *chan)
{
        return lib_ring_buffer_channel_is_disabled(chan);
}

static
int lttng_flush_buffer(struct channel *chan, struct lttng_ust_shm_handle *handle)
{
        struct lttng_ust_lib_ring_buffer *buf;
        int cpu;

        for_each_channel_cpu(cpu, chan) {
                int shm_fd, wait_fd, wakeup_fd;
                uint64_t memory_map_size;

                buf = channel_get_ring_buffer(&client_config, chan,
                                cpu, handle, &shm_fd, &wait_fd,
                                &wakeup_fd, &memory_map_size);
                lib_ring_buffer_switch(&client_config, buf,
                                SWITCH_ACTIVE, handle);
        }
        return 0;
}

static struct lttng_transport lttng_relay_transport = {
        .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap",
        .ops = {
                .channel_create = _channel_create,
                .channel_destroy = lttng_channel_destroy,
                .event_reserve = lttng_event_reserve,
                .event_commit = lttng_event_commit,
                .event_write = lttng_event_write,
                .packet_avail_size = NULL,      /* Would be racy anyway */
                .is_finalized = lttng_is_finalized,
                .is_disabled = lttng_is_disabled,
                .flush_buffer = lttng_flush_buffer,
                .event_strcpy = lttng_event_strcpy,
        },
        .client_config = &client_config,
};

void RING_BUFFER_MODE_TEMPLATE_INIT(void)
{
        DBG("LTT : ltt ring buffer client \"%s\" init\n",
                "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
        lttng_transport_register(&lttng_relay_transport);
}

void RING_BUFFER_MODE_TEMPLATE_EXIT(void)
{
        DBG("LTT : ltt ring buffer client \"%s\" exit\n",
                "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
        lttng_transport_unregister(&lttng_relay_transport);
}