Refactoring: struct lttng_ust_channel_ops
lttng-ust.git: liblttng-ust/lttng-ring-buffer-client.h
/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng lib ring buffer client template.
 */

#include <stddef.h>
#include <stdint.h>

#include <ust-events-internal.h>
#include "ust-bitfield.h"
#include "ust-compat.h"
#include "clock.h"
#include "context-internal.h"
#include "lttng-tracer.h"
#include "../libringbuffer/frontend_types.h"

#define LTTNG_COMPACT_EVENT_BITS	5
#define LTTNG_COMPACT_TSC_BITS		27
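
/*
 * The compact event header packs a LTTNG_COMPACT_EVENT_BITS-wide event ID
 * and the low LTTNG_COMPACT_TSC_BITS bits of the timestamp into a single
 * 32-bit word (5 + 27 = 32). The all-ones event ID (31) is reserved as an
 * escape value that selects the extended header layout; see
 * lttng_write_event_header_slow() and lttng_event_reserve() below.
 */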

enum app_ctx_mode {
	APP_CTX_DISABLED,
	APP_CTX_ENABLED,
};

/*
 * Keep the natural field alignment for _each field_ within this structure if
 * you ever add/remove a field from this header. Packed attribute is not used
 * because gcc generates poor code on at least powerpc and mips. Don't ever
 * let gcc add padding between the structure elements.
 */

struct packet_header {
	/* Trace packet header */
	uint32_t magic;			/*
					 * Trace magic number.
					 * contains endianness information.
					 */
	uint8_t uuid[LTTNG_UST_UUID_LEN];
	uint32_t stream_id;
	uint64_t stream_instance_id;

	struct {
		/* Stream packet context */
		uint64_t timestamp_begin;	/* Cycle count at subbuffer start */
		uint64_t timestamp_end;		/* Cycle count at subbuffer end */
		uint64_t content_size;		/* Size of data in subbuffer */
		uint64_t packet_size;		/* Subbuffer size (include padding) */
		uint64_t packet_seq_num;	/* Packet sequence number */
		unsigned long events_discarded;	/*
						 * Events lost in this subbuffer since
						 * the beginning of the trace.
						 * (may overflow)
						 */
		uint32_t cpu_id;		/* CPU id associated with stream */
		uint8_t header_end;		/* End of header */
	} ctx;
};
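
/*
 * The size of this header as written to the trace is
 * offsetof(struct packet_header, ctx.header_end), i.e. everything up to
 * the header_end marker, without trailing struct padding; see
 * client_packet_header_size() below.
 */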

struct lttng_client_ctx {
	size_t packet_context_len;
	size_t event_context_len;
};

static inline uint64_t lib_ring_buffer_clock_read(struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return trace_clock_read64();
}

static inline
size_t ctx_get_aligned_size(size_t offset, struct lttng_ust_ctx *ctx,
		size_t ctx_len)
{
	size_t orig_offset = offset;

	if (caa_likely(!ctx))
		return 0;
	offset += lib_ring_buffer_align(offset, ctx->largest_align);
	offset += ctx_len;
	return offset - orig_offset;
}

static inline
void ctx_get_struct_size(struct lttng_ust_ctx *ctx, size_t *ctx_len,
		enum app_ctx_mode mode)
{
	int i;
	size_t offset = 0;

	if (caa_likely(!ctx)) {
		*ctx_len = 0;
		return;
	}
	for (i = 0; i < ctx->nr_fields; i++) {
		if (mode == APP_CTX_ENABLED) {
			offset += ctx->fields[i]->get_size(ctx->fields[i], offset);
		} else {
			if (lttng_context_is_app(ctx->fields[i]->event_field->name)) {
				/*
				 * Before UST 2.8, we cannot use the
				 * application context, because we
				 * cannot trust that the handler used
				 * for get_size is the same used for
				 * ctx_record, which would result in
				 * corrupted traces when tracing
				 * concurrently with application context
				 * register/unregister.
				 */
				offset += lttng_ust_dummy_get_size(ctx->fields[i], offset);
			} else {
				offset += ctx->fields[i]->get_size(ctx->fields[i], offset);
			}
		}
	}
	*ctx_len = offset;
}

static inline
void ctx_record(struct lttng_ust_lib_ring_buffer_ctx *bufctx,
		struct lttng_channel *chan,
		struct lttng_ust_ctx *ctx,
		enum app_ctx_mode mode)
{
	int i;

	if (caa_likely(!ctx))
		return;
	lib_ring_buffer_align_ctx(bufctx, ctx->largest_align);
	for (i = 0; i < ctx->nr_fields; i++) {
		if (mode == APP_CTX_ENABLED) {
			ctx->fields[i]->record(ctx->fields[i], bufctx, chan);
		} else {
			if (lttng_context_is_app(ctx->fields[i]->event_field->name)) {
				/*
				 * Before UST 2.8, we cannot use the
				 * application context, because we
				 * cannot trust that the handler used
				 * for get_size is the same used for
				 * ctx_record, which would result in
				 * corrupted traces when tracing
				 * concurrently with application context
				 * register/unregister.
				 */
				lttng_ust_dummy_record(ctx->fields[i], bufctx, chan);
			} else {
				ctx->fields[i]->record(ctx->fields[i], bufctx, chan);
			}
		}
	}
}

/*
 * record_header_size - Calculate the header size and padding necessary.
 * @config: ring buffer instance configuration
 * @chan: channel
 * @offset: offset in the write buffer
 * @pre_header_padding: padding to add before the header (output)
 * @ctx: reservation context
 *
 * Returns the event header size (including padding).
 *
 * The payload must itself determine its own alignment from the biggest type it
 * contains.
 */
static __inline__
size_t record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_channel *chan,
		size_t offset,
		size_t *pre_header_padding,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_client_ctx *client_ctx)
{
	struct lttng_channel *lttng_chan = channel_get_private(chan);
	struct lttng_ust_stack_ctx *lttng_ctx = ctx->priv;
	size_t orig_offset = offset;
	size_t padding;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
		offset += padding;
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			offset += sizeof(uint32_t);	/* id and timestamp */
		} else {
			/* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
			offset += (LTTNG_COMPACT_EVENT_BITS + CHAR_BIT - 1) / CHAR_BIT;
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint32_t);	/* id */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint64_t);	/* timestamp */
		}
		break;
	case 2:	/* large */
		padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
		offset += padding;
		offset += sizeof(uint16_t);
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
			offset += sizeof(uint32_t);	/* timestamp */
		} else {
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint32_t);	/* id */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint64_t);	/* timestamp */
		}
		break;
	default:
		padding = 0;
		WARN_ON_ONCE(1);
	}
	offset += ctx_get_aligned_size(offset, lttng_ctx->chan_ctx,
			client_ctx->packet_context_len);
	offset += ctx_get_aligned_size(offset, lttng_ctx->event_ctx,
			client_ctx->event_context_len);
	*pre_header_padding = padding;
	return offset - orig_offset;
}
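
/*
 * Illustrative header size arithmetic for the cases above, assuming the
 * record starts on a 64-bit aligned offset and carries no context fields:
 * - compact, not extended: 4 bytes (event ID and timestamp packed into one
 *   uint32_t);
 * - compact, extended: 1-byte escape ID, padded to offset 8, 4-byte event
 *   ID, padded to offset 16, 8-byte timestamp: 24 bytes total;
 * - large, not extended: 2-byte event ID, padded to offset 4, 4-byte
 *   timestamp: 8 bytes total;
 * - large, extended: 2-byte escape ID, padded to offset 8, 4-byte event
 *   ID, padded to offset 16, 8-byte timestamp: 24 bytes total.
 */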

#include "../libringbuffer/api.h"
#include "lttng-rb-clients.h"

static
void lttng_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		uint32_t event_id);

/*
 * lttng_write_event_header
 *
 * Writes the event header to the offset (already aligned on 32-bits).
 *
 * @config: ring buffer instance configuration
 * @ctx: reservation context
 * @event_id: event ID
 */
static __inline__
void lttng_write_event_header(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
	struct lttng_ust_stack_ctx *lttng_ctx = ctx->priv;

	if (caa_unlikely(ctx->rflags))
		goto slow_path;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
	{
		uint32_t id_time = 0;

		bt_bitfield_write(&id_time, uint32_t,
				0,
				LTTNG_COMPACT_EVENT_BITS,
				event_id);
		bt_bitfield_write(&id_time, uint32_t,
				LTTNG_COMPACT_EVENT_BITS,
				LTTNG_COMPACT_TSC_BITS,
				ctx->tsc);
		lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		break;
	}
	case 2:	/* large */
	{
		uint32_t timestamp = (uint32_t) ctx->tsc;
		uint16_t id = event_id;

		lib_ring_buffer_write(config, ctx, &id, sizeof(id));
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
		lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}

	ctx_record(ctx, lttng_chan, lttng_ctx->chan_ctx, APP_CTX_ENABLED);
	ctx_record(ctx, lttng_chan, lttng_ctx->event_ctx, APP_CTX_ENABLED);
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);

	return;

slow_path:
	lttng_write_event_header_slow(config, ctx, event_id);
}

static
void lttng_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
	struct lttng_ust_stack_ctx *lttng_ctx = ctx->priv;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			uint32_t id_time = 0;

			bt_bitfield_write(&id_time, uint32_t,
					0,
					LTTNG_COMPACT_EVENT_BITS,
					event_id);
			bt_bitfield_write(&id_time, uint32_t,
					LTTNG_COMPACT_EVENT_BITS,
					LTTNG_COMPACT_TSC_BITS,
					ctx->tsc);
			lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		} else {
			uint8_t id = 0;
			uint64_t timestamp = ctx->tsc;

			bt_bitfield_write(&id, uint8_t,
					0,
					LTTNG_COMPACT_EVENT_BITS,
					31);
			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	case 2:	/* large */
	{
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			uint32_t timestamp = (uint32_t) ctx->tsc;
			uint16_t id = event_id;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		} else {
			uint16_t id = 65535;
			uint64_t timestamp = ctx->tsc;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}
	ctx_record(ctx, lttng_chan, lttng_ctx->chan_ctx, APP_CTX_ENABLED);
	ctx_record(ctx, lttng_chan, lttng_ctx->event_ctx, APP_CTX_ENABLED);
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}

static const struct lttng_ust_lib_ring_buffer_config client_config;

static uint64_t client_ring_buffer_clock_read(struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return lib_ring_buffer_clock_read(chan);
}

static
size_t client_record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_channel *chan,
		size_t offset,
		size_t *pre_header_padding,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		void *client_ctx)
{
	return record_header_size(config, chan, offset,
			pre_header_padding, ctx, client_ctx);
}

/**
 * client_packet_header_size - called on buffer-switch to a new sub-buffer
 *
 * Return header size without padding after the structure. Don't use packed
 * structure because gcc generates inefficient code on some architectures
 * (powerpc, mips..)
 */
static size_t client_packet_header_size(void)
{
	return offsetof(struct packet_header, ctx.header_end);
}

static void client_buffer_begin(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
		unsigned int subbuf_idx,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size,
				handle);
	struct lttng_channel *lttng_chan = channel_get_private(chan);
	uint64_t cnt = shmp_index(handle, buf->backend.buf_cnt, subbuf_idx)->seq_cnt;

	assert(header);
	if (!header)
		return;
	header->magic = CTF_MAGIC_NUMBER;
	memcpy(header->uuid, lttng_chan->uuid, sizeof(lttng_chan->uuid));
	header->stream_id = lttng_chan->id;
	header->stream_instance_id = buf->backend.cpu;
	header->ctx.timestamp_begin = tsc;
	header->ctx.timestamp_end = 0;
	header->ctx.content_size = ~0ULL; /* for debugging */
	header->ctx.packet_size = ~0ULL;
	header->ctx.packet_seq_num = chan->backend.num_subbuf * cnt + subbuf_idx;
	header->ctx.events_discarded = 0;
	header->ctx.cpu_id = buf->backend.cpu;
}
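
/*
 * Example of the packet_seq_num computation above: with num_subbuf = 4,
 * filling sub-buffer index 2 for the third time (seq_cnt = 2) yields
 * packet_seq_num = 4 * 2 + 2 = 10, so sequence numbers keep increasing
 * across sub-buffer reuse.
 */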

/*
 * offset is assumed to never be 0 here : never deliver a completely empty
 * subbuffer. data_size is between 1 and subbuf_size.
 */
static void client_buffer_end(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
		unsigned int subbuf_idx, unsigned long data_size,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size,
				handle);
	unsigned long records_lost = 0;

	assert(header);
	if (!header)
		return;
	header->ctx.timestamp_end = tsc;
	header->ctx.content_size =
		(uint64_t) data_size * CHAR_BIT;		/* in bits */
	header->ctx.packet_size =
		(uint64_t) LTTNG_UST_PAGE_ALIGN(data_size) * CHAR_BIT;	/* in bits */

	records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
	header->ctx.events_discarded = records_lost;
}
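
/*
 * Both sizes are written in bits, as the comments above indicate. For
 * example, assuming a 4 KiB page size, data_size = 4000 bytes gives
 * content_size = 32000 bits and packet_size = 32768 bits (the data size
 * rounded up to a page boundary).
 */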

static int client_buffer_create(struct lttng_ust_lib_ring_buffer *buf, void *priv,
		int cpu, const char *name, struct lttng_ust_shm_handle *handle)
{
	return 0;
}

static void client_buffer_finalize(struct lttng_ust_lib_ring_buffer *buf, void *priv, int cpu, struct lttng_ust_shm_handle *handle)
{
}

static void client_content_size_field(const struct lttng_ust_lib_ring_buffer_config *config,
		size_t *offset, size_t *length)
{
	*offset = offsetof(struct packet_header, ctx.content_size);
	*length = sizeof(((struct packet_header *) NULL)->ctx.content_size);
}

static void client_packet_size_field(const struct lttng_ust_lib_ring_buffer_config *config,
		size_t *offset, size_t *length)
{
	*offset = offsetof(struct packet_header, ctx.packet_size);
	*length = sizeof(((struct packet_header *) NULL)->ctx.packet_size);
}

static struct packet_header *client_packet_header(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle)
{
	return lib_ring_buffer_read_offset_address(&buf->backend, 0, handle);
}

static int client_timestamp_begin(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *timestamp_begin)
{
	struct packet_header *header;

	header = client_packet_header(buf, handle);
	if (!header)
		return -1;
	*timestamp_begin = header->ctx.timestamp_begin;
	return 0;
}

static int client_timestamp_end(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *timestamp_end)
{
	struct packet_header *header;

	header = client_packet_header(buf, handle);
	if (!header)
		return -1;
	*timestamp_end = header->ctx.timestamp_end;
	return 0;
}

static int client_events_discarded(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *events_discarded)
{
	struct packet_header *header;

	header = client_packet_header(buf, handle);
	if (!header)
		return -1;
	*events_discarded = header->ctx.events_discarded;
	return 0;
}

static int client_content_size(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *content_size)
{
	struct packet_header *header;

	header = client_packet_header(buf, handle);
	if (!header)
		return -1;
	*content_size = header->ctx.content_size;
	return 0;
}

static int client_packet_size(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *packet_size)
{
	struct packet_header *header;

	header = client_packet_header(buf, handle);
	if (!header)
		return -1;
	*packet_size = header->ctx.packet_size;
	return 0;
}

static int client_stream_id(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *stream_id)
{
	struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle,
			buf->backend.chan);
	struct lttng_channel *lttng_chan = channel_get_private(chan);

	*stream_id = lttng_chan->id;

	return 0;
}

static int client_current_timestamp(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *ts)
{
	struct lttng_ust_lib_ring_buffer_channel *chan;

	chan = shmp(handle, handle->chan);
	*ts = client_ring_buffer_clock_read(chan);

	return 0;
}

static int client_sequence_number(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *seq)
{
	struct packet_header *header;

	header = client_packet_header(buf, handle);
	if (!header)
		return -1;
	*seq = header->ctx.packet_seq_num;
	return 0;
}

static int client_instance_id(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle,
		uint64_t *id)
{
	*id = buf->backend.cpu;

	return 0;
}

static const
struct lttng_ust_client_lib_ring_buffer_client_cb client_cb = {
	.parent = {
		.ring_buffer_clock_read = client_ring_buffer_clock_read,
		.record_header_size = client_record_header_size,
		.subbuffer_header_size = client_packet_header_size,
		.buffer_begin = client_buffer_begin,
		.buffer_end = client_buffer_end,
		.buffer_create = client_buffer_create,
		.buffer_finalize = client_buffer_finalize,
		.content_size_field = client_content_size_field,
		.packet_size_field = client_packet_size_field,
	},
	.timestamp_begin = client_timestamp_begin,
	.timestamp_end = client_timestamp_end,
	.events_discarded = client_events_discarded,
	.content_size = client_content_size,
	.packet_size = client_packet_size,
	.stream_id = client_stream_id,
	.current_timestamp = client_current_timestamp,
	.sequence_number = client_sequence_number,
	.instance_id = client_instance_id,
};

static const struct lttng_ust_lib_ring_buffer_config client_config = {
	.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
	.cb.record_header_size = client_record_header_size,
	.cb.subbuffer_header_size = client_packet_header_size,
	.cb.buffer_begin = client_buffer_begin,
	.cb.buffer_end = client_buffer_end,
	.cb.buffer_create = client_buffer_create,
	.cb.buffer_finalize = client_buffer_finalize,
	.cb.content_size_field = client_content_size_field,
	.cb.packet_size_field = client_packet_size_field,

	.tsc_bits = LTTNG_COMPACT_TSC_BITS,
	.alloc = RING_BUFFER_ALLOC_PER_CPU,
	.sync = RING_BUFFER_SYNC_GLOBAL,
	.mode = RING_BUFFER_MODE_TEMPLATE,
	.backend = RING_BUFFER_PAGE,
	.output = RING_BUFFER_MMAP,
	.oops = RING_BUFFER_OOPS_CONSISTENCY,
	.ipi = RING_BUFFER_NO_IPI_BARRIER,
	.wakeup = LTTNG_CLIENT_WAKEUP,
	.client_type = LTTNG_CLIENT_TYPE,

	.cb_ptr = &client_cb.parent,
};

static
struct lttng_channel *_channel_create(const char *name,
		void *buf_addr,
		size_t subbuf_size, size_t num_subbuf,
		unsigned int switch_timer_interval,
		unsigned int read_timer_interval,
		unsigned char *uuid,
		uint32_t chan_id,
		const int *stream_fds, int nr_stream_fds,
		int64_t blocking_timeout)
{
	struct lttng_channel chan_priv_init;
	struct lttng_ust_shm_handle *handle;
	struct lttng_channel *lttng_chan;
	void *priv;

	memset(&chan_priv_init, 0, sizeof(chan_priv_init));
	memcpy(chan_priv_init.uuid, uuid, LTTNG_UST_UUID_LEN);
	chan_priv_init.id = chan_id;
	handle = channel_create(&client_config, name,
			&priv, __alignof__(struct lttng_channel),
			sizeof(struct lttng_channel),
			&chan_priv_init,
			buf_addr, subbuf_size, num_subbuf,
			switch_timer_interval, read_timer_interval,
			stream_fds, nr_stream_fds, blocking_timeout);
	if (!handle)
		return NULL;
	lttng_chan = priv;
	lttng_chan->handle = handle;
	lttng_chan->chan = shmp(handle, handle->chan);
	return lttng_chan;
}
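
/*
 * channel_create() allocates the private struct lttng_channel together
 * with the ring buffer channel, using the alignment and size passed above,
 * initializes it from chan_priv_init, and hands back a pointer to it
 * through priv, which is why lttng_chan can be used directly after the
 * call.
 */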

static
void lttng_channel_destroy(struct lttng_channel *chan)
{
	channel_destroy(chan->chan, chan->handle, 1);
}

static
int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx,
		uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
	struct lttng_ust_stack_ctx *lttng_ctx = ctx->priv;
	struct lttng_client_ctx client_ctx;
	int ret, cpu;

	/* Compute internal size of context structures. */
	ctx_get_struct_size(lttng_ctx->chan_ctx, &client_ctx.packet_context_len,
			APP_CTX_ENABLED);
	ctx_get_struct_size(lttng_ctx->event_ctx, &client_ctx.event_context_len,
			APP_CTX_ENABLED);

	cpu = lib_ring_buffer_get_cpu(&client_config);
	if (cpu < 0)
		return -EPERM;
	ctx->cpu = cpu;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		if (event_id > 30)
			ctx->rflags |= LTTNG_RFLAG_EXTENDED;
		break;
	case 2:	/* large */
		if (event_id > 65534)
			ctx->rflags |= LTTNG_RFLAG_EXTENDED;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	ret = lib_ring_buffer_reserve(&client_config, ctx, &client_ctx);
	if (caa_unlikely(ret))
		goto put;
	if (lib_ring_buffer_backend_get_pages(&client_config, ctx,
			&ctx->backend_pages)) {
		ret = -EPERM;
		goto put;
	}
	lttng_write_event_header(&client_config, ctx, event_id);
	return 0;
put:
	lib_ring_buffer_put_cpu(&client_config);
	return ret;
}
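
/*
 * The event_id thresholds above match the escape values written by
 * lttng_write_event_header_slow(): a compact header encodes event IDs
 * 0..30 inline (31 escapes to the extended header), and a large header
 * encodes IDs 0..65534 inline (65535 escapes).
 */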

static
void lttng_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	lib_ring_buffer_commit(&client_config, ctx);
	lib_ring_buffer_put_cpu(&client_config);
}

static
void lttng_event_write(struct lttng_ust_lib_ring_buffer_ctx *ctx, const void *src,
		size_t len)
{
	lib_ring_buffer_write(&client_config, ctx, src, len);
}

static
void lttng_event_strcpy(struct lttng_ust_lib_ring_buffer_ctx *ctx, const char *src,
		size_t len)
{
	lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
}

#if 0
static
wait_queue_head_t *lttng_get_reader_wait_queue(struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return &chan->read_wait;
}

static
wait_queue_head_t *lttng_get_hp_wait_queue(struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return &chan->hp_wait;
}
#endif //0

static
int lttng_is_finalized(struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return lib_ring_buffer_channel_is_finalized(chan);
}

static
int lttng_is_disabled(struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return lib_ring_buffer_channel_is_disabled(chan);
}

static
int lttng_flush_buffer(struct lttng_ust_lib_ring_buffer_channel *chan,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer *buf;
	int cpu;

	for_each_channel_cpu(cpu, chan) {
		int shm_fd, wait_fd, wakeup_fd;
		uint64_t memory_map_size;

		buf = channel_get_ring_buffer(&client_config, chan,
				cpu, handle, &shm_fd, &wait_fd,
				&wakeup_fd, &memory_map_size);
		lib_ring_buffer_switch(&client_config, buf,
				SWITCH_ACTIVE, handle);
	}
	return 0;
}

static struct lttng_transport lttng_relay_transport = {
	.name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap",
	.ops = {
		.struct_size = sizeof(struct lttng_ust_channel_ops),
		.priv = __LTTNG_COMPOUND_LITERAL(struct lttng_ust_channel_ops_private, {
			.pub = &lttng_relay_transport.ops,
			.channel_create = _channel_create,
			.channel_destroy = lttng_channel_destroy,
			.packet_avail_size = NULL,	/* Would be racy anyway */
			.is_finalized = lttng_is_finalized,
			.is_disabled = lttng_is_disabled,
			.flush_buffer = lttng_flush_buffer,
		}),
		.event_reserve = lttng_event_reserve,
		.event_commit = lttng_event_commit,
		.event_write = lttng_event_write,
		.event_strcpy = lttng_event_strcpy,
	},
	.client_config = &client_config,
};

void RING_BUFFER_MODE_TEMPLATE_INIT(void)
{
	DBG("LTT : ltt ring buffer client \"%s\" init\n",
		"relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
	lttng_transport_register(&lttng_relay_transport);
}

void RING_BUFFER_MODE_TEMPLATE_EXIT(void)
{
	DBG("LTT : ltt ring buffer client \"%s\" exit\n",
		"relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
	lttng_transport_unregister(&lttng_relay_transport);
}
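
/*
 * Sketch of how this template is typically instantiated (the wrapper file
 * name and macro values below are an assumption for illustration, not part
 * of this file): a client .c file defines the template macros and then
 * includes this header, for example:
 *
 *	#define RING_BUFFER_MODE_TEMPLATE		RING_BUFFER_DISCARD
 *	#define RING_BUFFER_MODE_TEMPLATE_STRING	"discard"
 *	#define RING_BUFFER_MODE_TEMPLATE_INIT	lttng_ring_buffer_client_discard_init
 *	#define RING_BUFFER_MODE_TEMPLATE_EXIT	lttng_ring_buffer_client_discard_exit
 *	#define LTTNG_CLIENT_TYPE			LTTNG_CLIENT_DISCARD
 *	#define LTTNG_CLIENT_WAKEUP			RING_BUFFER_WAKEUP_BY_TIMER
 *	#include "lttng-ring-buffer-client.h"
 *
 * which registers a "relay-discard-mmap" transport at init time.
 */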