Add missing alignment after header write
[lttng-modules.git] / ltt-ring-buffer-client.h

/*
 * ltt-ring-buffer-client.h
 *
 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng lib ring buffer client template.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <linux/module.h>
#include <linux/types.h>
#include "lib/bitfield.h"
#include "wrapper/vmalloc.h"    /* for wrapper_vmalloc_sync_all() */
#include "wrapper/trace-clock.h"
#include "ltt-events.h"
#include "ltt-tracer.h"
#include "wrapper/ringbuffer/frontend_types.h"

/*
 * Keep the natural field alignment for _each field_ within this structure if
 * you ever add/remove a field from this header. Packed attribute is not used
 * because gcc generates poor code on at least powerpc and mips. Don't ever
 * let gcc add padding between the structure elements.
 *
 * The guarantee we have with timestamps is that all the events in a
 * packet are included (inclusive) within the begin/end timestamps of
 * the packet. Another guarantee we have is that the "timestamp begin",
 * as well as the event timestamps, are monotonically increasing (never
 * decrease) when moving forward in a stream (physically). But this
 * guarantee does not apply to "timestamp end", because it is sampled at
 * commit time, which is not ordered with respect to space reservation.
 */

struct packet_header {
        /* Trace packet header */
        uint32_t magic;                 /*
                                         * Trace magic number.
                                         * Contains endianness information.
                                         */
        uint8_t uuid[16];
        uint32_t stream_id;

        struct {
                /* Stream packet context */
                uint64_t timestamp_begin;       /* Cycle count at subbuffer start */
                uint64_t timestamp_end;         /* Cycle count at subbuffer end */
                uint32_t events_discarded;      /*
                                                 * Events lost in this subbuffer since
                                                 * the beginning of the trace.
                                                 * (may overflow)
                                                 */
                uint32_t content_size;          /* Size of data in subbuffer */
                uint32_t packet_size;           /* Subbuffer size (includes padding) */
                uint32_t cpu_id;                /* CPU id associated with stream */
                uint8_t header_end;             /* End of header */
        } ctx;
};


static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
{
        return trace_clock_read64();
}

static inline
size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
{
        int i;
        size_t orig_offset = offset;

        if (likely(!ctx))
                return 0;
        for (i = 0; i < ctx->nr_fields; i++)
                offset += ctx->fields[i].get_size(offset);
        return offset - orig_offset;
}

static inline
void ctx_record(struct lib_ring_buffer_ctx *bufctx,
                struct ltt_channel *chan,
                struct lttng_ctx *ctx)
{
        int i;

        if (likely(!ctx))
                return;
        for (i = 0; i < ctx->nr_fields; i++)
                ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
}

/*
 * record_header_size - Calculate the header size and padding necessary.
 * @config: ring buffer instance configuration
 * @chan: channel
 * @offset: offset in the write buffer
 * @pre_header_padding: padding to add before the header (output)
 * @ctx: reservation context
 *
 * Returns the event header size (including padding).
 *
 * The payload must itself determine its own alignment from the biggest type it
 * contains.
 */
static __inline__
unsigned char record_header_size(const struct lib_ring_buffer_config *config,
                                 struct channel *chan, size_t offset,
                                 size_t *pre_header_padding,
                                 struct lib_ring_buffer_ctx *ctx)
{
        struct ltt_channel *ltt_chan = channel_get_private(chan);
        struct ltt_event *event = ctx->priv;
        size_t orig_offset = offset;
        size_t padding;

        switch (ltt_chan->header_type) {
        case 1: /* compact */
                padding = lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
                offset += padding;
                if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
                        offset += sizeof(uint32_t);     /* id and timestamp */
                } else {
                        /* Minimum space taken by 5-bit id */
                        offset += sizeof(uint8_t);
                        /* Align extended struct on largest member */
                        offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
                        offset += sizeof(uint32_t);     /* id */
                        offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
                        offset += sizeof(uint64_t);     /* timestamp */
                }
                break;
        case 2: /* large */
                padding = lib_ring_buffer_align(offset, ltt_alignof(uint16_t));
                offset += padding;
                offset += sizeof(uint16_t);
                if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
                        offset += lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
                        offset += sizeof(uint32_t);     /* timestamp */
                } else {
                        /* Align extended struct on largest member */
                        offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
                        offset += sizeof(uint32_t);     /* id */
                        offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
                        offset += sizeof(uint64_t);     /* timestamp */
                }
                break;
        default:
                padding = 0;
                WARN_ON_ONCE(1);
        }
        offset += ctx_get_size(offset, event->ctx);
        offset += ctx_get_size(offset, ltt_chan->ctx);

        *pre_header_padding = padding;
        return offset - orig_offset;
}
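
/*
 * Worked example (illustrative, not from the original source): starting from
 * an aligned offset (e.g. 0) and with no context fields attached, the sizes
 * computed above are:
 *
 *   compact, non-extended: 4 bytes (one 32-bit id/timestamp word)
 *   compact, extended:     1 (id byte) + pad to 8 + 4 (id) + pad to 8
 *                          + 8 (timestamp) = 24 bytes
 *   large, non-extended:   2 (id) + pad to 4 + 4 (timestamp) = 8 bytes
 *   large, extended:       2 (id) + pad to 8 + 4 (id) + pad to 8
 *                          + 8 (timestamp) = 24 bytes
 *
 * Any per-event or per-channel context fields are then appended on top of
 * that by the two ctx_get_size() calls.
 */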

#include "wrapper/ringbuffer/api.h"

static
void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
                                 struct lib_ring_buffer_ctx *ctx,
                                 uint32_t event_id);

/*
 * ltt_write_event_header
 *
 * Writes the event header to the offset (already aligned on 32 bits).
 *
 * @config: ring buffer instance configuration
 * @ctx: reservation context
 * @event_id: event ID
 */
static __inline__
void ltt_write_event_header(const struct lib_ring_buffer_config *config,
                            struct lib_ring_buffer_ctx *ctx,
                            uint32_t event_id)
{
        struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
        struct ltt_event *event = ctx->priv;

        if (unlikely(ctx->rflags))
                goto slow_path;

        switch (ltt_chan->header_type) {
        case 1: /* compact */
        {
                uint32_t id_time = 0;

                bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
                bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
                lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
                break;
        }
        case 2: /* large */
        {
                uint32_t timestamp = (uint32_t) ctx->tsc;
                uint16_t id = event_id;

                lib_ring_buffer_write(config, ctx, &id, sizeof(id));
                lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
                lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
                break;
        }
        default:
                WARN_ON_ONCE(1);
        }

        ctx_record(ctx, ltt_chan, ltt_chan->ctx);
        ctx_record(ctx, ltt_chan, event->ctx);
        lib_ring_buffer_align_ctx(ctx, ctx->largest_align);

        return;

slow_path:
        ltt_write_event_header_slow(config, ctx, event_id);
}
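
/*
 * Illustration (added for clarity, not in the original source): on this fast
 * path the compact header packs the event id and the low timestamp bits into
 * a single 32-bit word through lib/bitfield.h:
 *
 *   bits 0..4 : event id (values 0..30; 31 is reserved as an escape value)
 *   bits 5..31: low 27 bits of ctx->tsc
 *
 * The exact bit numbering follows the endianness handling in bitfield.h; the
 * split itself (5 id bits + 27 timestamp bits) is what the two
 * bt_bitfield_write() calls above encode.
 */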

static
void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
                                 struct lib_ring_buffer_ctx *ctx,
                                 uint32_t event_id)
{
        struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
        struct ltt_event *event = ctx->priv;

        switch (ltt_chan->header_type) {
        case 1: /* compact */
                if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
                        uint32_t id_time = 0;

                        bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
                        bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
                        lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
                } else {
                        uint8_t id = 0;
                        uint64_t timestamp = ctx->tsc;

                        bt_bitfield_write(&id, uint8_t, 0, 5, 31);
                        lib_ring_buffer_write(config, ctx, &id, sizeof(id));
                        /* Align extended struct on largest member */
                        lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
                        lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
                        lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
                        lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
                }
                break;
        case 2: /* large */
        {
                if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
                        uint32_t timestamp = (uint32_t) ctx->tsc;
                        uint16_t id = event_id;

                        lib_ring_buffer_write(config, ctx, &id, sizeof(id));
                        lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
                        lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
                } else {
                        uint16_t id = 65535;
                        uint64_t timestamp = ctx->tsc;

                        lib_ring_buffer_write(config, ctx, &id, sizeof(id));
                        /* Align extended struct on largest member */
                        lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
                        lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
                        lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
                        lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
                }
                break;
        }
        default:
                WARN_ON_ONCE(1);
        }
        ctx_record(ctx, ltt_chan, ltt_chan->ctx);
        ctx_record(ctx, ltt_chan, event->ctx);
        lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}
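
/*
 * Note (added commentary): the extended headers written above start with a
 * reserved "escape" id: 31 for the 5-bit compact id and 65535 for the 16-bit
 * large id. This matches ltt_event_reserve() below, which sets
 * LTT_RFLAG_EXTENDED for event ids above 30 (compact) or above 65534 (large),
 * so a reader can tell that the full 32-bit event id and 64-bit timestamp
 * follow.
 */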

static const struct lib_ring_buffer_config client_config;

static u64 client_ring_buffer_clock_read(struct channel *chan)
{
        return lib_ring_buffer_clock_read(chan);
}

static
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
                                 struct channel *chan, size_t offset,
                                 size_t *pre_header_padding,
                                 struct lib_ring_buffer_ctx *ctx)
{
        return record_header_size(config, chan, offset,
                                  pre_header_padding, ctx);
}

/**
 * client_packet_header_size - called on buffer-switch to a new sub-buffer
 *
 * Return header size without padding after the structure. Don't use packed
 * structure because gcc generates inefficient code on some architectures
 * (powerpc, mips, ...).
 */
static size_t client_packet_header_size(void)
{
        return offsetof(struct packet_header, ctx.header_end);
}

static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
                                unsigned int subbuf_idx)
{
        struct channel *chan = buf->backend.chan;
        struct packet_header *header =
                (struct packet_header *)
                        lib_ring_buffer_offset_address(&buf->backend,
                                subbuf_idx * chan->backend.subbuf_size);
        struct ltt_channel *ltt_chan = channel_get_private(chan);
        struct ltt_session *session = ltt_chan->session;

        header->magic = CTF_MAGIC_NUMBER;
        memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
        header->stream_id = ltt_chan->id;
        header->ctx.timestamp_begin = tsc;
        header->ctx.timestamp_end = 0;
        header->ctx.events_discarded = 0;
        header->ctx.content_size = 0xFFFFFFFF; /* for debugging */
        header->ctx.packet_size = 0xFFFFFFFF;
        header->ctx.cpu_id = buf->backend.cpu;
}
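
/*
 * Note (added commentary): content_size and packet_size are deliberately left
 * at 0xFFFFFFFF here; the real values are filled in by client_buffer_end()
 * once the amount of data written to the sub-buffer is known.
 */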

/*
 * offset is assumed to never be 0 here: never deliver a completely empty
 * subbuffer. data_size is between 1 and subbuf_size.
 */
static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
                              unsigned int subbuf_idx, unsigned long data_size)
{
        struct channel *chan = buf->backend.chan;
        struct packet_header *header =
                (struct packet_header *)
                        lib_ring_buffer_offset_address(&buf->backend,
                                subbuf_idx * chan->backend.subbuf_size);
        unsigned long records_lost = 0;

        header->ctx.timestamp_end = tsc;
        header->ctx.content_size = data_size * CHAR_BIT; /* in bits */
        header->ctx.packet_size = PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
        records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
        records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
        records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
        header->ctx.events_discarded = records_lost;
}
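
/*
 * Example (illustrative): both sizes above are expressed in bits. Assuming
 * 4 KiB pages, a sub-buffer holding data_size = 100 bytes ends up with
 * content_size = 100 * 8 = 800 bits and
 * packet_size = PAGE_ALIGN(100) * 8 = 4096 * 8 = 32768 bits; the difference
 * is padding at the end of the packet.
 */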

static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
                                int cpu, const char *name)
{
        return 0;
}

static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
{
}

static const struct lib_ring_buffer_config client_config = {
        .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
        .cb.record_header_size = client_record_header_size,
        .cb.subbuffer_header_size = client_packet_header_size,
        .cb.buffer_begin = client_buffer_begin,
        .cb.buffer_end = client_buffer_end,
        .cb.buffer_create = client_buffer_create,
        .cb.buffer_finalize = client_buffer_finalize,

        .tsc_bits = 32,
        .alloc = RING_BUFFER_ALLOC_PER_CPU,
        .sync = RING_BUFFER_SYNC_PER_CPU,
        .mode = RING_BUFFER_MODE_TEMPLATE,
        .backend = RING_BUFFER_PAGE,
        .output = RING_BUFFER_OUTPUT_TEMPLATE,
        .oops = RING_BUFFER_OOPS_CONSISTENCY,
        .ipi = RING_BUFFER_IPI_BARRIER,
        .wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
};

static
struct channel *_channel_create(const char *name,
                                struct ltt_channel *ltt_chan, void *buf_addr,
                                size_t subbuf_size, size_t num_subbuf,
                                unsigned int switch_timer_interval,
                                unsigned int read_timer_interval)
{
        return channel_create(&client_config, name, ltt_chan, buf_addr,
                              subbuf_size, num_subbuf, switch_timer_interval,
                              read_timer_interval);
}

static
void ltt_channel_destroy(struct channel *chan)
{
        channel_destroy(chan);
}

static
struct lib_ring_buffer *ltt_buffer_read_open(struct channel *chan)
{
        struct lib_ring_buffer *buf;
        int cpu;

        for_each_channel_cpu(cpu, chan) {
                buf = channel_get_ring_buffer(&client_config, chan, cpu);
                if (!lib_ring_buffer_open_read(buf))
                        return buf;
        }
        return NULL;
}

static
int ltt_buffer_has_read_closed_stream(struct channel *chan)
{
        struct lib_ring_buffer *buf;
        int cpu;

        for_each_channel_cpu(cpu, chan) {
                buf = channel_get_ring_buffer(&client_config, chan, cpu);
                if (!atomic_long_read(&buf->active_readers))
                        return 1;
        }
        return 0;
}

static
void ltt_buffer_read_close(struct lib_ring_buffer *buf)
{
        lib_ring_buffer_release_read(buf);
}

static
int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx,
                      uint32_t event_id)
{
        struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
        int ret, cpu;

        cpu = lib_ring_buffer_get_cpu(&client_config);
        if (cpu < 0)
                return -EPERM;
        ctx->cpu = cpu;

        switch (ltt_chan->header_type) {
        case 1: /* compact */
                if (event_id > 30)
                        ctx->rflags |= LTT_RFLAG_EXTENDED;
                break;
        case 2: /* large */
                if (event_id > 65534)
                        ctx->rflags |= LTT_RFLAG_EXTENDED;
                break;
        default:
                WARN_ON_ONCE(1);
        }

        ret = lib_ring_buffer_reserve(&client_config, ctx);
        if (ret)
                goto put;
        ltt_write_event_header(&client_config, ctx, event_id);
        return 0;
put:
        lib_ring_buffer_put_cpu(&client_config);
        return ret;
}

static
void ltt_event_commit(struct lib_ring_buffer_ctx *ctx)
{
        lib_ring_buffer_commit(&client_config, ctx);
        lib_ring_buffer_put_cpu(&client_config);
}

static
void ltt_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
                     size_t len)
{
        lib_ring_buffer_write(&client_config, ctx, src, len);
}
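
/*
 * Illustrative sketch (not part of the original source): roughly how a probe
 * is expected to drive the three ops above, as wired into ltt_relay_transport
 * below. "payload" and its type are placeholders; the ctx init call follows
 * the lib ring buffer frontend API assumed by this client.
 *
 *      struct lib_ring_buffer_ctx ctx;
 *      int ret;
 *
 *      lib_ring_buffer_ctx_init(&ctx, ltt_chan->chan, event,
 *                               sizeof(payload), ltt_alignof(payload), -1);
 *      ret = ltt_chan->ops->event_reserve(&ctx, event->id);
 *      if (ret < 0)
 *              return;         (drop the event on reservation failure)
 *      ltt_chan->ops->event_write(&ctx, &payload, sizeof(payload));
 *      ltt_chan->ops->event_commit(&ctx);
 */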

static
wait_queue_head_t *ltt_get_writer_buf_wait_queue(struct channel *chan, int cpu)
{
        struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
                                        chan, cpu);
        return &buf->write_wait;
}

static
wait_queue_head_t *ltt_get_hp_wait_queue(struct channel *chan)
{
        return &chan->hp_wait;
}

static
int ltt_is_finalized(struct channel *chan)
{
        return lib_ring_buffer_channel_is_finalized(chan);
}

static
int ltt_is_disabled(struct channel *chan)
{
        return lib_ring_buffer_channel_is_disabled(chan);
}

static struct ltt_transport ltt_relay_transport = {
        .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
        .owner = THIS_MODULE,
        .ops = {
                .channel_create = _channel_create,
                .channel_destroy = ltt_channel_destroy,
                .buffer_read_open = ltt_buffer_read_open,
                .buffer_has_read_closed_stream =
                        ltt_buffer_has_read_closed_stream,
                .buffer_read_close = ltt_buffer_read_close,
                .event_reserve = ltt_event_reserve,
                .event_commit = ltt_event_commit,
                .event_write = ltt_event_write,
                .packet_avail_size = NULL,      /* Would be racy anyway */
                .get_writer_buf_wait_queue = ltt_get_writer_buf_wait_queue,
                .get_hp_wait_queue = ltt_get_hp_wait_queue,
                .is_finalized = ltt_is_finalized,
                .is_disabled = ltt_is_disabled,
        },
};

static int __init ltt_ring_buffer_client_init(void)
{
        /*
         * This vmalloc sync all also takes care of the lib ring buffer
         * vmalloc'd module pages when it is built as a module into LTTng.
         */
        wrapper_vmalloc_sync_all();
        ltt_transport_register(&ltt_relay_transport);
        return 0;
}

module_init(ltt_ring_buffer_client_init);

static void __exit ltt_ring_buffer_client_exit(void)
{
        ltt_transport_unregister(&ltt_relay_transport);
}

module_exit(ltt_ring_buffer_client_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
                   " client");