Update stream packet and event headers
[lttng-modules.git] / ltt-ring-buffer-client.h
1 /*
2 * ltt-ring-buffer-client.h
3 *
4 * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * LTTng lib ring buffer client template.
7 *
8 * Dual LGPL v2.1/GPL v2 license.
9 */
10
11 #include <linux/module.h>
12 #include <linux/types.h>
13 #include "lib/bitfield.h"
14 #include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
15 #include "wrapper/trace-clock.h"
16 #include "ltt-events.h"
17 #include "ltt-tracer.h"
18 #include "wrapper/ringbuffer/frontend_types.h"
19
20 /*
21 * Keep the natural field alignment for _each field_ within this structure if
22 * you ever add/remove a field from this header. Packed attribute is not used
23 * because gcc generates poor code on at least powerpc and mips. Don't ever
24 * let gcc add padding between the structure elements.
25 */
26
/*
 * On-disk/on-wire CTF packet header. Field order, types and natural
 * alignment define the binary trace format — do not reorder or repack
 * (see the layout note above).
 */
struct packet_header {
	/* Trace packet header */
	uint32_t magic;			/*
					 * Trace magic number.
					 * contains endianness information.
					 */
	uint8_t uuid[16];		/* Unique ID of the trace session */
	uint32_t stream_id;		/* Stream (channel) identifier */

	struct {
		/* Stream packet context */
		uint64_t timestamp_begin;	/* Cycle count at subbuffer start */
		uint64_t timestamp_end;		/* Cycle count at subbuffer end */
		uint32_t events_discarded;	/*
						 * Events lost in this subbuffer since
						 * the beginning of the trace.
						 * (may overflow)
						 */
		uint32_t content_size;	/* Size of data in subbuffer */
		uint32_t packet_size;	/* Subbuffer size (include padding) */
		uint32_t cpu_id;	/* CPU id associated with stream */
		uint8_t header_end;	/* End of header */
	} ctx;
};
51
52
/* Read the 64-bit trace clock; the channel argument is unused here. */
static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
{
	return trace_clock_read64();
}
57
58 /*
59 * record_header_size - Calculate the header size and padding necessary.
60 * @config: ring buffer instance configuration
61 * @chan: channel
62 * @offset: offset in the write buffer
63 * @data_size: size of the payload
64 * @pre_header_padding: padding to add before the header (output)
65 * @rflags: reservation flags
66 * @ctx: reservation context
67 *
68 * Returns the event header size (including padding).
69 *
70 * The payload must itself determine its own alignment from the biggest type it
71 * contains.
72 */
73 static __inline__
74 unsigned char record_header_size(const struct lib_ring_buffer_config *config,
75 struct channel *chan, size_t offset,
76 size_t data_size, size_t *pre_header_padding,
77 unsigned int rflags,
78 struct lib_ring_buffer_ctx *ctx)
79 {
80 struct ltt_channel *ltt_chan = channel_get_private(chan);
81 size_t orig_offset = offset;
82 size_t padding;
83
84 switch (ltt_chan->header_type) {
85 case 1: /* compact */
86 padding = lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
87 offset += padding;
88 if (!(rflags & RING_BUFFER_RFLAG_FULL_TSC)) {
89 offset += sizeof(uint32_t); /* id and timestamp */
90 } else {
91 /* Minimum space taken by 5-bit id */
92 offset += sizeof(uint8_t);
93 /* Align extended struct on largest member */
94 offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
95 offset += sizeof(uint32_t); /* id */
96 offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
97 offset += sizeof(uint64_t); /* timestamp */
98 }
99 break;
100 case 2: /* large */
101 padding = lib_ring_buffer_align(offset, ltt_alignof(uint16_t));
102 offset += padding;
103 offset += sizeof(uint16_t);
104 if (!(rflags & RING_BUFFER_RFLAG_FULL_TSC)) {
105 offset += lib_ring_buffer_align(offset, ltt_alignof(uint32_t));
106 offset += sizeof(uint32_t); /* timestamp */
107 } else {
108 /* Align extended struct on largest member */
109 offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
110 offset += sizeof(uint32_t); /* id */
111 offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
112 offset += sizeof(uint64_t); /* timestamp */
113
114 }
115 break;
116 default:
117 WARN_ON(1);
118 }
119
120 *pre_header_padding = padding;
121 return offset - orig_offset;
122 }
123
124 #include "wrapper/ringbuffer/api.h"
125
126 extern
127 void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
128 struct lib_ring_buffer_ctx *ctx,
129 u16 eID, u32 event_size);
130
131 /*
132 * ltt_write_event_header
133 *
134 * Writes the event header to the offset (already aligned on 32-bits).
135 *
136 * @config: ring buffer instance configuration
137 * @ctx: reservation context
138 * @eID : event ID
139 * @event_size : size of the event, excluding the event header.
140 */
141 static __inline__
142 void ltt_write_event_header(const struct lib_ring_buffer_config *config,
143 struct lib_ring_buffer_ctx *ctx,
144 u16 eID, u32 event_size)
145 {
146 struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
147
148 if (unlikely(ctx->rflags))
149 goto slow_path;
150
151 switch (ltt_chan->header_type) {
152 case 1: /* compact */
153 {
154 uint32_t id_time = 0;
155
156 bt_bitfield_write(&id_time, uint32_t, 0, 5, eID);
157 bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
158 lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
159 break;
160 }
161 case 2: /* large */
162 {
163 uint16_t event_id = eID;
164 uint32_t timestamp = (uint32_t) ctx->tsc;
165
166 lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
167 lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
168 lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
169 break;
170 }
171 default:
172 WARN_ON(1);
173 }
174 return;
175
176 slow_path:
177 ltt_write_event_header_slow(config, ctx, eID, event_size);
178 }
179
180 /*
181 * TODO: For now, we only support 65536 event ids per channel.
182 */
183 void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
184 struct lib_ring_buffer_ctx *ctx,
185 u16 eID, u32 event_size)
186 {
187 struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
188
189 switch (ltt_chan->header_type) {
190 case 1: /* compact */
191 if (!(ctx->rflags & RING_BUFFER_RFLAG_FULL_TSC)) {
192 uint32_t id_time = 0;
193
194 bt_bitfield_write(&id_time, uint32_t, 0, 5, eID);
195 bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
196 lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
197 } else {
198 uint8_t id = 0;
199 uint32_t event_id = (uint32_t) eID;
200 uint64_t timestamp = ctx->tsc;
201
202 bt_bitfield_write(&id, uint8_t, 0, 5, 31);
203 lib_ring_buffer_write(config, ctx, &id, sizeof(id));
204 /* Align extended struct on largest member */
205 lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
206 lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
207 lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
208 lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
209 }
210 break;
211 case 2: /* large */
212 {
213 if (!(ctx->rflags & RING_BUFFER_RFLAG_FULL_TSC)) {
214 uint16_t event_id = eID;
215 uint32_t timestamp = (uint32_t) ctx->tsc;
216
217 lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
218 lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint32_t));
219 lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
220 } else {
221 uint16_t event_id = 65535;
222 uint32_t event_id_ext = (uint32_t) eID;
223 uint64_t timestamp = ctx->tsc;
224
225 lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
226 /* Align extended struct on largest member */
227 lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
228 lib_ring_buffer_write(config, ctx, &event_id_ext, sizeof(event_id_ext));
229 lib_ring_buffer_align_ctx(ctx, ltt_alignof(uint64_t));
230 lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
231 }
232 break;
233 }
234 default:
235 WARN_ON(1);
236 }
237 }
238
239 static const struct lib_ring_buffer_config client_config;
240
/* Ring buffer callback: clock source for this client. */
static u64 client_ring_buffer_clock_read(struct channel *chan)
{
	return lib_ring_buffer_clock_read(chan);
}
245
/* Ring buffer callback: delegates to the shared record_header_size() above. */
static
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
				 struct channel *chan, size_t offset,
				 size_t data_size,
				 size_t *pre_header_padding,
				 unsigned int rflags,
				 struct lib_ring_buffer_ctx *ctx)
{
	return record_header_size(config, chan, offset, data_size,
				  pre_header_padding, rflags, ctx);
}
257
258 /**
259 * client_packet_header_size - called on buffer-switch to a new sub-buffer
260 *
261 * Return header size without padding after the structure. Don't use packed
262 * structure because gcc generates inefficient code on some architectures
263 * (powerpc, mips..)
264 */
static size_t client_packet_header_size(void)
{
	/* Size up to (excluding) the header_end marker field. */
	return offsetof(struct packet_header, ctx.header_end);
}
269
/* Initialize the CTF packet header at the start of a new subbuffer. */
static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
				unsigned int subbuf_idx)
{
	struct channel *chan = buf->backend.chan;
	/* The packet header sits at the very beginning of the subbuffer. */
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size);
	struct ltt_channel *ltt_chan = channel_get_private(chan);
	struct ltt_session *session = ltt_chan->session;

	header->magic = CTF_MAGIC_NUMBER;
	/*
	 * NOTE(review): assumes sizeof(session->uuid) == 16 to match
	 * header->uuid[16] — confirm against the session uuid type.
	 */
	memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
	header->stream_id = ltt_chan->id;
	header->ctx.timestamp_begin = tsc;
	header->ctx.timestamp_end = 0;		/* filled in by client_buffer_end() */
	header->ctx.events_discarded = 0;	/* filled in by client_buffer_end() */
	header->ctx.content_size = 0xFFFFFFFF; /* for debugging */
	header->ctx.packet_size = 0xFFFFFFFF;
	header->ctx.cpu_id = buf->backend.cpu;
}
291
292 /*
293 * offset is assumed to never be 0 here : never deliver a completely empty
294 * subbuffer. data_size is between 1 and subbuf_size.
295 */
296 static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
297 unsigned int subbuf_idx, unsigned long data_size)
298 {
299 struct channel *chan = buf->backend.chan;
300 struct packet_header *header =
301 (struct packet_header *)
302 lib_ring_buffer_offset_address(&buf->backend,
303 subbuf_idx * chan->backend.subbuf_size);
304 unsigned long records_lost = 0;
305
306 header->ctx.timestamp_end = tsc;
307 header->ctx.content_size = data_size;
308 header->ctx.packet_size = PAGE_ALIGN(data_size);
309 records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
310 records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
311 records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
312 header->ctx.events_discarded = records_lost;
313 }
314
/* Per-buffer creation hook: nothing to set up for this client. */
static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
				int cpu, const char *name)
{
	return 0;
}
320
/* Per-buffer teardown hook: nothing to release for this client. */
static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
{
}
324
/* Ring buffer configuration for this client (per-CPU, page-backed, splice). */
static const struct lib_ring_buffer_config client_config = {
	/* Callbacks wired to the client functions defined above. */
	.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
	.cb.record_header_size = client_record_header_size,
	.cb.subbuffer_header_size = client_packet_header_size,
	.cb.buffer_begin = client_buffer_begin,
	.cb.buffer_end = client_buffer_end,
	.cb.buffer_create = client_buffer_create,
	.cb.buffer_finalize = client_buffer_finalize,

	.tsc_bits = 32,		/* compact headers carry 27 TSC bits; overflow forces slow path */
	.alloc = RING_BUFFER_ALLOC_PER_CPU,
	.sync = RING_BUFFER_SYNC_PER_CPU,
	.mode = RING_BUFFER_MODE_TEMPLATE,
	.backend = RING_BUFFER_PAGE,
	.output = RING_BUFFER_SPLICE,
	.oops = RING_BUFFER_OOPS_CONSISTENCY,
	.ipi = RING_BUFFER_IPI_BARRIER,
	.wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
};
344
/* Transport op: create a channel bound to this client's configuration. */
static
struct channel *_channel_create(const char *name,
				struct ltt_channel *ltt_chan, void *buf_addr,
				size_t subbuf_size, size_t num_subbuf,
				unsigned int switch_timer_interval,
				unsigned int read_timer_interval)
{
	return channel_create(&client_config, name, ltt_chan, buf_addr,
			      subbuf_size, num_subbuf, switch_timer_interval,
			      read_timer_interval);
}
356
/* Transport op: tear down a channel created by _channel_create(). */
static
void ltt_channel_destroy(struct channel *chan)
{
	channel_destroy(chan);
}
362
363 static
364 struct lib_ring_buffer *ltt_buffer_read_open(struct channel *chan)
365 {
366 struct lib_ring_buffer *buf;
367 int cpu;
368
369 for_each_channel_cpu(cpu, chan) {
370 buf = channel_get_ring_buffer(&client_config, chan, cpu);
371 if (!lib_ring_buffer_open_read(buf))
372 return buf;
373 }
374 return NULL;
375 }
376
/* Transport op: release a buffer opened by ltt_buffer_read_open(). */
static
void ltt_buffer_read_close(struct lib_ring_buffer *buf)
{
	lib_ring_buffer_release_read(buf);

}
383
384 static
385 int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx)
386 {
387 int ret, cpu;
388
389 cpu = lib_ring_buffer_get_cpu(&client_config);
390 if (cpu < 0)
391 return -EPERM;
392 ctx->cpu = cpu;
393
394 ret = lib_ring_buffer_reserve(&client_config, ctx);
395 if (ret)
396 goto put;
397 return ret;
398
399 put:
400 lib_ring_buffer_put_cpu(&client_config);
401 return ret;
402 }
403
/* Transport op: commit a reserved slot and release the CPU reference. */
static
void ltt_event_commit(struct lib_ring_buffer_ctx *ctx)
{
	lib_ring_buffer_commit(&client_config, ctx);
	lib_ring_buffer_put_cpu(&client_config);
}
410
/* Transport op: copy len bytes of payload into the reserved slot. */
static
void ltt_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
		     size_t len)
{
	lib_ring_buffer_write(&client_config, ctx, src, len);
}
417
/* Transport op: expose the channel's reader wait queue for polling. */
static
wait_queue_head_t *ltt_get_reader_wait_queue(struct ltt_channel *chan)
{
	return &chan->chan->read_wait;
}
423
/* Transport descriptor registered with the LTTng core at module init. */
static struct ltt_transport ltt_relay_transport = {
	.name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
	.owner = THIS_MODULE,
	.ops = {
		.channel_create = _channel_create,
		.channel_destroy = ltt_channel_destroy,
		.buffer_read_open = ltt_buffer_read_open,
		.buffer_read_close = ltt_buffer_read_close,
		.event_reserve = ltt_event_reserve,
		.event_commit = ltt_event_commit,
		.event_write = ltt_event_write,
		.packet_avail_size = NULL,	/* Would be racy anyway */
		.get_reader_wait_queue = ltt_get_reader_wait_queue,
	},
};
439
/* Module entry point: sync vmalloc mappings, then register the transport. */
static int __init ltt_ring_buffer_client_init(void)
{
	/*
	 * This vmalloc sync all also takes care of the lib ring buffer
	 * vmalloc'd module pages when it is built as a module into LTTng.
	 */
	wrapper_vmalloc_sync_all();
	printk(KERN_INFO "LTT : ltt ring buffer client init\n");
	ltt_transport_register(&ltt_relay_transport);
	return 0;
}
451
452 module_init(ltt_ring_buffer_client_init);
453
/* Module exit point: unregister the transport registered at init. */
static void __exit ltt_ring_buffer_client_exit(void)
{
	printk(KERN_INFO "LTT : ltt ring buffer client exit\n");
	ltt_transport_unregister(&ltt_relay_transport);
}
459
460 module_exit(ltt_ring_buffer_client_exit);
461
462 MODULE_LICENSE("GPL and additional rights");
463 MODULE_AUTHOR("Mathieu Desnoyers");
464 MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
465 " client");
This page took 0.039103 seconds and 5 git commands to generate.