/*
 * lttng-ring-buffer-client.h
 *
 * LTTng lib ring buffer client template.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>
#include <lttng/ust-events.h>
#include "lttng/bitfield.h"
#include "clock.h"
#include "lttng-tracer.h"
#include "../libringbuffer/frontend_types.h"

#define LTTNG_COMPACT_EVENT_BITS	5
#define LTTNG_COMPACT_TSC_BITS		27
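
/*
 * Illustration (derived from the bt_bitfield_write() calls below): the
 * compact event header packs the event ID and the low-order timestamp
 * bits into a single 32-bit word:
 *
 *   bits  0..4  : event ID (values 0..30; the value 31 escapes to the
 *                 extended header, hence the event_id > 30 test in
 *                 lttng_event_reserve())
 *   bits  5..31 : 27 low-order bits of the timestamp
 */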

/*
 * Keep the natural field alignment for _each field_ within this structure if
 * you ever add/remove a field from this header. Packed attribute is not used
 * because gcc generates poor code on at least powerpc and mips. Don't ever
 * let gcc add padding between the structure elements.
 */

struct packet_header {
	/* Trace packet header */
	uint32_t magic;			/*
					 * Trace magic number.
					 * Contains endianness information.
					 */
	uint8_t uuid[LTTNG_UST_UUID_LEN];
	uint32_t stream_id;

	struct {
		/* Stream packet context */
		uint64_t timestamp_begin;	/* Cycle count at subbuffer start */
		uint64_t timestamp_end;		/* Cycle count at subbuffer end */
		uint64_t content_size;		/* Size of data in subbuffer */
		uint64_t packet_size;		/* Subbuffer size (includes padding) */
		unsigned long events_discarded;	/*
						 * Events lost in this subbuffer since
						 * the beginning of the trace.
						 * (may overflow)
						 */
		uint32_t cpu_id;		/* CPU id associated with stream */
		uint8_t header_end;		/* End of header */
	} ctx;
};
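
/*
 * This layout follows the CTF (Common Trace Format) packet header and
 * packet context; client_buffer_begin() and client_buffer_end() below
 * fill it in at sub-buffer switch time.
 */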


static inline uint64_t lib_ring_buffer_clock_read(struct channel *chan)
{
	return trace_clock_read64();
}

static inline
size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
{
	int i;
	size_t orig_offset = offset;

	if (caa_likely(!ctx))
		return 0;
	for (i = 0; i < ctx->nr_fields; i++)
		offset += ctx->fields[i].get_size(offset);
	return offset - orig_offset;
}

static inline
void ctx_record(struct lttng_ust_lib_ring_buffer_ctx *bufctx,
		struct lttng_channel *chan,
		struct lttng_ctx *ctx)
{
	int i;

	if (caa_likely(!ctx))
		return;
	for (i = 0; i < ctx->nr_fields; i++)
		ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
}

/*
 * record_header_size - Calculate the header size and padding necessary.
 * @config: ring buffer instance configuration
 * @chan: channel
 * @offset: offset in the write buffer
 * @pre_header_padding: padding to add before the header (output)
 * @ctx: reservation context
 *
 * Returns the event header size (including padding).
 *
 * The payload must itself determine its own alignment from the biggest type it
 * contains.
 */
static __inline__
unsigned char record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct channel *chan, size_t offset,
				 size_t *pre_header_padding,
				 struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	struct lttng_channel *lttng_chan = channel_get_private(chan);
	struct lttng_event *event = ctx->priv;
	size_t orig_offset = offset;
	size_t padding;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
		offset += padding;
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			offset += sizeof(uint32_t);	/* id and timestamp */
		} else {
			/* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
			offset += (LTTNG_COMPACT_EVENT_BITS + CHAR_BIT - 1) / CHAR_BIT;
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint32_t);	/* id */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint64_t);	/* timestamp */
		}
		break;
	case 2:	/* large */
		padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
		offset += padding;
		offset += sizeof(uint16_t);
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
			offset += sizeof(uint32_t);	/* timestamp */
		} else {
			/* Align extended struct on largest member */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint32_t);	/* id */
			offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
			offset += sizeof(uint64_t);	/* timestamp */
		}
		break;
	default:
		padding = 0;
		WARN_ON_ONCE(1);
	}
	offset += ctx_get_size(offset, event->ctx);
	offset += ctx_get_size(offset, lttng_chan->ctx);

	*pre_header_padding = padding;
	return offset - orig_offset;
}
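
/*
 * Worked example (assuming natural alignment): for a compact extended
 * header starting at an aligned offset, the size computed above is
 * 1 byte (5-bit id rounded up) + 7 bytes of padding + 4 bytes (full id)
 * + 4 bytes of padding + 8 bytes (full timestamp) = 24 bytes, before
 * any event/channel context fields are added.
 */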

#include "../libringbuffer/api.h"

static
void lttng_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
				   struct lttng_ust_lib_ring_buffer_ctx *ctx,
				   uint32_t event_id);

/*
 * lttng_write_event_header
 *
 * Writes the event header to the offset (already aligned on 32-bits).
 *
 * @config: ring buffer instance configuration
 * @ctx: reservation context
 * @event_id: event ID
 */
static __inline__
void lttng_write_event_header(const struct lttng_ust_lib_ring_buffer_config *config,
			      struct lttng_ust_lib_ring_buffer_ctx *ctx,
			      uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
	struct lttng_event *event = ctx->priv;

	if (caa_unlikely(ctx->rflags))
		goto slow_path;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
	{
		uint32_t id_time = 0;

		bt_bitfield_write(&id_time, uint32_t,
				  0,
				  LTTNG_COMPACT_EVENT_BITS,
				  event_id);
		bt_bitfield_write(&id_time, uint32_t,
				  LTTNG_COMPACT_EVENT_BITS,
				  LTTNG_COMPACT_TSC_BITS,
				  ctx->tsc);
		lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		break;
	}
	case 2:	/* large */
	{
		uint32_t timestamp = (uint32_t) ctx->tsc;
		uint16_t id = event_id;

		lib_ring_buffer_write(config, ctx, &id, sizeof(id));
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
		lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}

	ctx_record(ctx, lttng_chan, lttng_chan->ctx);
	ctx_record(ctx, lttng_chan, event->ctx);
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);

	return;

slow_path:
	lttng_write_event_header_slow(config, ctx, event_id);
}
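
/*
 * Note: any reservation flag (full timestamp needed or extended event
 * ID) sends the header write through the slow path below; the common
 * case writes a single 32-bit (compact) or 16-bit id plus 32-bit
 * timestamp (large) header.
 */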

static
void lttng_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
				   struct lttng_ust_lib_ring_buffer_ctx *ctx,
				   uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
	struct lttng_event *event = ctx->priv;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			uint32_t id_time = 0;

			bt_bitfield_write(&id_time, uint32_t,
					  0,
					  LTTNG_COMPACT_EVENT_BITS,
					  event_id);
			bt_bitfield_write(&id_time, uint32_t,
					  LTTNG_COMPACT_EVENT_BITS,
					  LTTNG_COMPACT_TSC_BITS,
					  ctx->tsc);
			lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		} else {
			uint8_t id = 0;
			uint64_t timestamp = ctx->tsc;

			bt_bitfield_write(&id, uint8_t,
					  0,
					  LTTNG_COMPACT_EVENT_BITS,
					  31);
			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	case 2:	/* large */
	{
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			uint32_t timestamp = (uint32_t) ctx->tsc;
			uint16_t id = event_id;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		} else {
			uint16_t id = 65535;
			uint64_t timestamp = ctx->tsc;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}
	ctx_record(ctx, lttng_chan, lttng_chan->ctx);
	ctx_record(ctx, lttng_chan, event->ctx);
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}
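
/*
 * The escape values written above (31 in the 5-bit compact id field,
 * 65535 in the 16-bit large id field) tell the trace reader that the
 * full 32-bit event ID and 64-bit timestamp follow in the extended
 * header, which is why lttng_event_reserve() only uses the non-extended
 * forms for IDs up to 30 (compact) or 65534 (large).
 */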

static const struct lttng_ust_lib_ring_buffer_config client_config;

static uint64_t client_ring_buffer_clock_read(struct channel *chan)
{
	return lib_ring_buffer_clock_read(chan);
}

static
size_t client_record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct channel *chan, size_t offset,
				 size_t *pre_header_padding,
				 struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	return record_header_size(config, chan, offset,
				  pre_header_padding, ctx);
}

/**
 * client_packet_header_size - called on buffer-switch to a new sub-buffer
 *
 * Returns the header size without padding after the structure. Don't use a
 * packed structure, because gcc generates inefficient code on some
 * architectures (powerpc, mips...).
 */
static size_t client_packet_header_size(void)
{
	return offsetof(struct packet_header, ctx.header_end);
}
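
/*
 * Using offsetof() of the trailing header_end marker rather than
 * sizeof(struct packet_header) keeps any padding the compiler adds
 * after the last field out of the on-disk header size.
 */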

static void client_buffer_begin(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
				unsigned int subbuf_idx,
				struct lttng_ust_shm_handle *handle)
{
	struct channel *chan = shmp(handle, buf->backend.chan);
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size,
				handle);
	struct lttng_channel *lttng_chan = channel_get_private(chan);

	header->magic = CTF_MAGIC_NUMBER;
	memcpy(header->uuid, lttng_chan->uuid, sizeof(lttng_chan->uuid));
	header->stream_id = lttng_chan->id;
	header->ctx.timestamp_begin = tsc;
	header->ctx.timestamp_end = 0;
	header->ctx.content_size = ~0ULL;	/* for debugging */
	header->ctx.packet_size = ~0ULL;
	header->ctx.events_discarded = 0;
	header->ctx.cpu_id = buf->backend.cpu;
}

/*
 * offset is assumed to never be 0 here: never deliver a completely empty
 * subbuffer. data_size is between 1 and subbuf_size.
 */
static void client_buffer_end(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
			      unsigned int subbuf_idx, unsigned long data_size,
			      struct lttng_ust_shm_handle *handle)
{
	struct channel *chan = shmp(handle, buf->backend.chan);
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size,
				handle);
	unsigned long records_lost = 0;

	header->ctx.timestamp_end = tsc;
	header->ctx.content_size =
		(uint64_t) data_size * CHAR_BIT;		/* in bits */
	header->ctx.packet_size =
		(uint64_t) PAGE_ALIGN(data_size) * CHAR_BIT;	/* in bits */

	records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
	header->ctx.events_discarded = records_lost;
}
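
/*
 * Example (assuming 4 kB pages): data_size = 100 bytes gives
 * content_size = 800 bits of payload, while packet_size is padded up to
 * PAGE_ALIGN(100) = 4096 bytes, i.e. 32768 bits; CTF expresses both
 * sizes in bits, not bytes.
 */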

static int client_buffer_create(struct lttng_ust_lib_ring_buffer *buf, void *priv,
				int cpu, const char *name,
				struct lttng_ust_shm_handle *handle)
{
	return 0;
}

static void client_buffer_finalize(struct lttng_ust_lib_ring_buffer *buf,
				   void *priv, int cpu,
				   struct lttng_ust_shm_handle *handle)
{
}

static const struct lttng_ust_lib_ring_buffer_config client_config = {
	.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
	.cb.record_header_size = client_record_header_size,
	.cb.subbuffer_header_size = client_packet_header_size,
	.cb.buffer_begin = client_buffer_begin,
	.cb.buffer_end = client_buffer_end,
	.cb.buffer_create = client_buffer_create,
	.cb.buffer_finalize = client_buffer_finalize,

	.tsc_bits = LTTNG_COMPACT_TSC_BITS,
	.alloc = RING_BUFFER_ALLOC_PER_CPU,
	.sync = RING_BUFFER_SYNC_GLOBAL,
	.mode = RING_BUFFER_MODE_TEMPLATE,
	.backend = RING_BUFFER_PAGE,
	.output = RING_BUFFER_MMAP,
	.oops = RING_BUFFER_OOPS_CONSISTENCY,
	.ipi = RING_BUFFER_NO_IPI_BARRIER,
	.wakeup = LTTNG_CLIENT_WAKEUP,
	.client_type = LTTNG_CLIENT_TYPE,
};
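
/*
 * Note that .tsc_bits matches the 27-bit compact timestamp field: when
 * more than tsc_bits worth of clock bits change between two events, the
 * ring buffer sets RING_BUFFER_RFLAG_FULL_TSC so that the slow path
 * records the full 64-bit timestamp.
 */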

const struct lttng_ust_lib_ring_buffer_client_cb *LTTNG_CLIENT_CALLBACKS = &client_config.cb;

static
struct lttng_channel *_channel_create(const char *name,
				void *buf_addr,
				size_t subbuf_size, size_t num_subbuf,
				unsigned int switch_timer_interval,
				unsigned int read_timer_interval,
				unsigned char *uuid,
				uint32_t chan_id)
{
	struct lttng_channel chan_priv_init;
	struct lttng_ust_shm_handle *handle;
	struct lttng_channel *lttng_chan;
	void *priv;

	memset(&chan_priv_init, 0, sizeof(chan_priv_init));
	memcpy(chan_priv_init.uuid, uuid, LTTNG_UST_UUID_LEN);
	chan_priv_init.id = chan_id;
	handle = channel_create(&client_config, name,
			&priv, __alignof__(*lttng_chan), sizeof(*lttng_chan),
			&chan_priv_init,
			buf_addr, subbuf_size, num_subbuf,
			switch_timer_interval, read_timer_interval);
	if (!handle)
		return NULL;
	lttng_chan = priv;
	lttng_chan->handle = handle;
	lttng_chan->chan = shmp(handle, handle->chan);
	return lttng_chan;
}

static
void lttng_channel_destroy(struct lttng_channel *chan)
{
	channel_destroy(chan->chan, chan->handle, 1);
}

static
int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx,
			uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
	int ret, cpu;

	cpu = lib_ring_buffer_get_cpu(&client_config);
	if (cpu < 0)
		return -EPERM;
	ctx->cpu = cpu;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		if (event_id > 30)
			ctx->rflags |= LTTNG_RFLAG_EXTENDED;
		break;
	case 2:	/* large */
		if (event_id > 65534)
			ctx->rflags |= LTTNG_RFLAG_EXTENDED;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	ret = lib_ring_buffer_reserve(&client_config, ctx);
	if (ret)
		goto put;
	lttng_write_event_header(&client_config, ctx, event_id);
	return 0;
put:
	lib_ring_buffer_put_cpu(&client_config);
	return ret;
}
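
/*
 * Sketch of a typical probe-side call sequence (the actual calls live
 * in the generated tracepoint probes, not in this file; the ctx init
 * signature below is the one used by the lttng-ust 2.x probe macros):
 *
 *	lib_ring_buffer_ctx_init(&ctx, chan->chan, event, payload_size,
 *				 payload_align, -1, chan->handle);
 *	ret = lttng_event_reserve(&ctx, event_id);
 *	if (!ret) {
 *		lttng_event_write(&ctx, &payload, sizeof(payload));
 *		lttng_event_commit(&ctx);
 *	}
 */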

static
void lttng_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	lib_ring_buffer_commit(&client_config, ctx);
	lib_ring_buffer_put_cpu(&client_config);
}

static
void lttng_event_write(struct lttng_ust_lib_ring_buffer_ctx *ctx, const void *src,
		       size_t len)
{
	lib_ring_buffer_write(&client_config, ctx, src, len);
}

#if 0
static
wait_queue_head_t *lttng_get_reader_wait_queue(struct channel *chan)
{
	return &chan->read_wait;
}

static
wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
{
	return &chan->hp_wait;
}
#endif //0

static
int lttng_is_finalized(struct channel *chan)
{
	return lib_ring_buffer_channel_is_finalized(chan);
}

static
int lttng_is_disabled(struct channel *chan)
{
	return lib_ring_buffer_channel_is_disabled(chan);
}

static
int lttng_flush_buffer(struct channel *chan, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer *buf;
	int cpu;

	for_each_channel_cpu(cpu, chan) {
		int shm_fd, wait_fd, wakeup_fd;
		uint64_t memory_map_size;

		buf = channel_get_ring_buffer(&client_config, chan,
				cpu, handle, &shm_fd, &wait_fd,
				&wakeup_fd, &memory_map_size);
		lib_ring_buffer_switch(&client_config, buf,
				SWITCH_ACTIVE, handle);
	}
	return 0;
}

static struct lttng_transport lttng_relay_transport = {
	.name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap",
	.ops = {
		.channel_create = _channel_create,
		.channel_destroy = lttng_channel_destroy,
		.event_reserve = lttng_event_reserve,
		.event_commit = lttng_event_commit,
		.event_write = lttng_event_write,
		.packet_avail_size = NULL,	/* Would be racy anyway */
		//.get_reader_wait_queue = lttng_get_reader_wait_queue,
		//.get_hp_wait_queue = lttng_get_hp_wait_queue,
		.is_finalized = lttng_is_finalized,
		.is_disabled = lttng_is_disabled,
		.flush_buffer = lttng_flush_buffer,
	},
	.client_config = &client_config,
};

void RING_BUFFER_MODE_TEMPLATE_INIT(void)
{
	DBG("LTT : ltt ring buffer client \"%s\" init\n",
		"relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
	lttng_transport_register(&lttng_relay_transport);
}

void RING_BUFFER_MODE_TEMPLATE_EXIT(void)
{
	DBG("LTT : ltt ring buffer client \"%s\" exit\n",
		"relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
	lttng_transport_unregister(&lttng_relay_transport);
}