Commit | Line | Data |
---|---|---|
3d1fc7fd MD |
1 | /* |
2 | * ltt-ring-buffer-client.h | |
3 | * | |
4 | * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | |
5 | * | |
6 | * LTTng lib ring buffer client template. | |
7 | * | |
8 | * Dual LGPL v2.1/GPL v2 license. | |
9 | */ | |
10 | ||
9f3fdbc6 | 11 | #include <stdint.h> |
1ea172a1 | 12 | #include <ust/lttng-events.h> |
9f3fdbc6 MD |
13 | #include "ust/bitfield.h" |
14 | #include "ust/clock.h" | |
3d1fc7fd | 15 | #include "ltt-tracer.h" |
9f3fdbc6 | 16 | #include "../libringbuffer/frontend_types.h" |
3d1fc7fd MD |
17 | |
18 | /* | |
19 | * Keep the natural field alignment for _each field_ within this structure if | |
20 | * you ever add/remove a field from this header. Packed attribute is not used | |
21 | * because gcc generates poor code on at least powerpc and mips. Don't ever | |
22 | * let gcc add padding between the structure elements. | |
23 | */ | |
24 | ||
/*
 * CTF packet (sub-buffer) header, written at the start of every
 * sub-buffer by client_buffer_begin() and completed by
 * client_buffer_end().  Keep natural field alignment (no packed
 * attribute) — see the comment above.
 */
struct packet_header {
	/* Trace packet header */
	uint32_t magic;			/*
					 * Trace magic number.
					 * contains endianness information.
					 */
	uint8_t uuid[16];		/* Session UUID, copied from ltt_session */
	uint32_t stream_id;		/* Channel id (ltt_chan->id) */

	struct {
		/* Stream packet context */
		uint64_t timestamp_begin;	/* Cycle count at subbuffer start */
		uint64_t timestamp_end;		/* Cycle count at subbuffer end */
		uint32_t events_discarded;	/*
						 * Events lost in this subbuffer since
						 * the beginning of the trace.
						 * (may overflow)
						 */
		uint32_t content_size;	/* Size of data in subbuffer, in bits */
		uint32_t packet_size;	/* Subbuffer size (include padding), in bits */
		uint32_t cpu_id;	/* CPU id associated with stream */
		uint8_t header_end;	/* End of header marker (offsetof target) */
	} ctx;
};
49 | ||
50 | ||
/*
 * Clock source used to timestamp events and packet boundaries.
 * @chan is unused: all channels read the same 64-bit trace clock.
 */
static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
{
	return trace_clock_read64();
}
55 | ||
56 | static inline | |
57 | size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx) | |
58 | { | |
59 | int i; | |
60 | size_t orig_offset = offset; | |
61 | ||
62 | if (likely(!ctx)) | |
63 | return 0; | |
64 | for (i = 0; i < ctx->nr_fields; i++) | |
65 | offset += ctx->fields[i].get_size(offset); | |
66 | return offset - orig_offset; | |
67 | } | |
68 | ||
69 | static inline | |
70 | void ctx_record(struct lib_ring_buffer_ctx *bufctx, | |
71 | struct ltt_channel *chan, | |
72 | struct lttng_ctx *ctx) | |
73 | { | |
74 | int i; | |
75 | ||
76 | if (likely(!ctx)) | |
77 | return; | |
78 | for (i = 0; i < ctx->nr_fields; i++) | |
79 | ctx->fields[i].record(&ctx->fields[i], bufctx, chan); | |
80 | } | |
81 | ||
82 | /* | |
83 | * record_header_size - Calculate the header size and padding necessary. | |
84 | * @config: ring buffer instance configuration | |
85 | * @chan: channel | |
86 | * @offset: offset in the write buffer | |
87 | * @pre_header_padding: padding to add before the header (output) | |
88 | * @ctx: reservation context | |
89 | * | |
90 | * Returns the event header size (including padding). | |
91 | * | |
92 | * The payload must itself determine its own alignment from the biggest type it | |
93 | * contains. | |
94 | */ | |
95 | static __inline__ | |
96 | unsigned char record_header_size(const struct lib_ring_buffer_config *config, | |
97 | struct channel *chan, size_t offset, | |
98 | size_t *pre_header_padding, | |
99 | struct lib_ring_buffer_ctx *ctx) | |
100 | { | |
101 | struct ltt_channel *ltt_chan = channel_get_private(chan); | |
102 | struct ltt_event *event = ctx->priv; | |
103 | size_t orig_offset = offset; | |
104 | size_t padding; | |
105 | ||
106 | switch (ltt_chan->header_type) { | |
107 | case 1: /* compact */ | |
1dbfff0c | 108 | padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t)); |
3d1fc7fd MD |
109 | offset += padding; |
110 | if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) { | |
111 | offset += sizeof(uint32_t); /* id and timestamp */ | |
112 | } else { | |
113 | /* Minimum space taken by 5-bit id */ | |
114 | offset += sizeof(uint8_t); | |
115 | /* Align extended struct on largest member */ | |
1dbfff0c | 116 | offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t)); |
3d1fc7fd | 117 | offset += sizeof(uint32_t); /* id */ |
1dbfff0c | 118 | offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t)); |
3d1fc7fd MD |
119 | offset += sizeof(uint64_t); /* timestamp */ |
120 | } | |
121 | break; | |
122 | case 2: /* large */ | |
1dbfff0c | 123 | padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t)); |
3d1fc7fd MD |
124 | offset += padding; |
125 | offset += sizeof(uint16_t); | |
126 | if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) { | |
1dbfff0c | 127 | offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t)); |
3d1fc7fd MD |
128 | offset += sizeof(uint32_t); /* timestamp */ |
129 | } else { | |
130 | /* Align extended struct on largest member */ | |
1dbfff0c | 131 | offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t)); |
3d1fc7fd | 132 | offset += sizeof(uint32_t); /* id */ |
1dbfff0c | 133 | offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t)); |
3d1fc7fd MD |
134 | offset += sizeof(uint64_t); /* timestamp */ |
135 | } | |
136 | break; | |
137 | default: | |
9f3fdbc6 | 138 | padding = 0; |
3d1fc7fd MD |
139 | WARN_ON_ONCE(1); |
140 | } | |
141 | offset += ctx_get_size(offset, event->ctx); | |
142 | offset += ctx_get_size(offset, ltt_chan->ctx); | |
143 | ||
144 | *pre_header_padding = padding; | |
145 | return offset - orig_offset; | |
146 | } | |
147 | ||
9f3fdbc6 | 148 | #include "../libringbuffer/api.h" |
3d1fc7fd | 149 | |
9f3fdbc6 | 150 | static |
3d1fc7fd MD |
151 | void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config, |
152 | struct lib_ring_buffer_ctx *ctx, | |
153 | uint32_t event_id); | |
154 | ||
/*
 * ltt_write_event_header
 *
 * Writes the event header to the offset (already aligned on 32-bits).
 * Fast path only: any reservation flag (full TSC, extended id) diverts
 * to ltt_write_event_header_slow().
 *
 * @config: ring buffer instance configuration
 * @ctx: reservation context
 * @event_id: event ID
 */
static __inline__
void ltt_write_event_header(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_ctx *ctx,
			    uint32_t event_id)
{
	struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
	struct ltt_event *event = ctx->priv;

	if (unlikely(ctx->rflags))
		goto slow_path;

	switch (ltt_chan->header_type) {
	case 1:	/* compact */
	{
		uint32_t id_time = 0;

		/* Pack 5-bit event id and 27 low-order tsc bits in one word. */
		bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
		bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
		lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		break;
	}
	case 2:	/* large */
	{
		uint32_t timestamp = (uint32_t) ctx->tsc;
		uint16_t id = event_id;

		lib_ring_buffer_write(config, ctx, &id, sizeof(id));
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
		lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}

	/* Channel-wide context first, then per-event context. */
	ctx_record(ctx, ltt_chan, ltt_chan->ctx);
	ctx_record(ctx, ltt_chan, event->ctx);

	return;

slow_path:
	ltt_write_event_header_slow(config, ctx, event_id);
}
207 | ||
/*
 * Slow path of ltt_write_event_header(): emits the extended header when
 * the event id does not fit the compact encoding or a full timestamp is
 * required.  The escape values written in the compact id slot (31 for
 * 5-bit compact, 65535 for 16-bit large) tell the reader an extended
 * header follows.  Layout must mirror record_header_size() exactly.
 */
static
void ltt_write_event_header_slow(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer_ctx *ctx,
				 uint32_t event_id)
{
	struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
	struct ltt_event *event = ctx->priv;

	switch (ltt_chan->header_type) {
	case 1:	/* compact */
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
			uint32_t id_time = 0;

			bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
			bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
			lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		} else {
			uint8_t id = 0;
			uint64_t timestamp = ctx->tsc;

			/* 31 = "extended header follows" escape for 5-bit ids */
			bt_bitfield_write(&id, uint8_t, 0, 5, 31);
			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	case 2:	/* large */
	{
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTT_RFLAG_EXTENDED))) {
			uint32_t timestamp = (uint32_t) ctx->tsc;
			uint16_t id = event_id;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		} else {
			/* 65535 = "extended header follows" escape for 16-bit ids */
			uint16_t id = 65535;
			uint64_t timestamp = ctx->tsc;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}
	/* Same context order as the fast path: channel first, then event. */
	ctx_record(ctx, ltt_chan, ltt_chan->ctx);
	ctx_record(ctx, ltt_chan, event->ctx);
}
265 | ||
266 | static const struct lib_ring_buffer_config client_config; | |
267 | ||
268 | static u64 client_ring_buffer_clock_read(struct channel *chan) | |
269 | { | |
270 | return lib_ring_buffer_clock_read(chan); | |
271 | } | |
272 | ||
/*
 * Header-size callback installed in client_config; delegates to the
 * template's record_header_size() above.
 */
static
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
				 struct channel *chan, size_t offset,
				 size_t *pre_header_padding,
				 struct lib_ring_buffer_ctx *ctx)
{
	size_t header_size;

	header_size = record_header_size(config, chan, offset,
					 pre_header_padding, ctx);
	return header_size;
}
282 | ||
/**
 * client_packet_header_size - called on buffer-switch to a new sub-buffer
 *
 * Return header size without padding after the structure. Don't use packed
 * structure because gcc generates inefficient code on some architectures
 * (powerpc, mips..)
 *
 * header_end is a zero-width-purpose marker field: offsetof() up to it
 * yields the useful header size.
 */
static size_t client_packet_header_size(void)
{
	return offsetof(struct packet_header, ctx.header_end);
}
294 | ||
/*
 * client_buffer_begin - called on switch to a new sub-buffer.
 *
 * Initializes the CTF packet header at the start of the sub-buffer.
 * content_size and packet_size are set to the 0xFFFFFFFF debug marker
 * and overwritten with real values by client_buffer_end().
 */
static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
				unsigned int subbuf_idx,
				struct shm_handle *handle)
{
	struct channel *chan = shmp(handle, buf->backend.chan);
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size,
				handle);
	struct ltt_channel *ltt_chan = channel_get_private(chan);
	struct ltt_session *session = ltt_chan->session;

	header->magic = CTF_MAGIC_NUMBER;
	memcpy(header->uuid, session->uuid, sizeof(session->uuid));
	header->stream_id = ltt_chan->id;
	header->ctx.timestamp_begin = tsc;
	header->ctx.timestamp_end = 0;
	header->ctx.events_discarded = 0;
	header->ctx.content_size = 0xFFFFFFFF; /* for debugging */
	header->ctx.packet_size = 0xFFFFFFFF;
	header->ctx.cpu_id = buf->backend.cpu;
}
318 | ||
/*
 * offset is assumed to never be 0 here : never deliver a completely empty
 * subbuffer. data_size is between 1 and subbuf_size.
 *
 * Finalizes the packet header: end timestamp, content/packet sizes (in
 * bits, per CTF), and the discarded-event count accumulated from the
 * wrap and too-big loss counters.
 */
static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
			      unsigned int subbuf_idx, unsigned long data_size,
			      struct shm_handle *handle)
{
	struct channel *chan = shmp(handle, buf->backend.chan);
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size,
				handle);
	unsigned long records_lost = 0;

	header->ctx.timestamp_end = tsc;
	header->ctx.content_size = data_size * CHAR_BIT;		/* in bits */
	header->ctx.packet_size = PAGE_ALIGN(data_size) * CHAR_BIT;	/* in bits */
	/*
	 * We do not care about the records lost count, because the metadata
	 * channel waits and retry.
	 */
	(void) lib_ring_buffer_get_records_lost_full(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
	header->ctx.events_discarded = records_lost;
}
347 | ||
/*
 * Per-buffer creation hook: this client needs no extra per-buffer
 * state, so always succeed.
 */
static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
				int cpu, const char *name, struct shm_handle *handle)
{
	return 0;
}
353 | ||
/* Per-buffer teardown hook: nothing to release for this client. */
static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu, struct shm_handle *handle)
{
}
357 | ||
/*
 * Ring buffer client configuration: per-CPU page-backed buffers with
 * mmap output, global sync, 32 tsc bits kept in event headers (overflow
 * triggers the extended header path), writer-side wakeup, no IPI
 * barrier.  Mode is supplied by the including template.
 */
static const struct lib_ring_buffer_config client_config = {
	.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
	.cb.record_header_size = client_record_header_size,
	.cb.subbuffer_header_size = client_packet_header_size,
	.cb.buffer_begin = client_buffer_begin,
	.cb.buffer_end = client_buffer_end,
	.cb.buffer_create = client_buffer_create,
	.cb.buffer_finalize = client_buffer_finalize,

	.tsc_bits = 32,
	.alloc = RING_BUFFER_ALLOC_PER_CPU,
	.sync = RING_BUFFER_SYNC_GLOBAL,
	.mode = RING_BUFFER_MODE_TEMPLATE,
	.backend = RING_BUFFER_PAGE,
	.output = RING_BUFFER_MMAP,
	.oops = RING_BUFFER_OOPS_CONSISTENCY,
	.ipi = RING_BUFFER_NO_IPI_BARRIER,
	.wakeup = RING_BUFFER_WAKEUP_BY_WRITER,
};
377 | ||
/*
 * Transport channel_create op: creates the underlying ring buffer
 * channel with this client's configuration, stores the shm handle and
 * the channel pointer in @ltt_chan.  Returns @ltt_chan on success,
 * NULL if the channel could not be created.
 */
static
struct ltt_channel *_channel_create(const char *name,
				struct ltt_channel *ltt_chan, void *buf_addr,
				size_t subbuf_size, size_t num_subbuf,
				unsigned int switch_timer_interval,
				unsigned int read_timer_interval)
{
	ltt_chan->handle = channel_create(&client_config, name, ltt_chan, buf_addr,
			subbuf_size, num_subbuf, switch_timer_interval,
			read_timer_interval);
	if (!ltt_chan->handle)
		return NULL;
	ltt_chan->chan = shmp(ltt_chan->handle, ltt_chan->handle->chan);
	return ltt_chan;
}
393 | ||
/* Transport channel_destroy op: tears down the ring buffer channel. */
static
void ltt_channel_destroy(struct ltt_channel *ltt_chan)
{
	channel_destroy(ltt_chan->chan, ltt_chan->handle);
}
399 | ||
/*
 * Open the first per-CPU buffer of @chan available for reading.
 * Returns that buffer, or NULL when every buffer already has a reader
 * (lib_ring_buffer_open_read() returned non-zero for all of them).
 */
static
struct lib_ring_buffer *ltt_buffer_read_open(struct channel *chan,
					     struct shm_handle *handle)
{
	struct lib_ring_buffer *buf;
	int cpu;

	for_each_channel_cpu(cpu, chan) {
		buf = channel_get_ring_buffer(&client_config, chan, cpu, handle);
		if (!lib_ring_buffer_open_read(buf, handle))
			return buf;
	}
	return NULL;
}
414 | ||
/* Release the read-side reference taken by ltt_buffer_read_open(). */
static
void ltt_buffer_read_close(struct lib_ring_buffer *buf,
			   struct shm_handle *handle)
{
	lib_ring_buffer_release_read(buf, handle);
}
421 | ||
/*
 * Reserve space for one event and write its header.
 *
 * Pins the current CPU for the duration of the write (released in
 * ltt_event_commit(), or here on reservation failure).  Event ids that
 * do not fit the compact id encoding force the extended header:
 * the compact header's 5-bit id reserves 31 as escape (so ids > 30),
 * the large header's 16-bit id reserves 65535 (so ids > 65534) — see
 * ltt_write_event_header_slow().
 *
 * Returns 0 on success, -EPERM if no CPU could be pinned, or the
 * lib_ring_buffer_reserve() error code.
 */
static
int ltt_event_reserve(struct lib_ring_buffer_ctx *ctx,
		      uint32_t event_id)
{
	struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
	int ret, cpu;

	cpu = lib_ring_buffer_get_cpu(&client_config);
	if (cpu < 0)
		return -EPERM;
	ctx->cpu = cpu;

	switch (ltt_chan->header_type) {
	case 1:	/* compact */
		if (event_id > 30)
			ctx->rflags |= LTT_RFLAG_EXTENDED;
		break;
	case 2:	/* large */
		if (event_id > 65534)
			ctx->rflags |= LTT_RFLAG_EXTENDED;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	ret = lib_ring_buffer_reserve(&client_config, ctx);
	if (ret)
		goto put;
	ltt_write_event_header(&client_config, ctx, event_id);
	return 0;
put:
	lib_ring_buffer_put_cpu(&client_config);
	return ret;
}
456 | ||
/*
 * Commit the reservation made by ltt_event_reserve() and unpin the CPU
 * taken there.  Order matters: commit before releasing the CPU.
 */
static
void ltt_event_commit(struct lib_ring_buffer_ctx *ctx)
{
	lib_ring_buffer_commit(&client_config, ctx);
	lib_ring_buffer_put_cpu(&client_config);
}
463 | ||
/* Copy @len bytes of event payload from @src into the reservation. */
static
void ltt_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
		     size_t len)
{
	lib_ring_buffer_write(&client_config, ctx, src, len);
}
470 | ||
#if 0
/*
 * Wait-queue accessors, disabled in this userspace port (no
 * wait_queue_head_t here); the corresponding transport ops below are
 * commented out as well.  Kept for reference — TODO confirm before
 * re-enabling.
 */
static
wait_queue_head_t *ltt_get_reader_wait_queue(struct channel *chan)
{
	return &chan->read_wait;
}

static
wait_queue_head_t *ltt_get_hp_wait_queue(struct channel *chan)
{
	return &chan->hp_wait;
}
#endif //0
3d1fc7fd MD |
484 | |
/* Transport op: non-zero when the channel has been finalized. */
static
int ltt_is_finalized(struct channel *chan)
{
	int finalized = lib_ring_buffer_channel_is_finalized(chan);

	return finalized;
}
490 | ||
/* Transport op: non-zero when the channel has been disabled. */
static
int ltt_is_disabled(struct channel *chan)
{
	int disabled = lib_ring_buffer_channel_is_disabled(chan);

	return disabled;
}
496 | ||
/*
 * Transport descriptor binding this client's operations under the
 * "relay-<mode>-mmap" name.  Wait-queue ops are disabled (see the
 * #if 0 block above); packet_avail_size is intentionally NULL.
 */
static struct ltt_transport ltt_relay_transport = {
	.name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap",
	.ops = {
		.channel_create = _channel_create,
		.channel_destroy = ltt_channel_destroy,
		.buffer_read_open = ltt_buffer_read_open,
		.buffer_read_close = ltt_buffer_read_close,
		.event_reserve = ltt_event_reserve,
		.event_commit = ltt_event_commit,
		.event_write = ltt_event_write,
		.packet_avail_size = NULL,	/* Would be racy anyway */
		//.get_reader_wait_queue = ltt_get_reader_wait_queue,
		//.get_hp_wait_queue = ltt_get_hp_wait_queue,
		.is_finalized = ltt_is_finalized,
		.is_disabled = ltt_is_disabled,
	},
};
514 | ||
/* Template entry point: register this transport with the LTT core. */
void RING_BUFFER_MODE_TEMPLATE_INIT(void)
{
	DBG("LTT : ltt ring buffer client init\n");
	ltt_transport_register(&ltt_relay_transport);
}
520 | ||
/* Template exit point: unregister the transport registered at init. */
void RING_BUFFER_MODE_TEMPLATE_EXIT(void)
{
	DBG("LTT : ltt ring buffer client exit\n");
	ltt_transport_unregister(&ltt_relay_transport);
}