Commit | Line | Data |
---|---|---|
7514523f | 1 | /* |
a90917c3 | 2 | * lttng-ring-buffer-client.h |
7514523f MD |
3 | * |
4 | * Copyright (C) 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | |
5 | * | |
3d084699 | 6 | * LTTng lib ring buffer client template. |
7514523f MD |
7 | * |
8 | * Dual LGPL v2.1/GPL v2 license. | |
9 | */ | |
10 | ||
11 | #include <linux/module.h> | |
c0e31d2e | 12 | #include <linux/types.h> |
9115fbdc | 13 | #include "lib/bitfield.h" |
b13f3ebe | 14 | #include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */ |
f3bc08c5 | 15 | #include "wrapper/trace-clock.h" |
a90917c3 MD |
16 | #include "lttng-events.h" |
17 | #include "lttng-tracer.h" | |
9115fbdc | 18 | #include "wrapper/ringbuffer/frontend_types.h" |
7514523f | 19 | |
d793d5e1 MD |
20 | /* |
21 | * Keep the natural field alignment for _each field_ within this structure if | |
22 | * you ever add/remove a field from this header. Packed attribute is not used | |
23 | * because gcc generates poor code on at least powerpc and mips. Don't ever | |
24 | * let gcc add padding between the structure elements. | |
fcf74578 MD |
25 | * |
26 | * The guarantee we have with timestamps is that all the events in a | |
27 | * packet are included (inclusive) within the begin/end timestamps of | |
28 | * the packet. Another guarantee we have is that the "timestamp begin", | |
29 | * as well as the event timestamps, are monotonically increasing (never | |
30 | * decrease) when moving forward in a stream (physically). But this | |
31 | * guarantee does not apply to "timestamp end", because it is sampled at | |
32 | * commit time, which is not ordered with respect to space reservation. | |
d793d5e1 | 33 | */ |
9115fbdc | 34 | |
/*
 * CTF trace packet header, written at the start of each sub-buffer
 * (see client_buffer_begin/client_buffer_end below).  Field order and
 * natural alignment define the on-disk layout: do not reorder or pack.
 */
struct packet_header {
	/* Trace packet header */
	uint32_t magic;			/*
					 * Trace magic number.
					 * contains endianness information.
					 */
	uint8_t uuid[16];		/* Session UUID, copied from session->uuid */
	uint32_t stream_id;		/* Channel id (lttng_chan->id) */

	struct {
		/* Stream packet context */
		uint64_t timestamp_begin;	/* Cycle count at subbuffer start */
		uint64_t timestamp_end;		/* Cycle count at subbuffer end */
		uint32_t events_discarded;	/*
						 * Events lost in this subbuffer since
						 * the beginning of the trace.
						 * (may overflow)
						 */
		uint32_t content_size;	/* Size of data in subbuffer */
		uint32_t packet_size;	/* Subbuffer size (include padding) */
		uint32_t cpu_id;	/* CPU id associated with stream */
		uint8_t header_end;	/*
					 * End of header marker: its offsetof()
					 * is the header size (see
					 * client_packet_header_size()).
					 */
	} ctx;
};
59 | ||
60 | ||
/* Timestamp source for all events and packet begin/end: the LTTng trace clock. */
static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
{
	return trace_clock_read64();
}
65 | ||
f1676205 MD |
66 | static inline |
67 | size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx) | |
68 | { | |
69 | int i; | |
70 | size_t orig_offset = offset; | |
71 | ||
72 | if (likely(!ctx)) | |
73 | return 0; | |
74 | for (i = 0; i < ctx->nr_fields; i++) | |
75 | offset += ctx->fields[i].get_size(offset); | |
76 | return offset - orig_offset; | |
77 | } | |
78 | ||
79 | static inline | |
80 | void ctx_record(struct lib_ring_buffer_ctx *bufctx, | |
a90917c3 | 81 | struct lttng_channel *chan, |
f1676205 MD |
82 | struct lttng_ctx *ctx) |
83 | { | |
84 | int i; | |
85 | ||
86 | if (likely(!ctx)) | |
87 | return; | |
88 | for (i = 0; i < ctx->nr_fields; i++) | |
89 | ctx->fields[i].record(&ctx->fields[i], bufctx, chan); | |
90 | } | |
91 | ||
881833e3 MD |
92 | /* |
93 | * record_header_size - Calculate the header size and padding necessary. | |
94 | * @config: ring buffer instance configuration | |
95 | * @chan: channel | |
96 | * @offset: offset in the write buffer | |
881833e3 | 97 | * @pre_header_padding: padding to add before the header (output) |
881833e3 MD |
98 | * @ctx: reservation context |
99 | * | |
100 | * Returns the event header size (including padding). | |
101 | * | |
881833e3 MD |
102 | * The payload must itself determine its own alignment from the biggest type it |
103 | * contains. | |
104 | */ | |
105 | static __inline__ | |
106 | unsigned char record_header_size(const struct lib_ring_buffer_config *config, | |
107 | struct channel *chan, size_t offset, | |
64c796d8 | 108 | size_t *pre_header_padding, |
881833e3 MD |
109 | struct lib_ring_buffer_ctx *ctx) |
110 | { | |
a90917c3 MD |
111 | struct lttng_channel *lttng_chan = channel_get_private(chan); |
112 | struct lttng_event *event = ctx->priv; | |
881833e3 MD |
113 | size_t orig_offset = offset; |
114 | size_t padding; | |
115 | ||
a90917c3 | 116 | switch (lttng_chan->header_type) { |
9115fbdc | 117 | case 1: /* compact */ |
a90917c3 | 118 | padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t)); |
9115fbdc | 119 | offset += padding; |
a90917c3 | 120 | if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) { |
9115fbdc MD |
121 | offset += sizeof(uint32_t); /* id and timestamp */ |
122 | } else { | |
123 | /* Minimum space taken by 5-bit id */ | |
124 | offset += sizeof(uint8_t); | |
125 | /* Align extended struct on largest member */ | |
a90917c3 | 126 | offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t)); |
9115fbdc | 127 | offset += sizeof(uint32_t); /* id */ |
a90917c3 | 128 | offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t)); |
9115fbdc MD |
129 | offset += sizeof(uint64_t); /* timestamp */ |
130 | } | |
131 | break; | |
132 | case 2: /* large */ | |
a90917c3 | 133 | padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t)); |
9115fbdc MD |
134 | offset += padding; |
135 | offset += sizeof(uint16_t); | |
a90917c3 MD |
136 | if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) { |
137 | offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t)); | |
9115fbdc MD |
138 | offset += sizeof(uint32_t); /* timestamp */ |
139 | } else { | |
140 | /* Align extended struct on largest member */ | |
a90917c3 | 141 | offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t)); |
9115fbdc | 142 | offset += sizeof(uint32_t); /* id */ |
a90917c3 | 143 | offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t)); |
9115fbdc | 144 | offset += sizeof(uint64_t); /* timestamp */ |
881833e3 | 145 | } |
9115fbdc MD |
146 | break; |
147 | default: | |
1b2e041f | 148 | padding = 0; |
64c796d8 | 149 | WARN_ON_ONCE(1); |
881833e3 | 150 | } |
f1676205 | 151 | offset += ctx_get_size(offset, event->ctx); |
a90917c3 | 152 | offset += ctx_get_size(offset, lttng_chan->ctx); |
881833e3 MD |
153 | |
154 | *pre_header_padding = padding; | |
155 | return offset - orig_offset; | |
156 | } | |
157 | ||
158 | #include "wrapper/ringbuffer/api.h" | |
159 | ||
/*
 * Forward declaration: out-of-line slow path taken by
 * lttng_write_event_header() whenever ctx->rflags is set.
 */
static
void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer_ctx *ctx,
				 uint32_t event_id);
881833e3 MD |
164 | |
165 | /* | |
a90917c3 | 166 | * lttng_write_event_header |
881833e3 MD |
167 | * |
168 | * Writes the event header to the offset (already aligned on 32-bits). | |
169 | * | |
170 | * @config: ring buffer instance configuration | |
171 | * @ctx: reservation context | |
4e1f08f4 | 172 | * @event_id: event ID |
881833e3 MD |
173 | */ |
174 | static __inline__ | |
a90917c3 | 175 | void lttng_write_event_header(const struct lib_ring_buffer_config *config, |
881833e3 | 176 | struct lib_ring_buffer_ctx *ctx, |
64c796d8 | 177 | uint32_t event_id) |
881833e3 | 178 | { |
a90917c3 MD |
179 | struct lttng_channel *lttng_chan = channel_get_private(ctx->chan); |
180 | struct lttng_event *event = ctx->priv; | |
881833e3 MD |
181 | |
182 | if (unlikely(ctx->rflags)) | |
183 | goto slow_path; | |
184 | ||
a90917c3 | 185 | switch (lttng_chan->header_type) { |
9115fbdc MD |
186 | case 1: /* compact */ |
187 | { | |
188 | uint32_t id_time = 0; | |
189 | ||
4e1f08f4 | 190 | bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id); |
9115fbdc MD |
191 | bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc); |
192 | lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time)); | |
193 | break; | |
194 | } | |
195 | case 2: /* large */ | |
196 | { | |
9115fbdc | 197 | uint32_t timestamp = (uint32_t) ctx->tsc; |
7e855749 | 198 | uint16_t id = event_id; |
9115fbdc | 199 | |
7e855749 | 200 | lib_ring_buffer_write(config, ctx, &id, sizeof(id)); |
a90917c3 | 201 | lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t)); |
9115fbdc MD |
202 | lib_ring_buffer_write(config, ctx, ×tamp, sizeof(timestamp)); |
203 | break; | |
204 | } | |
205 | default: | |
64c796d8 | 206 | WARN_ON_ONCE(1); |
9115fbdc | 207 | } |
f1676205 | 208 | |
a90917c3 MD |
209 | ctx_record(ctx, lttng_chan, lttng_chan->ctx); |
210 | ctx_record(ctx, lttng_chan, event->ctx); | |
c595c36f | 211 | lib_ring_buffer_align_ctx(ctx, ctx->largest_align); |
f1676205 | 212 | |
9115fbdc | 213 | return; |
881833e3 MD |
214 | |
215 | slow_path: | |
a90917c3 | 216 | lttng_write_event_header_slow(config, ctx, event_id); |
881833e3 MD |
217 | } |
218 | ||
/*
 * Slow path of lttng_write_event_header(): handles reservations with
 * rflags set (full TSC and/or extended header).  In the extended forms,
 * the inline id field is set to its escape value (31 for the 5-bit
 * compact id, 65535 for the 16-bit large id — see the thresholds in
 * lttng_event_reserve()) and the full 32-bit event id plus 64-bit
 * timestamp follow, aligned on uint64_t.
 */
static
void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer_ctx *ctx,
				 uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
	struct lttng_event *event = ctx->priv;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			uint32_t id_time = 0;

			bt_bitfield_write(&id_time, uint32_t, 0, 5, event_id);
			bt_bitfield_write(&id_time, uint32_t, 5, 27, ctx->tsc);
			lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
		} else {
			uint8_t id = 0;
			uint64_t timestamp = ctx->tsc;

			/* 31 is the compact-header escape value for "extended". */
			bt_bitfield_write(&id, uint8_t, 0, 5, 31);
			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	case 2:	/* large */
	{
		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
			uint32_t timestamp = (uint32_t) ctx->tsc;
			uint16_t id = event_id;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		} else {
			/* 65535 is the large-header escape value for "extended". */
			uint16_t id = 65535;
			uint64_t timestamp = ctx->tsc;

			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
			/* Align extended struct on largest member */
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
			lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
		}
		break;
	}
	default:
		WARN_ON_ONCE(1);
	}
	ctx_record(ctx, lttng_chan, lttng_chan->ctx);
	ctx_record(ctx, lttng_chan, event->ctx);
	lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}
277 | ||
/* Forward declaration: the client configuration table is defined below. */
static const struct lib_ring_buffer_config client_config;

/* Clock-read callback registered in client_config: LTTng trace clock. */
static u64 client_ring_buffer_clock_read(struct channel *chan)
{
	return lib_ring_buffer_clock_read(chan);
}
284 | ||
/*
 * record_header_size callback registered in client_config.  Delegates to
 * the shared inline computation; @pre_header_padding is filled in there.
 */
static
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
				 struct channel *chan, size_t offset,
				 size_t *pre_header_padding,
				 struct lib_ring_buffer_ctx *ctx)
{
	size_t header_size;

	header_size = record_header_size(config, chan, offset,
			pre_header_padding, ctx);
	return header_size;
}
294 | ||
/**
 * client_packet_header_size - called on buffer-switch to a new sub-buffer
 *
 * Return header size without padding after the structure. Don't use packed
 * structure because gcc generates inefficient code on some architectures
 * (powerpc, mips..)
 *
 * Computed as the offset of the ctx.header_end marker field.
 */
static size_t client_packet_header_size(void)
{
	return offsetof(struct packet_header, ctx.header_end);
}
306 | ||
/*
 * Called on switch to a new sub-buffer: initialize its packet header at
 * timestamp @tsc.  content_size/packet_size are poisoned here and only
 * filled with real values in client_buffer_end().
 */
static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
				unsigned int subbuf_idx)
{
	struct channel *chan = buf->backend.chan;
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size);
	struct lttng_channel *lttng_chan = channel_get_private(chan);
	struct lttng_session *session = lttng_chan->session;

	header->magic = CTF_MAGIC_NUMBER;
	memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
	header->stream_id = lttng_chan->id;
	header->ctx.timestamp_begin = tsc;
	header->ctx.timestamp_end = 0;
	header->ctx.events_discarded = 0;
	header->ctx.content_size = 0xFFFFFFFF; /* for debugging */
	header->ctx.packet_size = 0xFFFFFFFF;
	header->ctx.cpu_id = buf->backend.cpu;
}
328 | ||
/*
 * offset is assumed to never be 0 here : never deliver a completely empty
 * subbuffer. data_size is between 1 and subbuf_size.
 *
 * Finalizes the packet header on sub-buffer delivery: end timestamp,
 * content/packet sizes (in bits, packet size page-aligned), and the
 * accumulated count of records lost for any reason (full, wrap, too big).
 */
static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
			      unsigned int subbuf_idx, unsigned long data_size)
{
	struct channel *chan = buf->backend.chan;
	struct packet_header *header =
		(struct packet_header *)
			lib_ring_buffer_offset_address(&buf->backend,
				subbuf_idx * chan->backend.subbuf_size);
	unsigned long records_lost = 0;

	header->ctx.timestamp_end = tsc;
	header->ctx.content_size = data_size * CHAR_BIT; /* in bits */
	header->ctx.packet_size = PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
	records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
	records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
	header->ctx.events_discarded = records_lost;
}
351 | ||
/* buffer_create callback: this client needs no per-buffer state; always succeeds. */
static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
				int cpu, const char *name)
{
	return 0;
}
357 | ||
/* buffer_finalize callback: nothing to tear down (see client_buffer_create). */
static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
{
}
361 | ||
/*
 * Ring buffer configuration for this client: per-CPU buffers, 32-bit
 * compact timestamps (overflow handled via RING_BUFFER_RFLAG_FULL_TSC),
 * mode/output selected by the including template.
 */
static const struct lib_ring_buffer_config client_config = {
	.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
	.cb.record_header_size = client_record_header_size,
	.cb.subbuffer_header_size = client_packet_header_size,
	.cb.buffer_begin = client_buffer_begin,
	.cb.buffer_end = client_buffer_end,
	.cb.buffer_create = client_buffer_create,
	.cb.buffer_finalize = client_buffer_finalize,

	.tsc_bits = 32,
	.alloc = RING_BUFFER_ALLOC_PER_CPU,
	.sync = RING_BUFFER_SYNC_PER_CPU,
	.mode = RING_BUFFER_MODE_TEMPLATE,
	.backend = RING_BUFFER_PAGE,
	.output = RING_BUFFER_OUTPUT_TEMPLATE,
	.oops = RING_BUFFER_OOPS_CONSISTENCY,
	.ipi = RING_BUFFER_IPI_BARRIER,
	.wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
};
381 | ||
/*
 * channel_create transport op: create a channel bound to this client's
 * configuration, with @lttng_chan stashed as the channel private data
 * (retrieved later via channel_get_private()).
 */
static
struct channel *_channel_create(const char *name,
				struct lttng_channel *lttng_chan, void *buf_addr,
				size_t subbuf_size, size_t num_subbuf,
				unsigned int switch_timer_interval,
				unsigned int read_timer_interval)
{
	return channel_create(&client_config, name, lttng_chan, buf_addr,
			      subbuf_size, num_subbuf, switch_timer_interval,
			      read_timer_interval);
}
393 | ||
/* channel_destroy transport op: pass-through to the lib ring buffer. */
static
void lttng_channel_destroy(struct channel *chan)
{
	channel_destroy(chan);
}
399 | ||
ad1c05e1 | 400 | static |
a90917c3 | 401 | struct lib_ring_buffer *lttng_buffer_read_open(struct channel *chan) |
ad1c05e1 MD |
402 | { |
403 | struct lib_ring_buffer *buf; | |
404 | int cpu; | |
405 | ||
1c25284c MD |
406 | for_each_channel_cpu(cpu, chan) { |
407 | buf = channel_get_ring_buffer(&client_config, chan, cpu); | |
ad1c05e1 MD |
408 | if (!lib_ring_buffer_open_read(buf)) |
409 | return buf; | |
410 | } | |
411 | return NULL; | |
412 | } | |
413 | ||
f71ecafa | 414 | static |
a90917c3 | 415 | int lttng_buffer_has_read_closed_stream(struct channel *chan) |
f71ecafa MD |
416 | { |
417 | struct lib_ring_buffer *buf; | |
418 | int cpu; | |
419 | ||
420 | for_each_channel_cpu(cpu, chan) { | |
421 | buf = channel_get_ring_buffer(&client_config, chan, cpu); | |
422 | if (!atomic_long_read(&buf->active_readers)) | |
423 | return 1; | |
424 | } | |
425 | return 0; | |
426 | } | |
427 | ||
/* Release the read-side reference taken by lttng_buffer_read_open(). */
static
void lttng_buffer_read_close(struct lib_ring_buffer *buf)
{
	lib_ring_buffer_release_read(buf);
}
/*
 * Reserve space for one event and write its header.
 *
 * Pins the current CPU (lib_ring_buffer_get_cpu) for the duration of the
 * reservation; the matching put_cpu happens either here on failure or in
 * lttng_event_commit().  Event ids that do not fit the inline header id
 * field (> 30 for compact 5-bit ids, > 65534 for large 16-bit ids) force
 * the extended header via LTTNG_RFLAG_EXTENDED.
 *
 * Returns 0 on success, negative error on failure.
 */
static
int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx,
		      uint32_t event_id)
{
	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
	int ret, cpu;

	cpu = lib_ring_buffer_get_cpu(&client_config);
	if (cpu < 0)
		return -EPERM;
	ctx->cpu = cpu;

	switch (lttng_chan->header_type) {
	case 1:	/* compact */
		if (event_id > 30)
			ctx->rflags |= LTTNG_RFLAG_EXTENDED;
		break;
	case 2:	/* large */
		if (event_id > 65534)
			ctx->rflags |= LTTNG_RFLAG_EXTENDED;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	ret = lib_ring_buffer_reserve(&client_config, ctx);
	if (ret)
		goto put;
	lttng_write_event_header(&client_config, ctx, event_id);
	return 0;
put:
	lib_ring_buffer_put_cpu(&client_config);
	return ret;
}
468 | ||
/*
 * Commit a reservation made by lttng_event_reserve() and release the CPU
 * pinned there.
 */
static
void lttng_event_commit(struct lib_ring_buffer_ctx *ctx)
{
	lib_ring_buffer_commit(&client_config, ctx);
	lib_ring_buffer_put_cpu(&client_config);
}
475 | ||
/* Write @len bytes of event payload from kernel memory into the reservation. */
static
void lttng_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
		     size_t len)
{
	lib_ring_buffer_write(&client_config, ctx, src, len);
}
1c25284c | 482 | |
/* Write @len bytes of event payload from user-space memory into the reservation. */
static
void lttng_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
			       const void __user *src, size_t len)
{
	lib_ring_buffer_copy_from_user(&client_config, ctx, src, len);
}
489 | ||
/* Fill @len bytes of the reservation with byte value @c. */
static
void lttng_event_memset(struct lib_ring_buffer_ctx *ctx,
		      int c, size_t len)
{
	lib_ring_buffer_memset(&client_config, ctx, c, len);
}
496 | ||
/* Expose the per-cpu buffer's writer wait queue for this channel/@cpu. */
static
wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct channel *chan, int cpu)
{
	struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
					chan, cpu);
	return &buf->write_wait;
}
504 | ||
/* Expose the channel's hotplug wait queue. */
static
wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
{
	return &chan->hp_wait;
}
510 | ||
/* Report whether the channel has been finalized by the lib ring buffer. */
static
int lttng_is_finalized(struct channel *chan)
{
	return lib_ring_buffer_channel_is_finalized(chan);
}
516 | ||
/* Report whether the channel has been disabled by the lib ring buffer. */
static
int lttng_is_disabled(struct channel *chan)
{
	return lib_ring_buffer_channel_is_disabled(chan);
}
522 | ||
/*
 * Transport descriptor registered with the LTTng core at module init.
 * The name is "relay-" followed by the buffer mode string supplied by the
 * including template.
 */
static struct lttng_transport lttng_relay_transport = {
	.name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
	.owner = THIS_MODULE,
	.ops = {
		.channel_create = _channel_create,
		.channel_destroy = lttng_channel_destroy,
		.buffer_read_open = lttng_buffer_read_open,
		.buffer_has_read_closed_stream =
			lttng_buffer_has_read_closed_stream,
		.buffer_read_close = lttng_buffer_read_close,
		.event_reserve = lttng_event_reserve,
		.event_commit = lttng_event_commit,
		.event_write = lttng_event_write,
		.event_write_from_user = lttng_event_write_from_user,
		.event_memset = lttng_event_memset,
		.packet_avail_size = NULL,	/* Would be racy anyway */
		.get_writer_buf_wait_queue = lttng_get_writer_buf_wait_queue,
		.get_hp_wait_queue = lttng_get_hp_wait_queue,
		.is_finalized = lttng_is_finalized,
		.is_disabled = lttng_is_disabled,
	},
};
545 | ||
/* Module init: sync vmalloc mappings, then register this transport. */
static int __init lttng_ring_buffer_client_init(void)
{
	/*
	 * This vmalloc sync all also takes care of the lib ring buffer
	 * vmalloc'd module pages when it is built as a module into LTTng.
	 */
	wrapper_vmalloc_sync_all();
	lttng_transport_register(&lttng_relay_transport);
	return 0;
}

module_init(lttng_ring_buffer_client_init);
1c25284c | 558 | |
/* Module exit: unregister the transport registered at init. */
static void __exit lttng_ring_buffer_client_exit(void)
{
	lttng_transport_unregister(&lttng_relay_transport);
}

module_exit(lttng_ring_buffer_client_exit);
1c25284c | 565 | |
7514523f MD |
566 | MODULE_LICENSE("GPL and additional rights"); |
567 | MODULE_AUTHOR("Mathieu Desnoyers"); | |
3d084699 MD |
568 | MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING |
569 | " client"); |