#ifndef _LIB_RING_BUFFER_BACKEND_H
#define _LIB_RING_BUFFER_BACKEND_H

/*
 * lib/ringbuffer/backend.h
 *
 * Ring buffer backend (API).
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Credits to Steven Rostedt for proposing to use an extra-subbuffer owned by
 * the reader in flight recorder mode.
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

/* Internal helpers */
#include "../../wrapper/ringbuffer/backend_internal.h"
#include "../../wrapper/ringbuffer/frontend_internal.h"

/* Ring buffer backend API */

/* Ring buffer backend access (read/write) */

extern size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb,
				   size_t offset, void *dest, size_t len);

extern int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
					  size_t offset, void __user *dest,
					  size_t len);

extern int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb,
				     size_t offset, void *dest, size_t len);

extern struct page **
lib_ring_buffer_read_get_page(struct lib_ring_buffer_backend *bufb, size_t offset,
			      void ***virt);

/*
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to write directly to this address,
 * as long as the write is never bigger than a page size.
 */
extern void *
lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
			       size_t offset);
extern void *
lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
				    size_t offset);
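
/*
 * Illustrative sketch (not part of the original file): write a hypothetical
 * 64-bit sub-buffer header field through the address returned by
 * lib_ring_buffer_offset_address(), relying on the guarantee above that the
 * header never sits on a page boundary. The helper name and the fixed 8-byte
 * field are assumptions made for illustration only.
 */
static inline
void lib_ring_buffer_write_header_field_example(struct lib_ring_buffer_backend *bufb,
						size_t sb_offset, u64 value)
{
	u64 *field = lib_ring_buffer_offset_address(bufb, sb_offset);

	*field = value;	/* 8-byte write, well below one page */
}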

/**
 * lib_ring_buffer_write - write data to a buffer backend
 * @config: ring buffer instance configuration
 * @ctx: ring buffer context (input arguments only)
 * @src: source pointer to copy from
 * @len: length of data to copy
 *
 * This function copies "len" bytes of data from a source pointer to a buffer
 * backend, at the current context offset. This is more or less a buffer
 * backend-specific memcpy() operation. Calls the slow path
 * (_lib_ring_buffer_write) if the copy crosses a page boundary.
 */
static inline
void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
			   struct lib_ring_buffer_ctx *ctx,
			   const void *src, size_t len)
{
	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	size_t sbidx, index, pagecpy;
	size_t offset = ctx->buf_offset;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	if (unlikely(!len))
		return;
	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(ctx->chan,
		     config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	if (likely(pagecpy == len))
		lib_ring_buffer_do_copy(config,
					rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
					src, len);
	else
		_lib_ring_buffer_write(bufb, offset, src, len, 0);
	ctx->buf_offset += len;
}
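
/*
 * Illustrative sketch (not part of the original file): a hypothetical helper
 * showing how a probe could serialize one fixed-size integer field with
 * lib_ring_buffer_write() after the frontend has reserved space and set up
 * @ctx. The helper name is made up for illustration only.
 */
static inline
void lib_ring_buffer_write_u64_example(const struct lib_ring_buffer_config *config,
				       struct lib_ring_buffer_ctx *ctx,
				       u64 value)
{
	lib_ring_buffer_write(config, ctx, &value, sizeof(value));
}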

/**
 * lib_ring_buffer_memset - write len bytes of c to a buffer backend
 * @config: ring buffer instance configuration
 * @ctx: ring buffer context (input arguments only)
 * @c: the byte to copy
 * @len: number of bytes to copy
 *
 * This function writes "len" bytes of "c" to a buffer backend, at the current
 * context offset. This is more or less a buffer backend-specific memset()
 * operation. Calls the slow path (_lib_ring_buffer_memset) if the write
 * crosses a page boundary.
 */
static inline
void lib_ring_buffer_memset(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_ctx *ctx, int c, size_t len)
{
	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	size_t sbidx, index, pagecpy;
	size_t offset = ctx->buf_offset;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	if (unlikely(!len))
		return;
	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(ctx->chan,
		     config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	if (likely(pagecpy == len))
		lib_ring_buffer_do_memset(rpages->p[index].virt
					  + (offset & ~PAGE_MASK),
					  c, len);
	else
		_lib_ring_buffer_memset(bufb, offset, c, len, 0);
	ctx->buf_offset += len;
}
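
/*
 * Illustrative sketch (not part of the original file): zero-pad the current
 * record with lib_ring_buffer_memset(), e.g. to satisfy the alignment of the
 * next field. The helper name is hypothetical.
 */
static inline
void lib_ring_buffer_zero_pad_example(const struct lib_ring_buffer_config *config,
				      struct lib_ring_buffer_ctx *ctx,
				      size_t pad_len)
{
	lib_ring_buffer_memset(config, ctx, 0, pad_len);
}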

/*
 * Copy up to @len string bytes from @src to @dest. Stop whenever a NULL
 * terminating character is found in @src. Returns the number of bytes
 * copied. Does *not* terminate @dest with a NULL terminating character.
 */
static inline
size_t lib_ring_buffer_do_strcpy(const struct lib_ring_buffer_config *config,
				 char *dest, const char *src, size_t len)
{
	size_t count;

	for (count = 0; count < len; count++) {
		char c;

		/*
		 * Only read the source character once, in case it is
		 * modified concurrently.
		 */
		c = ACCESS_ONCE(src[count]);
		if (!c)
			break;
		lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
	}
	return count;
}

/*
 * Copy up to @len string bytes from @src to @dest. Stop whenever a NULL
 * terminating character is found in @src, or when a fault occurs.
 * Returns the number of bytes copied. Does *not* terminate @dest with a
 * NULL terminating character.
 *
 * This function deals with userspace pointers; it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
static inline
size_t lib_ring_buffer_do_strcpy_from_user_inatomic(const struct lib_ring_buffer_config *config,
		char *dest, const char __user *src, size_t len)
{
	size_t count;

	for (count = 0; count < len; count++) {
		int ret;
		char c;

		ret = __copy_from_user_inatomic(&c, src + count, 1);
		if (ret || !c)
			break;
		lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
	}
	return count;
}

/**
 * lib_ring_buffer_strcpy - write string data to a buffer backend
 * @config: ring buffer instance configuration
 * @ctx: ring buffer context (input arguments only)
 * @src: source pointer to copy from
 * @len: length of data to copy
 * @pad: character to use for padding
 *
 * This function copies @len - 1 bytes of string data from a source
 * pointer to a buffer backend, followed by a terminating '\0'
 * character, at the current context offset. This is more or less a
 * buffer backend-specific strncpy() operation. If a terminating '\0'
 * character is found in @src before @len - 1 characters are copied, pad
 * the buffer with @pad characters (e.g. '#'). Calls the slow path
 * (_lib_ring_buffer_strcpy) if the copy crosses a page boundary.
 */
static inline
void lib_ring_buffer_strcpy(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_ctx *ctx,
			    const char *src, size_t len, int pad)
{
	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	size_t sbidx, index, pagecpy;
	size_t offset = ctx->buf_offset;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	if (unlikely(!len))
		return;
	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(ctx->chan,
		     config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	if (likely(pagecpy == len)) {
		size_t count;

		count = lib_ring_buffer_do_strcpy(config,
					rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
					src, len - 1);
		offset += count;
		/* Padding */
		if (unlikely(count < len - 1)) {
			size_t pad_len = len - 1 - count;

			lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
						pad, pad_len);
			offset += pad_len;
		}
		/* Ending '\0' */
		lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
					'\0', 1);
	} else {
		_lib_ring_buffer_strcpy(bufb, offset, src, len, 0, pad);
	}
	ctx->buf_offset += len;
}
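
/*
 * Illustrative sketch (not part of the original file): record a kernel string
 * into a fixed-width 16-byte field, padding unused bytes with '#' as the
 * documentation above suggests. The helper name and the field width are
 * assumptions made for illustration only.
 */
static inline
void lib_ring_buffer_strcpy_fixed16_example(const struct lib_ring_buffer_config *config,
					    struct lib_ring_buffer_ctx *ctx,
					    const char *str)
{
	lib_ring_buffer_strcpy(config, ctx, str, 16, '#');
}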

/**
 * lib_ring_buffer_copy_from_user_inatomic - write userspace data to a buffer backend
 * @config: ring buffer instance configuration
 * @ctx: ring buffer context (input arguments only)
 * @src: userspace source pointer to copy from
 * @len: length of data to copy
 *
 * This function copies "len" bytes of data from a userspace pointer to a
 * buffer backend, at the current context offset. This is more or less a buffer
 * backend-specific memcpy() operation. Calls the slow path
 * (_lib_ring_buffer_copy_from_user_inatomic) if the copy crosses a page
 * boundary. Disables the page fault handler to ensure we never try to take
 * the mmap_sem.
 */
static inline
void lib_ring_buffer_copy_from_user_inatomic(const struct lib_ring_buffer_config *config,
					     struct lib_ring_buffer_ctx *ctx,
					     const void __user *src, size_t len)
{
	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	size_t sbidx, index, pagecpy;
	size_t offset = ctx->buf_offset;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	unsigned long ret;
	mm_segment_t old_fs = get_fs();

	if (unlikely(!len))
		return;
	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(ctx->chan,
		     config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));

	set_fs(KERNEL_DS);
	pagefault_disable();
	if (unlikely(!access_ok(VERIFY_READ, src, len)))
		goto fill_buffer;

	if (likely(pagecpy == len)) {
		ret = lib_ring_buffer_do_copy_from_user_inatomic(
			rpages->p[index].virt + (offset & ~PAGE_MASK),
			src, len);
		if (unlikely(ret > 0)) {
			len -= (pagecpy - ret);
			offset += (pagecpy - ret);
			goto fill_buffer;
		}
	} else {
		_lib_ring_buffer_copy_from_user_inatomic(bufb, offset, src, len, 0);
	}
	pagefault_enable();
	set_fs(old_fs);
	ctx->buf_offset += len;

	return;

fill_buffer:
	pagefault_enable();
	set_fs(old_fs);
	/*
	 * In the error path we call the slow path version to avoid
	 * the pollution of static inline code.
	 */
	_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
}
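
/*
 * Illustrative sketch (not part of the original file): copy a user-space
 * buffer captured by a probe (e.g. a write(2) payload) into the ring buffer.
 * The helper name is hypothetical; the caller is assumed to have reserved
 * @len bytes in @ctx beforehand.
 */
static inline
void lib_ring_buffer_record_user_buf_example(const struct lib_ring_buffer_config *config,
					     struct lib_ring_buffer_ctx *ctx,
					     const void __user *ubuf, size_t len)
{
	lib_ring_buffer_copy_from_user_inatomic(config, ctx, ubuf, len);
}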

/**
 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a buffer backend
 * @config: ring buffer instance configuration
 * @ctx: ring buffer context (input arguments only)
 * @src: userspace source pointer to copy from
 * @len: length of data to copy
 * @pad: character to use for padding
 *
 * This function copies @len - 1 bytes of string data from a userspace
 * source pointer to a buffer backend, followed by a terminating '\0'
 * character, at the current context offset. This is more or less a
 * buffer backend-specific strncpy() operation. If a terminating '\0'
 * character is found in @src before @len - 1 characters are copied, pad
 * the buffer with @pad characters (e.g. '#'). Calls the slow path
 * (_lib_ring_buffer_strcpy_from_user_inatomic) if the copy crosses a page
 * boundary. Disables the page fault handler to ensure we never try to
 * take the mmap_sem.
 */
static inline
void lib_ring_buffer_strcpy_from_user_inatomic(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer_ctx *ctx,
		const void __user *src, size_t len, int pad)
{
	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	size_t sbidx, index, pagecpy;
	size_t offset = ctx->buf_offset;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	mm_segment_t old_fs = get_fs();

	if (unlikely(!len))
		return;
	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(ctx->chan,
		     config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));

	set_fs(KERNEL_DS);
	pagefault_disable();
	if (unlikely(!access_ok(VERIFY_READ, src, len)))
		goto fill_buffer;

	if (likely(pagecpy == len)) {
		size_t count;

		count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
					rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
					src, len - 1);
		offset += count;
		/* Padding */
		if (unlikely(count < len - 1)) {
			size_t pad_len = len - 1 - count;

			lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
						pad, pad_len);
			offset += pad_len;
		}
		/* Ending '\0' */
		lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
					'\0', 1);
	} else {
		_lib_ring_buffer_strcpy_from_user_inatomic(bufb, offset, src,
					len, 0, pad);
	}
	pagefault_enable();
	set_fs(old_fs);
	ctx->buf_offset += len;

	return;

fill_buffer:
	pagefault_enable();
	set_fs(old_fs);
	/*
	 * In the error path we call the slow path version to avoid
	 * the pollution of static inline code.
	 */
	_lib_ring_buffer_memset(bufb, offset, pad, len - 1, 0);
	offset += len - 1;
	_lib_ring_buffer_memset(bufb, offset, '\0', 1, 0);
}
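
/*
 * Illustrative sketch (not part of the original file): record a user-space
 * string (e.g. a path name passed to a system call) into a fixed-width
 * 64-byte field, padding with '#'. The helper name and the field width are
 * assumptions made for illustration only.
 */
static inline
void lib_ring_buffer_record_user_str_example(const struct lib_ring_buffer_config *config,
					     struct lib_ring_buffer_ctx *ctx,
					     const char __user *ustr)
{
	lib_ring_buffer_strcpy_from_user_inatomic(config, ctx, ustr, 64, '#');
}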

/*
 * This accessor counts the number of unread records in a buffer.
 * It only provides a consistent value if no reads nor writes are performed
 * concurrently.
 */
static inline
unsigned long lib_ring_buffer_get_records_unread(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	struct lib_ring_buffer_backend *bufb = &buf->backend;
	struct lib_ring_buffer_backend_pages *pages;
	unsigned long records_unread = 0, sb_bindex, id;
	unsigned int i;

	for (i = 0; i < bufb->chan->backend.num_subbuf; i++) {
		id = bufb->buf_wsb[i].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		pages = bufb->array[sb_bindex];
		records_unread += v_read(config, &pages->records_unread);
	}
	if (config->mode == RING_BUFFER_OVERWRITE) {
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		pages = bufb->array[sb_bindex];
		records_unread += v_read(config, &pages->records_unread);
	}
	return records_unread;
}
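
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * predicate a reader could use to check whether a buffer still holds unread
 * records, e.g. before tearing it down.
 */
static inline
int lib_ring_buffer_has_records_unread_example(const struct lib_ring_buffer_config *config,
					       struct lib_ring_buffer *buf)
{
	return lib_ring_buffer_get_records_unread(config, buf) != 0;
}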

#endif /* _LIB_RING_BUFFER_BACKEND_H */