/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * lib/ringbuffer/backend.h
 *
 * Ring buffer backend (API).
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Credits to Steven Rostedt for proposing to use an extra-subbuffer owned by
 * the reader in flight recorder mode.
 */

#ifndef _LIB_RING_BUFFER_BACKEND_H
#define _LIB_RING_BUFFER_BACKEND_H

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <wrapper/uaccess.h>

/* Internal helpers */
#include <wrapper/ringbuffer/backend_internal.h>
#include <wrapper/ringbuffer/frontend_internal.h>

/* Ring buffer backend API */

/* Ring buffer backend access (read/write) */

extern size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb,
				   size_t offset, void *dest, size_t len);

extern int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
					  size_t offset, void __user *dest,
					  size_t len);

extern int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb,
				     size_t offset, void *dest, size_t len);

extern unsigned long *
lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb, size_t offset,
			     void ***virt);

/*
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to write directly to this address,
 * as long as the write is never bigger than a page size.
 */
extern void *
lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
			       size_t offset);
extern void *
lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
				    size_t offset);
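
/*
 * Example (illustrative sketch, not part of this API): the typical user is
 * the frontend writing a subbuffer header in place. "struct packet_header"
 * and "sb_offset" are hypothetical placeholders.
 *
 *	struct packet_header *header;
 *
 *	header = lib_ring_buffer_offset_address(&buf->backend, sb_offset);
 *	header->content_size = 0;
 */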

/**
 * lib_ring_buffer_write - write data to a buffer backend
 * @config : ring buffer instance configuration
 * @ctx : ring buffer context (input arguments only)
 * @src : source pointer to copy from
 * @len : length of data to copy
 *
 * This function copies "len" bytes of data from a source pointer to a buffer
 * backend, at the current context offset. This is more or less a buffer
 * backend-specific memcpy() operation. Calls the slow path
 * (_lib_ring_buffer_write) if the copy crosses a page boundary.
 */
static inline __attribute__((always_inline))
void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
			   struct lib_ring_buffer_ctx *ctx,
			   const void *src, size_t len)
{
	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	size_t index, pagecpy;
	size_t offset = ctx->buf_offset;
	struct lib_ring_buffer_backend_pages *backend_pages;

	if (unlikely(!len))
		return;
	backend_pages =
		lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
	if (likely(pagecpy == len))
		lib_ring_buffer_do_copy(config,
					backend_pages->p[index].virt
					+ (offset & ~PAGE_MASK),
					src, len);
	else
		_lib_ring_buffer_write(bufb, offset, src, len, 0);
	ctx->buf_offset += len;
}
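
/*
 * Example (illustrative sketch): a typical event-write path, assuming the
 * frontend reserve/commit API from frontend_api.h; "chan" and "priv" are
 * hypothetical placeholders.
 *
 *	struct lib_ring_buffer_ctx ctx;
 *	u32 payload = 42;
 *
 *	lib_ring_buffer_ctx_init(&ctx, chan, priv, sizeof(payload),
 *				 lttng_alignof(payload), -1);
 *	if (!lib_ring_buffer_reserve(config, &ctx)) {
 *		lib_ring_buffer_write(config, &ctx, &payload, sizeof(payload));
 *		lib_ring_buffer_commit(config, &ctx);
 *	}
 */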

/**
 * lib_ring_buffer_memset - write len bytes of c to a buffer backend
 * @config : ring buffer instance configuration
 * @ctx : ring buffer context (input arguments only)
 * @c : the byte to copy
 * @len : number of bytes to copy
 *
 * This function writes "len" bytes of "c" to a buffer backend, at the current
 * context offset. This is more or less a buffer backend-specific memset()
 * operation. Calls the slow path (_lib_ring_buffer_memset) if the write
 * crosses a page boundary.
 */
static inline
void lib_ring_buffer_memset(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_ctx *ctx, int c, size_t len)
{
	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	size_t index, pagecpy;
	size_t offset = ctx->buf_offset;
	struct lib_ring_buffer_backend_pages *backend_pages;

	if (unlikely(!len))
		return;
	backend_pages =
		lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
	if (likely(pagecpy == len))
		lib_ring_buffer_do_memset(backend_pages->p[index].virt
					  + (offset & ~PAGE_MASK),
					  c, len);
	else
		_lib_ring_buffer_memset(bufb, offset, c, len, 0);
	ctx->buf_offset += len;
}
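
/*
 * Example (illustrative sketch): zero-fill "pad_len" bytes of record padding
 * at the current context offset ("pad_len" is a hypothetical placeholder).
 *
 *	lib_ring_buffer_memset(config, &ctx, 0, pad_len);
 */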

/*
 * Copy up to @len string bytes from @src to @dest. Stop whenever a NULL
 * terminating character is found in @src. Returns the number of bytes
 * copied. Does *not* terminate @dest with a NULL terminating character.
 */
static inline __attribute__((always_inline))
size_t lib_ring_buffer_do_strcpy(const struct lib_ring_buffer_config *config,
				 char *dest, const char *src, size_t len)
{
	size_t count;

	for (count = 0; count < len; count++) {
		char c;

		/*
		 * Only read source character once, in case it is
		 * modified concurrently.
		 */
		c = READ_ONCE(src[count]);
		if (!c)
			break;
		lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
	}
	return count;
}
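
/*
 * Example (illustrative): for src = "ab" and len = 4, two bytes are copied
 * and 2 is returned. The caller is responsible for padding and for the
 * final '\0', as lib_ring_buffer_strcpy() does below.
 */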

/*
 * Copy up to @len string bytes from @src to @dest. Stop whenever a NULL
 * terminating character is found in @src, or when a fault occurs.
 * Returns the number of bytes copied. Does *not* terminate @dest with a
 * NULL terminating character.
 *
 * This function deals with userspace pointers; it should never be called
 * directly without the src pointer having first been checked with
 * access_ok().
 */
static inline __attribute__((always_inline))
size_t lib_ring_buffer_do_strcpy_from_user_inatomic(const struct lib_ring_buffer_config *config,
		char *dest, const char __user *src, size_t len)
{
	size_t count;

	for (count = 0; count < len; count++) {
		int ret;
		char c;

		ret = __copy_from_user_inatomic(&c, src + count, 1);
		if (ret || !c)
			break;
		lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
	}
	return count;
}
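
/*
 * Example (illustrative sketch): callers must validate the range and
 * disable page faults first, e.g.:
 *
 *	if (!lttng_access_ok(VERIFY_READ, src, len))
 *		goto error;
 *	pagefault_disable();
 *	copied = lib_ring_buffer_do_strcpy_from_user_inatomic(config, dest,
 *			src, len);
 *	pagefault_enable();
 */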

/**
 * lib_ring_buffer_strcpy - write string data to a buffer backend
 * @config : ring buffer instance configuration
 * @ctx : ring buffer context (input arguments only)
 * @src : source pointer to copy from
 * @len : length of data to copy
 * @pad : character to use for padding
 *
 * This function copies @len - 1 bytes of string data from a source
 * pointer to a buffer backend, followed by a terminating '\0'
 * character, at the current context offset. This is more or less a
 * buffer backend-specific strncpy() operation. If a terminating '\0'
 * character is found in @src before @len - 1 characters are copied, pad
 * the buffer with @pad characters (e.g. '#'). Calls the slow path
 * (_lib_ring_buffer_strcpy) if the copy crosses a page boundary.
 */
static inline
void lib_ring_buffer_strcpy(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_ctx *ctx,
			    const char *src, size_t len, int pad)
{
	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	size_t index, pagecpy;
	size_t offset = ctx->buf_offset;
	struct lib_ring_buffer_backend_pages *backend_pages;

	if (unlikely(!len))
		return;
	backend_pages =
		lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);
	if (likely(pagecpy == len)) {
		size_t count;

		count = lib_ring_buffer_do_strcpy(config,
					backend_pages->p[index].virt
					+ (offset & ~PAGE_MASK),
					src, len - 1);
		offset += count;
		/* Padding */
		if (unlikely(count < len - 1)) {
			size_t pad_len = len - 1 - count;

			lib_ring_buffer_do_memset(backend_pages->p[index].virt
						+ (offset & ~PAGE_MASK),
						pad, pad_len);
			offset += pad_len;
		}
		/* Ending '\0' */
		lib_ring_buffer_do_memset(backend_pages->p[index].virt
					+ (offset & ~PAGE_MASK),
					'\0', 1);
	} else {
		_lib_ring_buffer_strcpy(bufb, offset, src, len, 0, pad);
	}
	ctx->buf_offset += len;
}
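
/*
 * Example (illustrative): with src = "hi", len = 8 and pad = '#', the bytes
 * 'h' 'i' '#' '#' '#' '#' '#' '\0' are written, so exactly @len bytes are
 * consumed whatever the source string length.
 */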

/**
 * lib_ring_buffer_copy_from_user_inatomic - write userspace data to a buffer backend
 * @config : ring buffer instance configuration
 * @ctx : ring buffer context (input arguments only)
 * @src : userspace source pointer to copy from
 * @len : length of data to copy
 *
 * This function copies "len" bytes of data from a userspace pointer to a
 * buffer backend, at the current context offset. This is more or less a buffer
 * backend-specific memcpy() operation. Calls the slow path
 * (_lib_ring_buffer_copy_from_user_inatomic) if the copy crosses a page
 * boundary. Disables the page fault handler to ensure we never try to take
 * the mmap_sem.
 */
static inline __attribute__((always_inline))
void lib_ring_buffer_copy_from_user_inatomic(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer_ctx *ctx,
		const void __user *src, size_t len)
{
	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	size_t index, pagecpy;
	size_t offset = ctx->buf_offset;
	struct lib_ring_buffer_backend_pages *backend_pages;
	unsigned long ret;
	mm_segment_t old_fs = get_fs();

	if (unlikely(!len))
		return;
	backend_pages =
		lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);

	set_fs(KERNEL_DS);
	pagefault_disable();
	if (unlikely(!lttng_access_ok(VERIFY_READ, src, len)))
		goto fill_buffer;

	if (likely(pagecpy == len)) {
		ret = lib_ring_buffer_do_copy_from_user_inatomic(
			backend_pages->p[index].virt + (offset & ~PAGE_MASK),
			src, len);
		if (unlikely(ret > 0)) {
			/* Copy failed. */
			goto fill_buffer;
		}
	} else {
		_lib_ring_buffer_copy_from_user_inatomic(bufb, offset, src, len, 0);
	}
	pagefault_enable();
	set_fs(old_fs);
	ctx->buf_offset += len;

	return;

fill_buffer:
	pagefault_enable();
	set_fs(old_fs);
	/*
	 * In the error path we call the slow path version to avoid
	 * the pollution of static inline code.
	 */
	_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
}
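
/*
 * Example (illustrative sketch, assuming the frontend reserve/commit API;
 * "chan", "priv", "usrc" and "size" are hypothetical placeholders):
 *
 *	struct lib_ring_buffer_ctx ctx;
 *
 *	lib_ring_buffer_ctx_init(&ctx, chan, priv, size, 1, -1);
 *	if (!lib_ring_buffer_reserve(config, &ctx)) {
 *		lib_ring_buffer_copy_from_user_inatomic(config, &ctx,
 *				usrc, size);
 *		lib_ring_buffer_commit(config, &ctx);
 *	}
 *
 * On a faulting access the record is zero-filled rather than aborted, so
 * the record size stays consistent.
 */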

/**
 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a buffer backend
 * @config : ring buffer instance configuration
 * @ctx : ring buffer context (input arguments only)
 * @src : userspace source pointer to copy from
 * @len : length of data to copy
 * @pad : character to use for padding
 *
 * This function copies @len - 1 bytes of string data from a userspace
 * source pointer to a buffer backend, followed by a terminating '\0'
 * character, at the current context offset. This is more or less a
 * buffer backend-specific strncpy() operation. If a terminating '\0'
 * character is found in @src before @len - 1 characters are copied, pad
 * the buffer with @pad characters (e.g. '#'). Calls the slow path
 * (_lib_ring_buffer_strcpy_from_user_inatomic) if the copy crosses a page
 * boundary. Disables the page fault handler to ensure we never try to
 * take the mmap_sem.
 */
static inline
void lib_ring_buffer_strcpy_from_user_inatomic(const struct lib_ring_buffer_config *config,
		struct lib_ring_buffer_ctx *ctx,
		const void __user *src, size_t len, int pad)
{
	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	size_t index, pagecpy;
	size_t offset = ctx->buf_offset;
	struct lib_ring_buffer_backend_pages *backend_pages;
	mm_segment_t old_fs = get_fs();

	if (unlikely(!len))
		return;
	backend_pages =
		lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	pagecpy = min_t(size_t, len, (-offset) & ~PAGE_MASK);

	set_fs(KERNEL_DS);
	pagefault_disable();
	if (unlikely(!lttng_access_ok(VERIFY_READ, src, len)))
		goto fill_buffer;

	if (likely(pagecpy == len)) {
		size_t count;

		count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
					backend_pages->p[index].virt
					+ (offset & ~PAGE_MASK),
					src, len - 1);
		offset += count;
		/* Padding */
		if (unlikely(count < len - 1)) {
			size_t pad_len = len - 1 - count;

			lib_ring_buffer_do_memset(backend_pages->p[index].virt
						+ (offset & ~PAGE_MASK),
						pad, pad_len);
			offset += pad_len;
		}
		/* Ending '\0' */
		lib_ring_buffer_do_memset(backend_pages->p[index].virt
					+ (offset & ~PAGE_MASK),
					'\0', 1);
	} else {
		_lib_ring_buffer_strcpy_from_user_inatomic(bufb, offset, src,
					len, 0, pad);
	}
	pagefault_enable();
	set_fs(old_fs);
	ctx->buf_offset += len;

	return;

fill_buffer:
	pagefault_enable();
	set_fs(old_fs);
	/*
	 * In the error path we call the slow path version to avoid
	 * the pollution of static inline code.
	 */
	_lib_ring_buffer_memset(bufb, offset, pad, len - 1, 0);
	offset += len - 1;
	_lib_ring_buffer_memset(bufb, offset, '\0', 1, 0);
}
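
/*
 * Example (illustrative): as with lib_ring_buffer_strcpy(), exactly @len
 * bytes are consumed. If the userspace read faults, the record is filled
 * with @pad characters followed by a final '\0' instead of being aborted.
 */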

/*
 * This accessor counts the number of unread records in a buffer.
 * It only provides a consistent value if no reads or writes are performed
 * concurrently.
 */
static inline
unsigned long lib_ring_buffer_get_records_unread(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer *buf)
{
	struct lib_ring_buffer_backend *bufb = &buf->backend;
	struct lib_ring_buffer_backend_pages *pages;
	unsigned long records_unread = 0, sb_bindex, id;
	unsigned int i;

	for (i = 0; i < bufb->chan->backend.num_subbuf; i++) {
		id = bufb->buf_wsb[i].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		pages = bufb->array[sb_bindex];
		records_unread += v_read(config, &pages->records_unread);
	}
	if (config->mode == RING_BUFFER_OVERWRITE) {
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		pages = bufb->array[sb_bindex];
		records_unread += v_read(config, &pages->records_unread);
	}
	return records_unread;
}
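
/*
 * Example (illustrative sketch): a consumer can check this count while the
 * buffer is quiescent, e.g. at teardown ("buf" is a hypothetical
 * placeholder).
 *
 *	if (lib_ring_buffer_get_records_unread(config, buf))
 *		printk(KERN_WARNING "ring buffer: unread records remain\n");
 */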

/*
 * We use __copy_from_user_inatomic to copy userspace data after
 * checking with access_ok() and disabling page faults.
 *
 * Return 0 if OK, nonzero on error.
 */
static inline
unsigned long lib_ring_buffer_copy_from_user_check_nofault(void *dest,
						const void __user *src,
						unsigned long len)
{
	unsigned long ret;
	mm_segment_t old_fs;

	if (!lttng_access_ok(VERIFY_READ, src, len))
		return 1;
	old_fs = get_fs();
	set_fs(KERNEL_DS);
	pagefault_disable();
	ret = __copy_from_user_inatomic(dest, src, len);
	pagefault_enable();
	set_fs(old_fs);
	return ret;
}
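
/*
 * Example (illustrative sketch): copy a small userspace object from a
 * context where faulting is not allowed ("usrc" is a hypothetical
 * placeholder). A nonzero return means the copy could not complete, so a
 * default value is used instead.
 *
 *	u64 value;
 *
 *	if (lib_ring_buffer_copy_from_user_check_nofault(&value, usrc,
 *			sizeof(value)))
 *		value = 0;
 */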

#endif /* _LIB_RING_BUFFER_BACKEND_H */