Fix: refuse overwrite mode buffers with less than two subbuffers
[lttng-modules.git] / lib/ringbuffer/ring_buffer_backend.c
1 /*
2 * ring_buffer_backend.c
3 *
4 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; only
9 * version 2.1 of the License.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 #include <linux/stddef.h>
22 #include <linux/module.h>
23 #include <linux/string.h>
24 #include <linux/bitops.h>
25 #include <linux/delay.h>
26 #include <linux/errno.h>
27 #include <linux/slab.h>
28 #include <linux/cpu.h>
29 #include <linux/mm.h>
30
31 #include "../../wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
32 #include "../../wrapper/ringbuffer/config.h"
33 #include "../../wrapper/ringbuffer/backend.h"
34 #include "../../wrapper/ringbuffer/frontend.h"
35
36 /**
37 * lib_ring_buffer_backend_allocate - allocate a channel buffer
38 * @config: ring buffer instance configuration
39 * @bufb: the buffer backend structure
40 * @size: total size of the buffer
41 * @num_subbuf: number of subbuffers
42 * @extra_reader_sb: need extra subbuffer for reader
43 */
44 static
45 int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
46 struct lib_ring_buffer_backend *bufb,
47 size_t size, size_t num_subbuf,
48 int extra_reader_sb)
49 {
50 struct channel_backend *chanb = &bufb->chan->backend;
51 unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
52 unsigned long subbuf_size, mmap_offset = 0;
53 unsigned long num_subbuf_alloc;
54 struct page **pages;
55 void **virt;
56 unsigned long i;
57
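/*
 * Layout note: the backend splits the buffer into num_subbuf
 * sub-buffers of subbuf_size bytes, each backed by num_pages_per_subbuf
 * individually allocated pages.  When extra_reader_sb is set (overwrite
 * mode), one additional sub-buffer is allocated so the reader can own a
 * sub-buffer of its own while the writer keeps producing data.
 */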
58 num_pages = size >> PAGE_SHIFT;
59 num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
60 subbuf_size = chanb->subbuf_size;
61 num_subbuf_alloc = num_subbuf;
62
63 if (extra_reader_sb) {
64 num_pages += num_pages_per_subbuf; /* Add pages for reader */
65 num_subbuf_alloc++;
66 }
67
68 pages = kmalloc_node(ALIGN(sizeof(*pages) * num_pages,
69 1 << INTERNODE_CACHE_SHIFT),
70 GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
71 if (unlikely(!pages))
72 goto pages_error;
73
74 virt = kmalloc_node(ALIGN(sizeof(*virt) * num_pages,
75 1 << INTERNODE_CACHE_SHIFT),
76 GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
77 if (unlikely(!virt))
78 goto virt_error;
79
80 bufb->array = kmalloc_node(ALIGN(sizeof(*bufb->array)
81 * num_subbuf_alloc,
82 1 << INTERNODE_CACHE_SHIFT),
83 GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
84 if (unlikely(!bufb->array))
85 goto array_error;
86
87 for (i = 0; i < num_pages; i++) {
88 pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
89 GFP_KERNEL | __GFP_ZERO, 0);
90 if (unlikely(!pages[i]))
91 goto depopulate;
92 virt[i] = page_address(pages[i]);
93 }
94 bufb->num_pages_per_subbuf = num_pages_per_subbuf;
95
96 /* Allocate backend pages array elements */
97 for (i = 0; i < num_subbuf_alloc; i++) {
98 bufb->array[i] =
99 kzalloc_node(ALIGN(
100 sizeof(struct lib_ring_buffer_backend_pages) +
101 sizeof(struct lib_ring_buffer_backend_page)
102 * num_pages_per_subbuf,
103 1 << INTERNODE_CACHE_SHIFT),
104 GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
105 if (!bufb->array[i])
106 goto free_array;
107 }
108
109 /* Allocate write-side subbuffer table */
110 bufb->buf_wsb = kzalloc_node(ALIGN(
111 sizeof(struct lib_ring_buffer_backend_subbuffer)
112 * num_subbuf,
113 1 << INTERNODE_CACHE_SHIFT),
114 GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
115 if (unlikely(!bufb->buf_wsb))
116 goto free_array;
117
118 for (i = 0; i < num_subbuf; i++)
119 bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
120
121 /* Assign read-side subbuffer table */
122 if (extra_reader_sb)
123 bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
124 num_subbuf_alloc - 1);
125 else
126 bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
127
128 /* Assign pages to page index */
129 for (i = 0; i < num_subbuf_alloc; i++) {
130 for (j = 0; j < num_pages_per_subbuf; j++) {
131 CHAN_WARN_ON(chanb, page_idx > num_pages);
132 bufb->array[i]->p[j].virt = virt[page_idx];
133 bufb->array[i]->p[j].page = pages[page_idx];
134 page_idx++;
135 }
136 if (config->output == RING_BUFFER_MMAP) {
137 bufb->array[i]->mmap_offset = mmap_offset;
138 mmap_offset += subbuf_size;
139 }
140 }
141
142 /*
143 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
144 * will not fault.
145 */
146 wrapper_vmalloc_sync_all();
147 kfree(virt);
148 kfree(pages);
149 return 0;
150
151 free_array:
152 for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
153 kfree(bufb->array[i]);
154 depopulate:
155 /* Free all allocated pages */
156 for (i = 0; (i < num_pages && pages[i]); i++)
157 __free_page(pages[i]);
158 kfree(bufb->array);
159 array_error:
160 kfree(virt);
161 virt_error:
162 kfree(pages);
163 pages_error:
164 return -ENOMEM;
165 }
166
167 int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
168 struct channel_backend *chanb, int cpu)
169 {
170 const struct lib_ring_buffer_config *config = &chanb->config;
171
172 bufb->chan = container_of(chanb, struct channel, backend);
173 bufb->cpu = cpu;
174
175 return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
176 chanb->num_subbuf,
177 chanb->extra_reader_sb);
178 }
179
180 void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
181 {
182 struct channel_backend *chanb = &bufb->chan->backend;
183 unsigned long i, j, num_subbuf_alloc;
184
185 num_subbuf_alloc = chanb->num_subbuf;
186 if (chanb->extra_reader_sb)
187 num_subbuf_alloc++;
188
189 kfree(bufb->buf_wsb);
190 for (i = 0; i < num_subbuf_alloc; i++) {
191 for (j = 0; j < bufb->num_pages_per_subbuf; j++)
192 __free_page(bufb->array[i]->p[j].page);
193 kfree(bufb->array[i]);
194 }
195 kfree(bufb->array);
196 bufb->allocated = 0;
197 }
198
199 void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
200 {
201 struct channel_backend *chanb = &bufb->chan->backend;
202 const struct lib_ring_buffer_config *config = &chanb->config;
203 unsigned long num_subbuf_alloc;
204 unsigned int i;
205
206 num_subbuf_alloc = chanb->num_subbuf;
207 if (chanb->extra_reader_sb)
208 num_subbuf_alloc++;
209
210 for (i = 0; i < chanb->num_subbuf; i++)
211 bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
212 if (chanb->extra_reader_sb)
213 bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
214 num_subbuf_alloc - 1);
215 else
216 bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
217
218 for (i = 0; i < num_subbuf_alloc; i++) {
219 /* Don't reset mmap_offset */
220 v_set(config, &bufb->array[i]->records_commit, 0);
221 v_set(config, &bufb->array[i]->records_unread, 0);
222 bufb->array[i]->data_size = 0;
223 /* Don't reset backend page and virt addresses */
224 }
225 /* Don't reset num_pages_per_subbuf, cpu, allocated */
226 v_set(config, &bufb->records_read, 0);
227 }
228
229 /*
230 * When calling channel_backend_reset, the frontend is also responsible
231 * for calling lib_ring_buffer_backend_reset on each buffer.
232 */
233 void channel_backend_reset(struct channel_backend *chanb)
234 {
235 struct channel *chan = container_of(chanb, struct channel, backend);
236 const struct lib_ring_buffer_config *config = &chanb->config;
237
238 /*
239 * Don't reset buf_size, subbuf_size, subbuf_size_order,
240 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
241 * priv, notifiers, config, cpumask and name.
242 */
243 chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
244 }
245
246 #ifdef CONFIG_HOTPLUG_CPU
247 /**
248 * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
249 * @nb: notifier block
250 * @action: hotplug action to take
251 * @hcpu: CPU number
252 *
253 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
254 */
255 static
256 int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
257 unsigned long action,
258 void *hcpu)
259 {
260 unsigned int cpu = (unsigned long)hcpu;
261 struct channel_backend *chanb = container_of(nb, struct channel_backend,
262 cpu_hp_notifier);
263 const struct lib_ring_buffer_config *config = &chanb->config;
264 struct lib_ring_buffer *buf;
265 int ret;
266
267 CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
268
269 switch (action) {
270 case CPU_UP_PREPARE:
271 case CPU_UP_PREPARE_FROZEN:
272 buf = per_cpu_ptr(chanb->buf, cpu);
273 ret = lib_ring_buffer_create(buf, chanb, cpu);
274 if (ret) {
275 printk(KERN_ERR
276 "ring_buffer_cpu_hp_callback: cpu %d "
277 "buffer creation failed\n", cpu);
278 return NOTIFY_BAD;
279 }
280 break;
281 case CPU_DEAD:
282 case CPU_DEAD_FROZEN:
283 /* No need to do a buffer switch here, because it will happen
284 * when tracing is stopped, or will be done by switch timer CPU
285 * DEAD callback. */
286 break;
287 }
288 return NOTIFY_OK;
289 }
290 #endif
291
292 /**
293 * channel_backend_init - initialize a channel backend
294 * @chanb: channel backend
295 * @name: channel name
296 * @config: client ring buffer configuration
297 * @priv: client private data
298 * @subbuf_size: size of sub-buffers (power of 2, at least PAGE_SIZE)
299 * @num_subbuf: number of sub-buffers (power of 2; at least 2 in
300 *              overwrite mode)
301 *
302 * Returns 0 on success, a negative error value otherwise.
303 *
304 * Creates per-cpu channel buffers using the sizes and attributes
305 * specified. The created channel buffer files will be named
306 * name_0...name_N-1. File permissions will be %S_IRUSR.
307 *
308 * Called with CPU hotplug disabled.
309 */
310 int channel_backend_init(struct channel_backend *chanb,
311 const char *name,
312 const struct lib_ring_buffer_config *config,
313 void *priv, size_t subbuf_size, size_t num_subbuf)
314 {
315 struct channel *chan = container_of(chanb, struct channel, backend);
316 unsigned int i;
317 int ret;
318
319 if (!name)
320 return -EPERM;
321
322 /* Check that the sub-buffer size is at least a page. */
323 if (subbuf_size < PAGE_SIZE)
324 return -EINVAL;
325
326 /*
327 * Make sure the number of subbuffers and subbuffer size are
328 * power of 2 and nonzero.
329 */
330 if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
331 return -EINVAL;
332 if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
333 return -EINVAL;
334 /*
335 * Overwrite mode buffers require at least 2 subbuffers per buffer,
336 * so the reader can hold one sub-buffer while the writer keeps writing.
337 */
338 if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
339 return -EINVAL;
340
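/*
 * Worked example: subbuf_size = 64 kB with num_subbuf = 4 gives
 * buf_size = 256 kB.  In overwrite mode one extra sub-buffer is
 * allocated for the reader (extra_reader_sb), so the backend actually
 * allocates 5 * 64 kB = 320 kB worth of pages for this buffer.
 */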
341 ret = subbuffer_id_check_index(config, num_subbuf);
342 if (ret)
343 return ret;
344
345 chanb->priv = priv;
346 chanb->buf_size = num_subbuf * subbuf_size;
347 chanb->subbuf_size = subbuf_size;
348 chanb->buf_size_order = get_count_order(chanb->buf_size);
349 chanb->subbuf_size_order = get_count_order(subbuf_size);
350 chanb->num_subbuf_order = get_count_order(num_subbuf);
351 chanb->extra_reader_sb =
352 (config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
353 chanb->num_subbuf = num_subbuf;
354 strlcpy(chanb->name, name, NAME_MAX);
355 memcpy(&chanb->config, config, sizeof(chanb->config));
356
357 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
358 if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
359 return -ENOMEM;
360 }
361
362 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
363 /* Allocate the per-cpu buffer structures */
364 chanb->buf = alloc_percpu(struct lib_ring_buffer);
365 if (!chanb->buf)
366 goto free_cpumask;
367
368 /*
369 * Without CPU hotplug support, a ring buffer allocated from an
370 * early initcall would never be notified of secondary CPUs coming
371 * online.  In that case, allocate buffers for all possible CPUs up front.
372 */
373 #ifdef CONFIG_HOTPLUG_CPU
374 /*
375 * buf->backend.allocated test takes care of concurrent CPU
376 * hotplug.
377 * Priority higher than frontend, so we create the ring buffer
378 * before we start the timer.
379 */
380 chanb->cpu_hp_notifier.notifier_call =
381 lib_ring_buffer_cpu_hp_callback;
382 chanb->cpu_hp_notifier.priority = 5;
383 register_hotcpu_notifier(&chanb->cpu_hp_notifier);
384
385 get_online_cpus();
386 for_each_online_cpu(i) {
387 ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
388 chanb, i);
389 if (ret)
390 goto free_bufs; /* cpu hotplug locked */
391 }
392 put_online_cpus();
393 #else
394 for_each_possible_cpu(i) {
395 ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
396 chanb, i);
397 if (ret)
398 goto free_bufs; /* cpu hotplug locked */
399 }
400 #endif
401 } else {
402 chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
403 if (!chanb->buf)
404 goto free_cpumask;
405 ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
406 if (ret)
407 goto free_bufs;
408 }
409 chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
410
411 return 0;
412
413 free_bufs:
414 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
415 for_each_possible_cpu(i) {
416 struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
417
418 if (!buf->backend.allocated)
419 continue;
420 lib_ring_buffer_free(buf);
421 }
422 #ifdef CONFIG_HOTPLUG_CPU
423 put_online_cpus();
424 #endif
425 free_percpu(chanb->buf);
426 } else
427 kfree(chanb->buf);
428 free_cpumask:
429 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
430 free_cpumask_var(chanb->cpumask);
431 return -ENOMEM;
432 }
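
/*
 * Illustrative call (a minimal sketch, not part of this file; the chan,
 * config and priv variables are assumed to exist on the caller side).
 * The ring buffer frontend is the expected caller, typically from its
 * channel creation path:
 *
 *	ret = channel_backend_init(&chan->backend, "my_chan", config,
 *				   priv, 262144, 4);
 *	if (ret)
 *		goto error;
 *
 * Here 262144 (256 kB) is the sub-buffer size and 4 the sub-buffer
 * count: both must be powers of 2, the size at least PAGE_SIZE, and
 * overwrite mode requires at least 2 sub-buffers.
 */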
433
434 /**
435 * channel_backend_unregister_notifiers - unregister notifiers
436 * @chanb: the channel backend
437 *
438 * Holds CPU hotplug.
439 */
440 void channel_backend_unregister_notifiers(struct channel_backend *chanb)
441 {
442 const struct lib_ring_buffer_config *config = &chanb->config;
443
444 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
445 unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
446 }
447
448 /**
449 * channel_backend_free - destroy the channel
450 * @chanb: the channel backend
451 *
452 * Destroys all channel buffers and frees the backend's buffer memory.
453 */
454 void channel_backend_free(struct channel_backend *chanb)
455 {
456 const struct lib_ring_buffer_config *config = &chanb->config;
457 unsigned int i;
458
459 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
460 for_each_possible_cpu(i) {
461 struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
462
463 if (!buf->backend.allocated)
464 continue;
465 lib_ring_buffer_free(buf);
466 }
467 free_cpumask_var(chanb->cpumask);
468 free_percpu(chanb->buf);
469 } else {
470 struct lib_ring_buffer *buf = chanb->buf;
471
472 CHAN_WARN_ON(chanb, !buf->backend.allocated);
473 lib_ring_buffer_free(buf);
474 kfree(buf);
475 }
476 }
477
478 /**
479 * lib_ring_buffer_write - write data to a ring_buffer buffer.
480 * @bufb : buffer backend
481 * @offset : offset within the buffer
482 * @src : source address
483 * @len : length to write
484 * @pagecpy : number of bytes already copied within the current page
485 */
486 void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
487 const void *src, size_t len, size_t pagecpy)
488 {
489 struct channel_backend *chanb = &bufb->chan->backend;
490 const struct lib_ring_buffer_config *config = &chanb->config;
491 size_t sbidx, index;
492 struct lib_ring_buffer_backend_pages *rpages;
493 unsigned long sb_bindex, id;
494
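/*
 * Slow path for copies that cross a page boundary: @pagecpy bytes
 * (possibly zero) of @src have already been handled by the caller, so
 * the loop first skips over them, then copies the remainder page by
 * page within the destination sub-buffer.
 */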
495 do {
496 len -= pagecpy;
497 src += pagecpy;
498 offset += pagecpy;
499 sbidx = offset >> chanb->subbuf_size_order;
500 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
501
502 /*
503 * Underlying layer should never ask for writes across
504 * subbuffers.
505 */
506 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
507
508 pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
509 id = bufb->buf_wsb[sbidx].id;
510 sb_bindex = subbuffer_id_get_index(config, id);
511 rpages = bufb->array[sb_bindex];
512 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
513 && subbuffer_id_is_noref(config, id));
514 lib_ring_buffer_do_copy(config,
515 rpages->p[index].virt
516 + (offset & ~PAGE_MASK),
517 src, pagecpy);
518 } while (unlikely(len != pagecpy));
519 }
520 EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
521
522
523 /**
524 * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
525 * @bufb : buffer backend
526 * @offset : offset within the buffer
527 * @c : the byte to write
528 * @len : length to write
529 * @pagecpy : number of bytes already written within the current page
530 */
531 void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
532 size_t offset,
533 int c, size_t len, size_t pagecpy)
534 {
535 struct channel_backend *chanb = &bufb->chan->backend;
536 const struct lib_ring_buffer_config *config = &chanb->config;
537 size_t sbidx, index;
538 struct lib_ring_buffer_backend_pages *rpages;
539 unsigned long sb_bindex, id;
540
541 do {
542 len -= pagecpy;
543 offset += pagecpy;
544 sbidx = offset >> chanb->subbuf_size_order;
545 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
546
547 /*
548 * Underlying layer should never ask for writes across
549 * subbuffers.
550 */
551 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
552
553 pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
554 id = bufb->buf_wsb[sbidx].id;
555 sb_bindex = subbuffer_id_get_index(config, id);
556 rpages = bufb->array[sb_bindex];
557 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
558 && subbuffer_id_is_noref(config, id));
559 lib_ring_buffer_do_memset(rpages->p[index].virt
560 + (offset & ~PAGE_MASK),
561 c, pagecpy);
562 } while (unlikely(len != pagecpy));
563 }
564 EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
565
566 /**
567 * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
568 * @bufb : buffer backend
569 * @offset : offset within the buffer
570 * @src : source address
571 * @len : length to write
572 * @pagecpy : number of bytes already copied within the current page
573 * @pad : character to use for padding
574 */
575 void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
576 size_t offset, const char *src, size_t len,
577 size_t pagecpy, int pad)
578 {
579 struct channel_backend *chanb = &bufb->chan->backend;
580 const struct lib_ring_buffer_config *config = &chanb->config;
581 size_t sbidx, index;
582 struct lib_ring_buffer_backend_pages *rpages;
583 unsigned long sb_bindex, id;
584 int src_terminated = 0;
585
586 CHAN_WARN_ON(chanb, !len);
587 offset += pagecpy;
588 do {
589 len -= pagecpy;
590 if (!src_terminated)
591 src += pagecpy;
592 sbidx = offset >> chanb->subbuf_size_order;
593 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
594
595 /*
596 * Underlying layer should never ask for writes across
597 * subbuffers.
598 */
599 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
600
601 pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
602 id = bufb->buf_wsb[sbidx].id;
603 sb_bindex = subbuffer_id_get_index(config, id);
604 rpages = bufb->array[sb_bindex];
605 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
606 && subbuffer_id_is_noref(config, id));
607
608 if (likely(!src_terminated)) {
609 size_t count, to_copy;
610
611 to_copy = pagecpy;
612 if (pagecpy == len)
613 to_copy--; /* Final '\0' */
614 count = lib_ring_buffer_do_strcpy(config,
615 rpages->p[index].virt
616 + (offset & ~PAGE_MASK),
617 src, to_copy);
618 offset += count;
619 /* Padding */
620 if (unlikely(count < to_copy)) {
621 size_t pad_len = to_copy - count;
622
623 /* Next pages will have padding */
624 src_terminated = 1;
625 lib_ring_buffer_do_memset(rpages->p[index].virt
626 + (offset & ~PAGE_MASK),
627 pad, pad_len);
628 offset += pad_len;
629 }
630 } else {
631 size_t pad_len;
632
633 pad_len = pagecpy;
634 if (pagecpy == len)
635 pad_len--; /* Final '\0' */
636 lib_ring_buffer_do_memset(rpages->p[index].virt
637 + (offset & ~PAGE_MASK),
638 pad, pad_len);
639 offset += pad_len;
640 }
641 } while (unlikely(len != pagecpy));
642 /* Ending '\0' */
643 lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
644 '\0', 1);
645 }
646 EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);
647
648 /**
649 * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
650 * @bufb : buffer backend
651 * @offset : offset within the buffer
652 * @src : source address
653 * @len : length to write
654 * @pagecpy : number of bytes already copied within the current page
655 *
656 * This function deals with userspace pointers; it should never be called
657 * directly without the src pointer having first been checked with
658 * access_ok().
659 */
660 void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
661 size_t offset,
662 const void __user *src, size_t len,
663 size_t pagecpy)
664 {
665 struct channel_backend *chanb = &bufb->chan->backend;
666 const struct lib_ring_buffer_config *config = &chanb->config;
667 size_t sbidx, index;
668 struct lib_ring_buffer_backend_pages *rpages;
669 unsigned long sb_bindex, id;
670 int ret;
671
672 do {
673 len -= pagecpy;
674 src += pagecpy;
675 offset += pagecpy;
676 sbidx = offset >> chanb->subbuf_size_order;
677 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
678
679 /*
680 * Underlying layer should never ask for writes across
681 * subbuffers.
682 */
683 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
684
685 pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
686 id = bufb->buf_wsb[sbidx].id;
687 sb_bindex = subbuffer_id_get_index(config, id);
688 rpages = bufb->array[sb_bindex];
689 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
690 && subbuffer_id_is_noref(config, id));
691 ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
692 + (offset & ~PAGE_MASK),
693 src, pagecpy);
694 if (ret > 0) {
695 offset += (pagecpy - ret);
696 len -= (pagecpy - ret);
697 _lib_ring_buffer_memset(bufb, offset, 0, len, 0);
698 break; /* stop copy */
699 }
700 } while (unlikely(len != pagecpy));
701 }
702 EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);
703
704 /**
705 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
706 * @bufb : buffer backend
707 * @offset : offset within the buffer
708 * @src : source address
709 * @len : length to write
710 * @pagecpy : number of bytes already copied within the current page
711 * @pad : character to use for padding
712 *
713 * This function deals with userspace pointers; it should never be called
714 * directly without the src pointer having first been checked with
715 * access_ok().
716 */
717 void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
718 size_t offset, const char __user *src, size_t len,
719 size_t pagecpy, int pad)
720 {
721 struct channel_backend *chanb = &bufb->chan->backend;
722 const struct lib_ring_buffer_config *config = &chanb->config;
723 size_t sbidx, index;
724 struct lib_ring_buffer_backend_pages *rpages;
725 unsigned long sb_bindex, id;
726 int src_terminated = 0;
727
728 offset += pagecpy;
729 do {
730 len -= pagecpy;
731 if (!src_terminated)
732 src += pagecpy;
733 sbidx = offset >> chanb->subbuf_size_order;
734 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
735
736 /*
737 * Underlying layer should never ask for writes across
738 * subbuffers.
739 */
740 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
741
742 pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
743 id = bufb->buf_wsb[sbidx].id;
744 sb_bindex = subbuffer_id_get_index(config, id);
745 rpages = bufb->array[sb_bindex];
746 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
747 && subbuffer_id_is_noref(config, id));
748
749 if (likely(!src_terminated)) {
750 size_t count, to_copy;
751
752 to_copy = pagecpy;
753 if (pagecpy == len)
754 to_copy--; /* Final '\0' */
755 count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
756 rpages->p[index].virt
757 + (offset & ~PAGE_MASK),
758 src, to_copy);
759 offset += count;
760 /* Padding */
761 if (unlikely(count < to_copy)) {
762 size_t pad_len = to_copy - count;
763
764 /* Next pages will have padding */
765 src_terminated = 1;
766 lib_ring_buffer_do_memset(rpages->p[index].virt
767 + (offset & ~PAGE_MASK),
768 pad, pad_len);
769 offset += pad_len;
770 }
771 } else {
772 size_t pad_len;
773
774 pad_len = pagecpy;
775 if (pagecpy == len)
776 pad_len--; /* Final '\0' */
777 lib_ring_buffer_do_memset(rpages->p[index].virt
778 + (offset & ~PAGE_MASK),
779 pad, pad_len);
780 offset += pad_len;
781 }
782 } while (unlikely(len != pagecpy));
783 /* Ending '\0' */
784 lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
785 '\0', 1);
786 }
787 EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);
788
789 /**
790 * lib_ring_buffer_read - read data from a ring_buffer buffer.
791 * @bufb : buffer backend
792 * @offset : offset within the buffer
793 * @dest : destination address
794 * @len : length to copy to destination
795 *
796 * Should be protected by get_subbuf/put_subbuf.
797 * Returns the length copied.
798 */
799 size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
800 void *dest, size_t len)
801 {
802 struct channel_backend *chanb = &bufb->chan->backend;
803 const struct lib_ring_buffer_config *config = &chanb->config;
804 size_t index, pagecpy, orig_len;
805 struct lib_ring_buffer_backend_pages *rpages;
806 unsigned long sb_bindex, id;
807
808 orig_len = len;
809 offset &= chanb->buf_size - 1;
810 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
811 if (unlikely(!len))
812 return 0;
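/*
 * Walk the reader-owned sub-buffer page by page: each iteration copies
 * at most up to the next page boundary, then recomputes the page index
 * from the advanced offset.
 */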
813 for (;;) {
814 pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
815 id = bufb->buf_rsb.id;
816 sb_bindex = subbuffer_id_get_index(config, id);
817 rpages = bufb->array[sb_bindex];
818 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
819 && subbuffer_id_is_noref(config, id));
820 memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
821 pagecpy);
822 len -= pagecpy;
823 if (likely(!len))
824 break;
825 dest += pagecpy;
826 offset += pagecpy;
827 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
828 /*
829 * Underlying layer should never ask for reads across
830 * subbuffers.
831 */
832 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
833 }
834 return orig_len;
835 }
836 EXPORT_SYMBOL_GPL(lib_ring_buffer_read);
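
/*
 * Reader-side sketch (assumes the frontend get/put sub-buffer API,
 * lib_ring_buffer_get_next_subbuf()/lib_ring_buffer_put_next_subbuf();
 * these names come from the frontend header and are an assumption here,
 * as are read_offset, dest and len):
 *
 *	if (!lib_ring_buffer_get_next_subbuf(buf)) {
 *		read = lib_ring_buffer_read(&buf->backend, read_offset,
 *					    dest, len);
 *		lib_ring_buffer_put_next_subbuf(buf);
 *	}
 *
 * This get/put pair is the "get_subbuf/put_subbuf" protection the
 * comment above refers to.
 */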
837
838 /**
839 * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
840 * @bufb : buffer backend
841 * @offset : offset within the buffer
842 * @dest : destination userspace address
843 * @len : length to copy to destination
844 *
845 * Should be protected by get_subbuf/put_subbuf.
846 * access_ok() must have been performed on dest addresses prior to
847 * calling this function.
848 * Returns -EFAULT on error, 0 if ok.
849 */
850 int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
851 size_t offset, void __user *dest, size_t len)
852 {
853 struct channel_backend *chanb = &bufb->chan->backend;
854 const struct lib_ring_buffer_config *config = &chanb->config;
855 size_t index;
856 ssize_t pagecpy;
857 struct lib_ring_buffer_backend_pages *rpages;
858 unsigned long sb_bindex, id;
859
860 offset &= chanb->buf_size - 1;
861 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
862 if (unlikely(!len))
863 return 0;
864 for (;;) {
865 pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
866 id = bufb->buf_rsb.id;
867 sb_bindex = subbuffer_id_get_index(config, id);
868 rpages = bufb->array[sb_bindex];
869 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
870 && subbuffer_id_is_noref(config, id));
871 if (__copy_to_user(dest,
872 rpages->p[index].virt + (offset & ~PAGE_MASK),
873 pagecpy))
874 return -EFAULT;
875 len -= pagecpy;
876 if (likely(!len))
877 break;
878 dest += pagecpy;
879 offset += pagecpy;
880 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
881 /*
882 * Underlying layer should never ask for reads across
883 * subbuffers.
884 */
885 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
886 }
887 return 0;
888 }
889 EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);
890
891 /**
892 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
893 * @bufb : buffer backend
894 * @offset : offset within the buffer
895 * @dest : destination address
896 * @len : destination's length
897 *
898 * Returns the string's length, or -EINVAL on error.
899 * Should be protected by get_subbuf/put_subbuf.
900 * Destination length should be at least 1 to hold '\0'.
901 */
902 int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
903 void *dest, size_t len)
904 {
905 struct channel_backend *chanb = &bufb->chan->backend;
906 const struct lib_ring_buffer_config *config = &chanb->config;
907 size_t index;
908 ssize_t pagecpy, pagelen, strpagelen, orig_offset;
909 char *str;
910 struct lib_ring_buffer_backend_pages *rpages;
911 unsigned long sb_bindex, id;
912
913 offset &= chanb->buf_size - 1;
914 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
915 orig_offset = offset;
916 if (unlikely(!len))
917 return -EINVAL;
918 for (;;) {
919 id = bufb->buf_rsb.id;
920 sb_bindex = subbuffer_id_get_index(config, id);
921 rpages = bufb->array[sb_bindex];
922 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
923 && subbuffer_id_is_noref(config, id));
924 str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
925 pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
926 strpagelen = strnlen(str, pagelen);
927 if (len) {
928 pagecpy = min_t(size_t, len, strpagelen);
929 if (dest) {
930 memcpy(dest, str, pagecpy);
931 dest += pagecpy;
932 }
933 len -= pagecpy;
934 }
935 offset += strpagelen;
936 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
937 if (strpagelen < pagelen)
938 break;
939 /*
940 * Underlying layer should never ask for reads across
941 * subbuffers.
942 */
943 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
944 }
945 if (dest && len)
946 ((char *)dest)[0] = 0;
947 return offset - orig_offset;
948 }
949 EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);
950
951 /**
952 * lib_ring_buffer_read_get_page - Get a whole page to read from
953 * @bufb : buffer backend
954 * @offset : offset within the buffer
955 * @virt : pointer to page address (output)
956 *
957 * Should be protected by get_subbuf/put_subbuf.
958 * Returns a pointer to the page struct pointer for the page at @offset.
959 */
960 struct page **lib_ring_buffer_read_get_page(struct lib_ring_buffer_backend *bufb,
961 size_t offset, void ***virt)
962 {
963 size_t index;
964 struct lib_ring_buffer_backend_pages *rpages;
965 struct channel_backend *chanb = &bufb->chan->backend;
966 const struct lib_ring_buffer_config *config = &chanb->config;
967 unsigned long sb_bindex, id;
968
969 offset &= chanb->buf_size - 1;
970 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
971 id = bufb->buf_rsb.id;
972 sb_bindex = subbuffer_id_get_index(config, id);
973 rpages = bufb->array[sb_bindex];
974 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
975 && subbuffer_id_is_noref(config, id));
976 *virt = &rpages->p[index].virt;
977 return &rpages->p[index].page;
978 }
979 EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_page);
980
981 /**
982 * lib_ring_buffer_read_offset_address - get address of a buffer location
983 * @bufb : buffer backend
984 * @offset : offset within the buffer.
985 *
986 * Return the address where a given offset is located (for read).
987 * Should be used to get the current subbuffer header pointer. Given we know
988 * it's never on a page boundary, it's safe to read/write directly
989 * from/to this address, as long as the read/write is never bigger than a
990 * page size.
991 */
992 void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
993 size_t offset)
994 {
995 size_t index;
996 struct lib_ring_buffer_backend_pages *rpages;
997 struct channel_backend *chanb = &bufb->chan->backend;
998 const struct lib_ring_buffer_config *config = &chanb->config;
999 unsigned long sb_bindex, id;
1000
1001 offset &= chanb->buf_size - 1;
1002 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
1003 id = bufb->buf_rsb.id;
1004 sb_bindex = subbuffer_id_get_index(config, id);
1005 rpages = bufb->array[sb_bindex];
1006 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
1007 && subbuffer_id_is_noref(config, id));
1008 return rpages->p[index].virt + (offset & ~PAGE_MASK);
1009 }
1010 EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);
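
/*
 * Note: lib_ring_buffer_read_offset_address() resolves @offset through
 * the reader-owned sub-buffer (buf_rsb), whereas
 * lib_ring_buffer_offset_address() below resolves it through the
 * write-side table (buf_wsb); use the former on the read path and the
 * latter on the write path.
 */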
1011
1012 /**
1013 * lib_ring_buffer_offset_address - get address of a location within the buffer
1014 * @bufb : buffer backend
1015 * @offset : offset within the buffer.
1016 *
1017 * Return the address where a given offset is located.
1018 * Should be used to get the current subbuffer header pointer. Given we know
1019 * it's always at the beginning of a page, it's safe to write directly to this
1020 * address, as long as the write is never bigger than a page size.
1021 */
1022 void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
1023 size_t offset)
1024 {
1025 size_t sbidx, index;
1026 struct lib_ring_buffer_backend_pages *rpages;
1027 struct channel_backend *chanb = &bufb->chan->backend;
1028 const struct lib_ring_buffer_config *config = &chanb->config;
1029 unsigned long sb_bindex, id;
1030
1031 offset &= chanb->buf_size - 1;
1032 sbidx = offset >> chanb->subbuf_size_order;
1033 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
1034 id = bufb->buf_wsb[sbidx].id;
1035 sb_bindex = subbuffer_id_get_index(config, id);
1036 rpages = bufb->array[sb_bindex];
1037 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
1038 && subbuffer_id_is_noref(config, id));
1039 return rpages->p[index].virt + (offset & ~PAGE_MASK);
1040 }
1041 EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);