Fix: unregister cpu hotplug notifier on buffer alloc error
[lttng-modules.git] lib/ringbuffer/ring_buffer_backend.c
1/*
2 * ring_buffer_backend.c
3 *
4 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; only
9 * version 2.1 of the License.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21#include <linux/stddef.h>
22#include <linux/module.h>
23#include <linux/string.h>
24#include <linux/bitops.h>
25#include <linux/delay.h>
26#include <linux/errno.h>
27#include <linux/slab.h>
28#include <linux/cpu.h>
29#include <linux/mm.h>
30
31#include "../../wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
32#include "../../wrapper/ringbuffer/config.h"
33#include "../../wrapper/ringbuffer/backend.h"
34#include "../../wrapper/ringbuffer/frontend.h"
35
36/**
37 * lib_ring_buffer_backend_allocate - allocate a channel buffer
38 * @config: ring buffer instance configuration
39 * @bufb: the buffer backend struct
40 * @size: total size of the buffer
41 * @num_subbuf: number of subbuffers
42 * @extra_reader_sb: need extra subbuffer for reader
43 */
44static
45int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
46 struct lib_ring_buffer_backend *bufb,
47 size_t size, size_t num_subbuf,
48 int extra_reader_sb)
49{
50 struct channel_backend *chanb = &bufb->chan->backend;
51 unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
52 unsigned long subbuf_size, mmap_offset = 0;
53 unsigned long num_subbuf_alloc;
54 struct page **pages;
55 void **virt;
56 unsigned long i;
57
58 num_pages = size >> PAGE_SHIFT;
59 num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
60 subbuf_size = chanb->subbuf_size;
61 num_subbuf_alloc = num_subbuf;
62
63 if (extra_reader_sb) {
64 num_pages += num_pages_per_subbuf; /* Add pages for reader */
65 num_subbuf_alloc++;
66 }
67
68 pages = kmalloc_node(ALIGN(sizeof(*pages) * num_pages,
69 1 << INTERNODE_CACHE_SHIFT),
70 GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
71 if (unlikely(!pages))
72 goto pages_error;
73
74 virt = kmalloc_node(ALIGN(sizeof(*virt) * num_pages,
75 1 << INTERNODE_CACHE_SHIFT),
76 GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
77 if (unlikely(!virt))
78 goto virt_error;
79
80 bufb->array = kmalloc_node(ALIGN(sizeof(*bufb->array)
81 * num_subbuf_alloc,
82 1 << INTERNODE_CACHE_SHIFT),
83 GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
84 if (unlikely(!bufb->array))
85 goto array_error;
86
87 for (i = 0; i < num_pages; i++) {
88 pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
89 GFP_KERNEL | __GFP_ZERO, 0);
90 if (unlikely(!pages[i]))
91 goto depopulate;
92 virt[i] = page_address(pages[i]);
93 }
94 bufb->num_pages_per_subbuf = num_pages_per_subbuf;
95
96 /* Allocate backend pages array elements */
97 for (i = 0; i < num_subbuf_alloc; i++) {
98 bufb->array[i] =
99 kzalloc_node(ALIGN(
100 sizeof(struct lib_ring_buffer_backend_pages) +
101 sizeof(struct lib_ring_buffer_backend_page)
102 * num_pages_per_subbuf,
103 1 << INTERNODE_CACHE_SHIFT),
104 GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
105 if (!bufb->array[i])
106 goto free_array;
107 }
108
109 /* Allocate write-side subbuffer table */
110 bufb->buf_wsb = kzalloc_node(ALIGN(
111 sizeof(struct lib_ring_buffer_backend_subbuffer)
112 * num_subbuf,
113 1 << INTERNODE_CACHE_SHIFT),
114 GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
115 if (unlikely(!bufb->buf_wsb))
116 goto free_array;
117
118 for (i = 0; i < num_subbuf; i++)
119 bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
120
121 /* Assign read-side subbuffer table */
122 if (extra_reader_sb)
123 bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
124 num_subbuf_alloc - 1);
125 else
126 bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
127
128 /* Assign pages to page index */
129 for (i = 0; i < num_subbuf_alloc; i++) {
130 for (j = 0; j < num_pages_per_subbuf; j++) {
131 CHAN_WARN_ON(chanb, page_idx > num_pages);
132 bufb->array[i]->p[j].virt = virt[page_idx];
133 bufb->array[i]->p[j].page = pages[page_idx];
134 page_idx++;
135 }
136 if (config->output == RING_BUFFER_MMAP) {
137 bufb->array[i]->mmap_offset = mmap_offset;
138 mmap_offset += subbuf_size;
139 }
140 }
141
142 /*
143 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
144 * will not fault.
145 */
146 wrapper_vmalloc_sync_all();
147 kfree(virt);
148 kfree(pages);
149 return 0;
150
151free_array:
152 for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
153 kfree(bufb->array[i]);
154depopulate:
155 /* Free all allocated pages */
156 for (i = 0; (i < num_pages && pages[i]); i++)
157 __free_page(pages[i]);
158 kfree(bufb->array);
159array_error:
160 kfree(virt);
161virt_error:
162 kfree(pages);
163pages_error:
164 return -ENOMEM;
165}
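/*
 * Illustrative worked example (hypothetical sizes, not part of the upstream
 * source): the function above sizes everything in whole pages. With
 * subbuf_size = 16 KiB, num_subbuf = 4 and PAGE_SIZE = 4 KiB:
 *
 *   num_pages            = (4 * 16 KiB) >> PAGE_SHIFT = 16
 *   num_pages_per_subbuf = 16 >> get_count_order(4)   = 4
 *
 * An extra reader sub-buffer (overwrite mode) adds another 4 pages, giving
 * 20 pages and 5 allocated sub-buffers, all freed again on the error paths.
 */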
166
167int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
168 struct channel_backend *chanb, int cpu)
169{
170 const struct lib_ring_buffer_config *config = &chanb->config;
171
172 bufb->chan = container_of(chanb, struct channel, backend);
173 bufb->cpu = cpu;
174
175 return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
176 chanb->num_subbuf,
177 chanb->extra_reader_sb);
178}
179
180void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
181{
182 struct channel_backend *chanb = &bufb->chan->backend;
183 unsigned long i, j, num_subbuf_alloc;
184
185 num_subbuf_alloc = chanb->num_subbuf;
186 if (chanb->extra_reader_sb)
187 num_subbuf_alloc++;
188
189 kfree(bufb->buf_wsb);
190 for (i = 0; i < num_subbuf_alloc; i++) {
191 for (j = 0; j < bufb->num_pages_per_subbuf; j++)
192 __free_page(bufb->array[i]->p[j].page);
193 kfree(bufb->array[i]);
194 }
195 kfree(bufb->array);
196 bufb->allocated = 0;
197}
198
199void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
200{
201 struct channel_backend *chanb = &bufb->chan->backend;
202 const struct lib_ring_buffer_config *config = &chanb->config;
203 unsigned long num_subbuf_alloc;
204 unsigned int i;
205
206 num_subbuf_alloc = chanb->num_subbuf;
207 if (chanb->extra_reader_sb)
208 num_subbuf_alloc++;
209
210 for (i = 0; i < chanb->num_subbuf; i++)
211 bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
212 if (chanb->extra_reader_sb)
213 bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
214 num_subbuf_alloc - 1);
215 else
216 bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
217
218 for (i = 0; i < num_subbuf_alloc; i++) {
219 /* Don't reset mmap_offset */
220 v_set(config, &bufb->array[i]->records_commit, 0);
221 v_set(config, &bufb->array[i]->records_unread, 0);
222 bufb->array[i]->data_size = 0;
223 /* Don't reset backend page and virt addresses */
224 }
225 /* Don't reset num_pages_per_subbuf, cpu, allocated */
226 v_set(config, &bufb->records_read, 0);
227}
228
229/*
230 * The frontend is responsible for also calling ring_buffer_backend_reset for
231 * each buffer when calling channel_backend_reset.
232 */
233void channel_backend_reset(struct channel_backend *chanb)
234{
235 struct channel *chan = container_of(chanb, struct channel, backend);
236 const struct lib_ring_buffer_config *config = &chanb->config;
237
238 /*
239 * Don't reset buf_size, subbuf_size, subbuf_size_order,
240 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
241 * priv, notifiers, config, cpumask and name.
242 */
243 chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
244}
245
246#ifdef CONFIG_HOTPLUG_CPU
247/**
248 * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
249 * @nb: notifier block
250 * @action: hotplug action to take
251 * @hcpu: CPU number
252 *
253 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
254 */
255static
256int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
257 unsigned long action,
258 void *hcpu)
259{
260 unsigned int cpu = (unsigned long)hcpu;
261 struct channel_backend *chanb = container_of(nb, struct channel_backend,
262 cpu_hp_notifier);
263 const struct lib_ring_buffer_config *config = &chanb->config;
264 struct lib_ring_buffer *buf;
265 int ret;
266
267 CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
268
269 switch (action) {
270 case CPU_UP_PREPARE:
271 case CPU_UP_PREPARE_FROZEN:
272 buf = per_cpu_ptr(chanb->buf, cpu);
273 ret = lib_ring_buffer_create(buf, chanb, cpu);
274 if (ret) {
275 printk(KERN_ERR
276 "ring_buffer_cpu_hp_callback: cpu %d "
277 "buffer creation failed\n", cpu);
278 return NOTIFY_BAD;
279 }
280 break;
281 case CPU_DEAD:
282 case CPU_DEAD_FROZEN:
283 /* No need to do a buffer switch here, because it will happen
284 * when tracing is stopped, or will be done by switch timer CPU
285 * DEAD callback. */
286 break;
287 }
288 return NOTIFY_OK;
289}
290#endif
291
292/**
293 * channel_backend_init - initialize a channel backend
294 * @chanb: channel backend
295 * @name: channel name
296 * @config: client ring buffer configuration
297 * @priv: client private data
298 *
299 * @subbuf_size: size of sub-buffers (>= PAGE_SIZE, power of 2)
300 * @num_subbuf: number of sub-buffers (power of 2)
301 *
302 * Returns 0 on success, a negative error code otherwise.
303 *
304 * Creates per-cpu channel buffers using the sizes and attributes
305 * specified. The created channel buffer files will be named
306 * name_0...name_N-1. File permissions will be %S_IRUSR.
307 *
308 * Called with CPU hotplug disabled.
309 */
310int channel_backend_init(struct channel_backend *chanb,
311 const char *name,
312 const struct lib_ring_buffer_config *config,
313 void *priv, size_t subbuf_size, size_t num_subbuf)
314{
315 struct channel *chan = container_of(chanb, struct channel, backend);
316 unsigned int i;
317 int ret;
318
319 if (!name)
320 return -EPERM;
321
322 /* Check that the subbuffer size is at least one page. */
323 if (subbuf_size < PAGE_SIZE)
324 return -EINVAL;
325
326 /*
327 * Make sure the number of subbuffers and subbuffer size are
328 * power of 2 and nonzero.
329 */
330 if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
331 return -EINVAL;
332 if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
333 return -EINVAL;
334 /*
335 * Overwrite mode buffers require at least 2 subbuffers per
336 * buffer.
337 */
338 if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
339 return -EINVAL;
340
341 ret = subbuffer_id_check_index(config, num_subbuf);
342 if (ret)
343 return ret;
344
345 chanb->priv = priv;
346 chanb->buf_size = num_subbuf * subbuf_size;
347 chanb->subbuf_size = subbuf_size;
348 chanb->buf_size_order = get_count_order(chanb->buf_size);
349 chanb->subbuf_size_order = get_count_order(subbuf_size);
350 chanb->num_subbuf_order = get_count_order(num_subbuf);
351 chanb->extra_reader_sb =
352 (config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
353 chanb->num_subbuf = num_subbuf;
354 strlcpy(chanb->name, name, NAME_MAX);
355 memcpy(&chanb->config, config, sizeof(chanb->config));
356
357 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
358 if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
359 return -ENOMEM;
360 }
361
362 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
363 /* Allocating the buffer per-cpu structures */
364 chanb->buf = alloc_percpu(struct lib_ring_buffer);
365 if (!chanb->buf)
366 goto free_cpumask;
367
368 /*
369 * If CONFIG_HOTPLUG_CPU is disabled and the ring buffer is allocated
370 * from an early initcall, we will not be notified of secondary CPUs,
371 * so we need to allocate for all possible CPUs.
372 */
373#ifdef CONFIG_HOTPLUG_CPU
374 /*
375 * buf->backend.allocated test takes care of concurrent CPU
376 * hotplug.
377 * Priority higher than frontend, so we create the ring buffer
378 * before we start the timer.
379 */
380 chanb->cpu_hp_notifier.notifier_call =
381 lib_ring_buffer_cpu_hp_callback;
382 chanb->cpu_hp_notifier.priority = 5;
383 register_hotcpu_notifier(&chanb->cpu_hp_notifier);
384
385 get_online_cpus();
386 for_each_online_cpu(i) {
387 ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
388 chanb, i);
389 if (ret)
390 goto free_bufs; /* cpu hotplug locked */
391 }
392 put_online_cpus();
393#else
394 for_each_possible_cpu(i) {
395 ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
396 chanb, i);
397 if (ret)
398 goto free_bufs; /* cpu hotplug locked */
399 }
400#endif
401 } else {
402 chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
403 if (!chanb->buf)
404 goto free_cpumask;
405 ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
406 if (ret)
407 goto free_bufs;
408 }
409 chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
410
411 return 0;
412
413free_bufs:
414 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
415 for_each_possible_cpu(i) {
416 struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
417
418 if (!buf->backend.allocated)
419 continue;
420 lib_ring_buffer_free(buf);
421 }
422#ifdef CONFIG_HOTPLUG_CPU
423 put_online_cpus();
424 unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
425#endif
426 free_percpu(chanb->buf);
427 } else
428 kfree(chanb->buf);
429free_cpumask:
430 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
431 free_cpumask_var(chanb->cpumask);
432 return -ENOMEM;
433}
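/*
 * Illustrative sketch (hypothetical helper; in-tree clients normally reach
 * channel_backend_init() through the frontend's channel creation path): a
 * caller satisfying the constraints checked above, i.e. both sizes are
 * non-zero powers of two, the sub-buffer is at least one page, and
 * overwrite mode uses at least two sub-buffers.
 */
static inline
int example_channel_backend_init(struct channel *chan,
				 const struct lib_ring_buffer_config *config,
				 void *priv)
{
	/* 8-page sub-buffers, 4 sub-buffers: both powers of two. */
	return channel_backend_init(&chan->backend, "example", config,
				    priv, 8 * PAGE_SIZE, 4);
}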
434
435/**
436 * channel_backend_unregister_notifiers - unregister notifiers
437 * @chanb: the channel backend
438 *
439 * Holds CPU hotplug.
440 */
441void channel_backend_unregister_notifiers(struct channel_backend *chanb)
442{
443 const struct lib_ring_buffer_config *config = &chanb->config;
444
445 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
446 unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
447}
448
449/**
450 * channel_backend_free - destroy the channel
451 * @chanb: the channel backend
452 *
453 * Destroys all channel buffers and frees the channel backend.
454 */
455void channel_backend_free(struct channel_backend *chanb)
456{
457 const struct lib_ring_buffer_config *config = &chanb->config;
458 unsigned int i;
459
460 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
461 for_each_possible_cpu(i) {
462 struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
463
464 if (!buf->backend.allocated)
465 continue;
466 lib_ring_buffer_free(buf);
467 }
468 free_cpumask_var(chanb->cpumask);
469 free_percpu(chanb->buf);
470 } else {
471 struct lib_ring_buffer *buf = chanb->buf;
472
473 CHAN_WARN_ON(chanb, !buf->backend.allocated);
474 lib_ring_buffer_free(buf);
475 kfree(buf);
476 }
477}
478
479/**
480 * lib_ring_buffer_write - write data to a ring_buffer buffer.
481 * @bufb : buffer backend
482 * @offset : offset within the buffer
483 * @src : source address
484 * @len : length to write
485 * @pagecpy : page size copied so far
486 */
487void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
488 const void *src, size_t len, size_t pagecpy)
489{
490 struct channel_backend *chanb = &bufb->chan->backend;
491 const struct lib_ring_buffer_config *config = &chanb->config;
492 size_t sbidx, index;
493 struct lib_ring_buffer_backend_pages *rpages;
494 unsigned long sb_bindex, id;
495
496 do {
497 len -= pagecpy;
498 src += pagecpy;
499 offset += pagecpy;
500 sbidx = offset >> chanb->subbuf_size_order;
501 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
502
503 /*
504 * Underlying layer should never ask for writes across
505 * subbuffers.
506 */
507 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
508
509 pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
510 id = bufb->buf_wsb[sbidx].id;
511 sb_bindex = subbuffer_id_get_index(config, id);
512 rpages = bufb->array[sb_bindex];
513 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
514 && subbuffer_id_is_noref(config, id));
515 lib_ring_buffer_do_copy(config,
516 rpages->p[index].virt
517 + (offset & ~PAGE_MASK),
518 src, pagecpy);
519 } while (unlikely(len != pagecpy));
520}
521EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
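/*
 * Worked example (hypothetical sizes, not part of the upstream source):
 * with subbuf_size = 16 KiB (subbuf_size_order = 14) and PAGE_SIZE = 4 KiB,
 * an offset of 20 KiB gives
 *
 *   sbidx = 20480 >> 14                  = 1  (second sub-buffer)
 *   index = (20480 & (16384 - 1)) >> 12  = 1  (second page of that sub-buffer)
 *
 * and each iteration copies at most PAGE_SIZE - (offset & ~PAGE_MASK) bytes,
 * so a single lib_ring_buffer_do_copy() call never crosses a page boundary.
 */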
522
523
524/**
525 * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
526 * @bufb : buffer backend
527 * @offset : offset within the buffer
528 * @c : the byte to write
529 * @len : length to write
530 * @pagecpy : page size copied so far
531 */
532void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
533 size_t offset,
534 int c, size_t len, size_t pagecpy)
535{
536 struct channel_backend *chanb = &bufb->chan->backend;
537 const struct lib_ring_buffer_config *config = &chanb->config;
538 size_t sbidx, index;
539 struct lib_ring_buffer_backend_pages *rpages;
540 unsigned long sb_bindex, id;
541
542 do {
543 len -= pagecpy;
544 offset += pagecpy;
545 sbidx = offset >> chanb->subbuf_size_order;
546 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
547
548 /*
549 * Underlying layer should never ask for writes across
550 * subbuffers.
551 */
552 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
553
554 pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
555 id = bufb->buf_wsb[sbidx].id;
556 sb_bindex = subbuffer_id_get_index(config, id);
557 rpages = bufb->array[sb_bindex];
558 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
559 && subbuffer_id_is_noref(config, id));
560 lib_ring_buffer_do_memset(rpages->p[index].virt
561 + (offset & ~PAGE_MASK),
562 c, pagecpy);
563 } while (unlikely(len != pagecpy));
564}
565EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
566
567/**
568 * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
569 * @bufb : buffer backend
570 * @offset : offset within the buffer
571 * @src : source address
572 * @len : length to write
573 * @pagecpy : page size copied so far
574 * @pad : character to use for padding
575 */
576void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
577 size_t offset, const char *src, size_t len,
578 size_t pagecpy, int pad)
579{
580 struct channel_backend *chanb = &bufb->chan->backend;
581 const struct lib_ring_buffer_config *config = &chanb->config;
582 size_t sbidx, index;
583 struct lib_ring_buffer_backend_pages *rpages;
584 unsigned long sb_bindex, id;
585 int src_terminated = 0;
586
587 CHAN_WARN_ON(chanb, !len);
588 offset += pagecpy;
589 do {
590 len -= pagecpy;
591 if (!src_terminated)
592 src += pagecpy;
593 sbidx = offset >> chanb->subbuf_size_order;
594 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
595
596 /*
597 * Underlying layer should never ask for writes across
598 * subbuffers.
599 */
600 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
601
602 pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
603 id = bufb->buf_wsb[sbidx].id;
604 sb_bindex = subbuffer_id_get_index(config, id);
605 rpages = bufb->array[sb_bindex];
606 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
607 && subbuffer_id_is_noref(config, id));
608
609 if (likely(!src_terminated)) {
610 size_t count, to_copy;
611
612 to_copy = pagecpy;
613 if (pagecpy == len)
614 to_copy--; /* Final '\0' */
615 count = lib_ring_buffer_do_strcpy(config,
616 rpages->p[index].virt
617 + (offset & ~PAGE_MASK),
618 src, to_copy);
619 offset += count;
620 /* Padding */
621 if (unlikely(count < to_copy)) {
622 size_t pad_len = to_copy - count;
623
624 /* Next pages will have padding */
625 src_terminated = 1;
626 lib_ring_buffer_do_memset(rpages->p[index].virt
627 + (offset & ~PAGE_MASK),
628 pad, pad_len);
629 offset += pad_len;
630 }
631 } else {
632 size_t pad_len;
633
634 pad_len = pagecpy;
635 if (pagecpy == len)
636 pad_len--; /* Final '\0' */
637 lib_ring_buffer_do_memset(rpages->p[index].virt
638 + (offset & ~PAGE_MASK),
639 pad, pad_len);
640 offset += pad_len;
641 }
642 } while (unlikely(len != pagecpy));
643 /* Ending '\0' */
644 lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
645 '\0', 1);
646}
647EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);
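/*
 * Worked example (hypothetical values, not part of the upstream source):
 * _lib_ring_buffer_strcpy() with len = 8, src = "abc" and no bytes copied
 * yet (pagecpy = 0) copies the three characters, pads the next four bytes
 * with @pad, and then stores the terminating '\0', so the record always
 * occupies exactly @len bytes regardless of the source string length.
 */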
648
649/**
650 * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
651 * @bufb : buffer backend
652 * @offset : offset within the buffer
653 * @src : source address
654 * @len : length to write
655 * @pagecpy : page size copied so far
656 *
657 * This function deals with userspace pointers; it should never be called
658 * directly without having the src pointer checked with access_ok()
659 * previously.
660 */
661void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
662 size_t offset,
663 const void __user *src, size_t len,
664 size_t pagecpy)
665{
666 struct channel_backend *chanb = &bufb->chan->backend;
667 const struct lib_ring_buffer_config *config = &chanb->config;
668 size_t sbidx, index;
669 struct lib_ring_buffer_backend_pages *rpages;
670 unsigned long sb_bindex, id;
671 int ret;
672
673 do {
674 len -= pagecpy;
675 src += pagecpy;
676 offset += pagecpy;
677 sbidx = offset >> chanb->subbuf_size_order;
678 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
679
680 /*
681 * Underlying layer should never ask for writes across
682 * subbuffers.
683 */
684 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
685
686 pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
687 id = bufb->buf_wsb[sbidx].id;
688 sb_bindex = subbuffer_id_get_index(config, id);
689 rpages = bufb->array[sb_bindex];
690 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
691 && subbuffer_id_is_noref(config, id));
692 ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
693 + (offset & ~PAGE_MASK),
694 src, pagecpy) != 0;
695 if (ret > 0) {
696 /* Copy failed. */
697 _lib_ring_buffer_memset(bufb, offset, 0, len, 0);
698 break; /* stop copy */
699 }
700 } while (unlikely(len != pagecpy));
701}
702EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);
703
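/*
 * Illustrative sketch (hypothetical caller; the multi-argument access_ok()
 * form shown matches kernels contemporary with this code): the slow path
 * above must only be reached after the user pointer has been validated,
 * e.g.
 *
 *	if (!access_ok(VERIFY_READ, src, len))
 *		return -EFAULT;
 *
 * If the copy nevertheless faults, the remaining @len bytes are zero-filled
 * so the reserved slot still contains well-defined data.
 */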
704/**
705 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
706 * @bufb : buffer backend
707 * @offset : offset within the buffer
708 * @src : source address
709 * @len : length to write
710 * @pagecpy : page size copied so far
711 * @pad : character to use for padding
712 *
713 * This function deals with userspace pointers; it should never be called
714 * directly without having the src pointer checked with access_ok()
715 * previously.
716 */
717void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
718 size_t offset, const char __user *src, size_t len,
719 size_t pagecpy, int pad)
720{
721 struct channel_backend *chanb = &bufb->chan->backend;
722 const struct lib_ring_buffer_config *config = &chanb->config;
723 size_t sbidx, index;
724 struct lib_ring_buffer_backend_pages *rpages;
725 unsigned long sb_bindex, id;
726 int src_terminated = 0;
727
728 offset += pagecpy;
729 do {
730 len -= pagecpy;
731 if (!src_terminated)
732 src += pagecpy;
733 sbidx = offset >> chanb->subbuf_size_order;
734 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
735
736 /*
737 * Underlying layer should never ask for writes across
738 * subbuffers.
739 */
740 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
741
742 pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
743 id = bufb->buf_wsb[sbidx].id;
744 sb_bindex = subbuffer_id_get_index(config, id);
745 rpages = bufb->array[sb_bindex];
746 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
747 && subbuffer_id_is_noref(config, id));
748
749 if (likely(!src_terminated)) {
750 size_t count, to_copy;
751
752 to_copy = pagecpy;
753 if (pagecpy == len)
754 to_copy--; /* Final '\0' */
755 count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
756 rpages->p[index].virt
757 + (offset & ~PAGE_MASK),
758 src, to_copy);
759 offset += count;
760 /* Padding */
761 if (unlikely(count < to_copy)) {
762 size_t pad_len = to_copy - count;
763
764 /* Next pages will have padding */
765 src_terminated = 1;
766 lib_ring_buffer_do_memset(rpages->p[index].virt
767 + (offset & ~PAGE_MASK),
768 pad, pad_len);
769 offset += pad_len;
770 }
771 } else {
772 size_t pad_len;
773
774 pad_len = pagecpy;
775 if (pagecpy == len)
776 pad_len--; /* Final '\0' */
777 lib_ring_buffer_do_memset(rpages->p[index].virt
778 + (offset & ~PAGE_MASK),
779 pad, pad_len);
780 offset += pad_len;
781 }
782 } while (unlikely(len != pagecpy));
783 /* Ending '\0' */
784 lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
785 '\0', 1);
786}
787EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);
788
789/**
790 * lib_ring_buffer_read - read data from a ring_buffer buffer.
791 * @bufb : buffer backend
792 * @offset : offset within the buffer
793 * @dest : destination address
794 * @len : length to copy to destination
795 *
796 * Should be protected by get_subbuf/put_subbuf.
797 * Returns the length copied.
798 */
799size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
800 void *dest, size_t len)
801{
802 struct channel_backend *chanb = &bufb->chan->backend;
803 const struct lib_ring_buffer_config *config = &chanb->config;
804 size_t index, pagecpy, orig_len;
805 struct lib_ring_buffer_backend_pages *rpages;
806 unsigned long sb_bindex, id;
807
808 orig_len = len;
809 offset &= chanb->buf_size - 1;
810 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
811 if (unlikely(!len))
812 return 0;
813 for (;;) {
814 pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
815 id = bufb->buf_rsb.id;
816 sb_bindex = subbuffer_id_get_index(config, id);
817 rpages = bufb->array[sb_bindex];
818 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
819 && subbuffer_id_is_noref(config, id));
820 memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
821 pagecpy);
822 len -= pagecpy;
823 if (likely(!len))
824 break;
825 dest += pagecpy;
826 offset += pagecpy;
827 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
828 /*
829 * Underlying layer should never ask for reads across
830 * subbuffers.
831 */
832 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
833 }
834 return orig_len;
835}
836EXPORT_SYMBOL_GPL(lib_ring_buffer_read);
837
838/**
839 * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
840 * @bufb : buffer backend
841 * @offset : offset within the buffer
842 * @dest : destination userspace address
843 * @len : length to copy to destination
844 *
845 * Should be protected by get_subbuf/put_subbuf.
846 * access_ok() must have been performed on dest addresses prior to calling this
847 * function.
848 * Returns -EFAULT on error, 0 if ok.
849 */
850int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
851 size_t offset, void __user *dest, size_t len)
852{
853 struct channel_backend *chanb = &bufb->chan->backend;
854 const struct lib_ring_buffer_config *config = &chanb->config;
855 size_t index;
856 ssize_t pagecpy;
857 struct lib_ring_buffer_backend_pages *rpages;
858 unsigned long sb_bindex, id;
859
860 offset &= chanb->buf_size - 1;
861 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
862 if (unlikely(!len))
863 return 0;
864 for (;;) {
865 pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
866 id = bufb->buf_rsb.id;
867 sb_bindex = subbuffer_id_get_index(config, id);
868 rpages = bufb->array[sb_bindex];
869 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
870 && subbuffer_id_is_noref(config, id));
871 if (__copy_to_user(dest,
872 rpages->p[index].virt + (offset & ~PAGE_MASK),
873 pagecpy))
874 return -EFAULT;
875 len -= pagecpy;
876 if (likely(!len))
877 break;
878 dest += pagecpy;
879 offset += pagecpy;
880 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
881 /*
882 * Underlying layer should never ask for reads across
883 * subbuffers.
884 */
885 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
886 }
887 return 0;
888}
889EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);
890
891/**
892 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
893 * @bufb : buffer backend
894 * @offset : offset within the buffer
895 * @dest : destination address
896 * @len : destination's length
897 *
898 * Return string's length, or -EINVAL on error.
899 * Should be protected by get_subbuf/put_subbuf.
900 * Destination length should be at least 1 to hold '\0'.
901 */
902int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
903 void *dest, size_t len)
904{
905 struct channel_backend *chanb = &bufb->chan->backend;
906 const struct lib_ring_buffer_config *config = &chanb->config;
907 size_t index;
908 ssize_t pagecpy, pagelen, strpagelen, orig_offset;
909 char *str;
910 struct lib_ring_buffer_backend_pages *rpages;
911 unsigned long sb_bindex, id;
912
913 offset &= chanb->buf_size - 1;
914 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
915 orig_offset = offset;
916 if (unlikely(!len))
917 return -EINVAL;
918 for (;;) {
919 id = bufb->buf_rsb.id;
920 sb_bindex = subbuffer_id_get_index(config, id);
921 rpages = bufb->array[sb_bindex];
922 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
923 && subbuffer_id_is_noref(config, id));
924 str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
925 pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
926 strpagelen = strnlen(str, pagelen);
927 if (len) {
928 pagecpy = min_t(size_t, len, strpagelen);
929 if (dest) {
930 memcpy(dest, str, pagecpy);
931 dest += pagecpy;
932 }
933 len -= pagecpy;
934 }
935 offset += strpagelen;
936 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
937 if (strpagelen < pagelen)
938 break;
939 /*
940 * Underlying layer should never ask for reads across
941 * subbuffers.
942 */
943 CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
944 }
945 if (dest && len)
946 ((char *)dest)[0] = 0;
947 return offset - orig_offset;
948}
949EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);
950
951/**
952 * lib_ring_buffer_read_get_page - Get a whole page to read from
953 * @bufb : buffer backend
954 * @offset : offset within the buffer
955 * @virt : pointer to page address (output)
956 *
957 * Should be protected by get_subbuf/put_subbuf.
958 * Returns the pointer to the page struct pointer.
959 */
960struct page **lib_ring_buffer_read_get_page(struct lib_ring_buffer_backend *bufb,
961 size_t offset, void ***virt)
962{
963 size_t index;
964 struct lib_ring_buffer_backend_pages *rpages;
965 struct channel_backend *chanb = &bufb->chan->backend;
966 const struct lib_ring_buffer_config *config = &chanb->config;
967 unsigned long sb_bindex, id;
968
969 offset &= chanb->buf_size - 1;
970 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
971 id = bufb->buf_rsb.id;
972 sb_bindex = subbuffer_id_get_index(config, id);
973 rpages = bufb->array[sb_bindex];
974 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
975 && subbuffer_id_is_noref(config, id));
976 *virt = &rpages->p[index].virt;
977 return &rpages->p[index].page;
978}
979EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_page);
980
981/**
982 * lib_ring_buffer_read_offset_address - get address of a buffer location
983 * @bufb : buffer backend
984 * @offset : offset within the buffer.
985 *
986 * Return the address where a given offset is located (for read).
987 * Should be used to get the current subbuffer header pointer. Given we know
988 * it's never on a page boundary, it's safe to read/write directly
989 * from/to this address, as long as the read/write is never bigger than a
990 * page size.
991 */
992void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
993 size_t offset)
994{
995 size_t index;
996 struct lib_ring_buffer_backend_pages *rpages;
997 struct channel_backend *chanb = &bufb->chan->backend;
998 const struct lib_ring_buffer_config *config = &chanb->config;
999 unsigned long sb_bindex, id;
1000
1001 offset &= chanb->buf_size - 1;
1002 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
1003 id = bufb->buf_rsb.id;
1004 sb_bindex = subbuffer_id_get_index(config, id);
1005 rpages = bufb->array[sb_bindex];
1006 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
1007 && subbuffer_id_is_noref(config, id));
1008 return rpages->p[index].virt + (offset & ~PAGE_MASK);
1009}
1010EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);
1011
1012/**
1013 * lib_ring_buffer_offset_address - get address of a location within the buffer
1014 * @bufb : buffer backend
1015 * @offset : offset within the buffer.
1016 *
1017 * Return the address where a given offset is located.
1018 * Should be used to get the current subbuffer header pointer. Given we know
1019 * it's always at the beginning of a page, it's safe to write directly to this
1020 * address, as long as the write is never bigger than a page size.
1021 */
1022void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
1023 size_t offset)
1024{
1025 size_t sbidx, index;
1026 struct lib_ring_buffer_backend_pages *rpages;
1027 struct channel_backend *chanb = &bufb->chan->backend;
1028 const struct lib_ring_buffer_config *config = &chanb->config;
1029 unsigned long sb_bindex, id;
1030
1031 offset &= chanb->buf_size - 1;
1032 sbidx = offset >> chanb->subbuf_size_order;
1033 index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
1034 id = bufb->buf_wsb[sbidx].id;
1035 sb_bindex = subbuffer_id_get_index(config, id);
1036 rpages = bufb->array[sb_bindex];
1037 CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
1038 && subbuffer_id_is_noref(config, id));
1039 return rpages->p[index].virt + (offset & ~PAGE_MASK);
1040}
1041EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);