Cleanup: Move lib/ringbuffer/ headers to include/ringbuffer/
[lttng-modules.git] / lib/ringbuffer/ring_buffer_backend.c
/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <wrapper/mm.h>
#include <wrapper/vmalloc.h>	/* for wrapper_vmalloc_sync_mappings() */
#include <ringbuffer/config.h>
#include <ringbuffer/backend.h>
#include <ringbuffer/frontend.h>

/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the buffer struct
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
				     struct lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	unsigned long i;

	num_pages = size >> PAGE_SHIFT;

	/*
	 * Verify that there are enough free pages available on the system for
	 * the current allocation request.
	 * wrapper_check_enough_free_pages uses si_mem_available() if available
	 * and returns if there should be enough free pages based on the
	 * current estimate.
	 */
	if (!wrapper_check_enough_free_pages(num_pages))
		goto not_enough_pages;

	/*
	 * Set the current user thread as the first target of the OOM killer.
	 * If the estimate received by si_mem_available() was off, and we do
	 * end up running out of memory because of this buffer allocation, we
	 * want to kill the offending app first.
	 */
	wrapper_set_current_oom_origin();

	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf; /* Add pages for reader */
		num_subbuf_alloc++;
	}

	pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
				   1 << INTERNODE_CACHE_SHIFT),
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!pages))
		goto pages_error;

	bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
					 * num_subbuf_alloc,
				  1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL | __GFP_NOWARN,
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->array))
		goto array_error;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
					    GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
		if (unlikely(!pages[i]))
			goto depopulate;
	}
	bufb->num_pages_per_subbuf = num_pages_per_subbuf;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		bufb->array[i] =
			lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_pages) +
				sizeof(struct lib_ring_buffer_backend_page)
				* num_pages_per_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
		if (!bufb->array[i])
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_subbuffer)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_wsb))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Allocate subbuffer packet counter table */
	bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_counts)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_cnt))
		goto free_wsb;

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < num_pages_per_subbuf; j++) {
			CHAN_WARN_ON(chanb, page_idx > num_pages);
			bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
			bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
			page_idx++;
		}
		if (config->output == RING_BUFFER_MMAP) {
			bufb->array[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	/*
	 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
	 * will not fault.
	 */
	wrapper_vmalloc_sync_mappings();
	wrapper_clear_current_oom_origin();
	vfree(pages);
	return 0;

free_wsb:
	lttng_kvfree(bufb->buf_wsb);
free_array:
	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
		lttng_kvfree(bufb->array[i]);
depopulate:
	/* Free all allocated pages */
	for (i = 0; (i < num_pages && pages[i]); i++)
		__free_page(pages[i]);
	lttng_kvfree(bufb->array);
array_error:
	vfree(pages);
pages_error:
	wrapper_clear_current_oom_origin();
not_enough_pages:
	return -ENOMEM;
}

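/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * assuming 4 kB pages, a 16 kB buffer split into 4 subbuffers in overwrite
 * mode works out above as:
 *
 *	num_pages            = 16384 >> PAGE_SHIFT            = 4
 *	num_pages_per_subbuf = 4 >> get_count_order(4)         = 1
 *	num_subbuf_alloc     = 4 + 1 (extra reader subbuffer)  = 5
 *	num_pages            = 4 + 1                           = 5
 *
 * Each bufb->array[i] then describes the pages of one subbuffer (virtual
 * address and pfn), buf_wsb[] holds the write-side subbuffer ids, and
 * buf_rsb holds the id of the subbuffer currently owned by the reader.
 */
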
int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	bufb->chan = container_of(chanb, struct channel, backend);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb);
}

void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long i, j, num_subbuf_alloc;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	lttng_kvfree(bufb->buf_wsb);
	lttng_kvfree(bufb->buf_cnt);
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < bufb->num_pages_per_subbuf; j++)
			__free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
		lttng_kvfree(bufb->array[i]);
	}
	lttng_kvfree(bufb->array);
	bufb->allocated = 0;
}

void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &bufb->array[i]->records_commit, 0);
		v_set(config, &bufb->array[i]->records_unread, 0);
		bufb->array[i]->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}

/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	const struct lib_ring_buffer_config *config = &chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

/*
 * No need to implement a "dead" callback to do a buffer switch here,
 * because it will happen when tracing is stopped, or will be done by
 * the switch timer CPU DEAD callback.
 * We don't free buffers when a CPU goes away, because it would make
 * trace data vanish, which is unwanted.
 */
int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct channel_backend *chanb = container_of(node,
			struct channel_backend, cpuhp_prepare);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	buf = per_cpu_ptr(chanb->buf, cpu);
	ret = lib_ring_buffer_create(buf, chanb, cpu);
	if (ret) {
		printk(KERN_ERR
			"ring_buffer_cpu_hp_callback: cpu %d "
			"buffer creation failed\n", cpu);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

#ifdef CONFIG_HOTPLUG_CPU

/**
 * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
static
int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
				    unsigned long action,
				    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct channel_backend *chanb = container_of(nb, struct channel_backend,
						     cpu_hp_notifier);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		buf = per_cpu_ptr(chanb->buf, cpu);
		ret = lib_ring_buffer_create(buf, chanb, cpu);
		if (ret) {
			printk(KERN_ERR
				"ring_buffer_cpu_hp_callback: cpu %d "
				"buffer creation failed\n", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/*
		 * No need to do a buffer switch here, because it will happen
		 * when tracing is stopped, or will be done by the switch
		 * timer CPU DEAD callback.
		 */
		break;
	}
	return NOTIFY_OK;
}

#endif

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (>= PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 *
 * Returns 0 on success, a negative error code on failure.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffer files will be named
 * name_0...name_N-1. File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;

	if (!name)
		return -EPERM;

	/* Check that the subbuffer size is at least as large as a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * power of 2 and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;
	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->priv = priv;
	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
		(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strlcpy(chanb->name, name, NAME_MAX);
	memcpy(&chanb->config, config, sizeof(chanb->config));

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
			return -ENOMEM;
	}

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		/* Allocating the buffer per-cpu structures */
		chanb->buf = alloc_percpu(struct lib_ring_buffer);
		if (!chanb->buf)
			goto free_cpumask;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
		ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
			&chanb->cpuhp_prepare.node);
		if (ret)
			goto free_bufs;
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

		{
			/*
			 * In case of non-hotplug cpu, if the ring-buffer is allocated
			 * in early initcall, it will not be notified of secondary cpus.
			 * In that case, we need to allocate for all possible cpus.
			 */
#ifdef CONFIG_HOTPLUG_CPU
			/*
			 * buf->backend.allocated test takes care of concurrent CPU
			 * hotplug.
			 * Priority higher than frontend, so we create the ring buffer
			 * before we start the timer.
			 */
			chanb->cpu_hp_notifier.notifier_call =
					lib_ring_buffer_cpu_hp_callback;
			chanb->cpu_hp_notifier.priority = 5;
			register_hotcpu_notifier(&chanb->cpu_hp_notifier);

			get_online_cpus();
			for_each_online_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							     chanb, i);
				if (ret)
					goto free_bufs;	/* cpu hotplug locked */
			}
			put_online_cpus();
#else
			for_each_possible_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							     chanb, i);
				if (ret)
					goto free_bufs;
			}
#endif
		}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	} else {
		chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
		if (!chanb->buf)
			goto free_cpumask;
		ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		/*
		 * Teardown of lttng_rb_hp_prepare instance
		 * on "add" error is handled within cpu hotplug,
		 * no teardown to do from the caller.
		 */
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
#ifdef CONFIG_HOTPLUG_CPU
		put_online_cpus();
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf =
				per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_percpu(chanb->buf);
	} else
		kfree(chanb->buf);
free_cpumask:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		free_cpumask_var(chanb->cpumask);
	return -ENOMEM;
}

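/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * channel_backend_init() is not called directly by clients; the frontend's
 * channel_create() invokes it roughly as follows, with subbuf_size and
 * num_subbuf both powers of two, subbuf_size >= PAGE_SIZE, and at least
 * 2 subbuffers in overwrite mode, as enforced by the checks above:
 *
 *	ret = channel_backend_init(&chan->backend, name, config, priv,
 *				   subbuf_size, num_subbuf);
 *	if (ret)
 *		goto error;
 */
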
/**
 * channel_backend_unregister_notifiers - unregister notifiers
 * @chanb: the channel backend
 *
 * Holds CPU hotplug.
 */
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		int ret;

		ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
				&chanb->cpuhp_prepare.node);
		WARN_ON(ret);
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	}
}

/**
 * channel_backend_free - destroy the channel backend
 * @chanb: the channel backend
 *
 * Destroys all channel buffers and frees the channel backend resources.
 */
void channel_backend_free(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned int i;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_cpumask_var(chanb->cpumask);
		free_percpu(chanb->buf);
	} else {
		struct lib_ring_buffer *buf = chanb->buf;

		CHAN_WARN_ON(chanb, !buf->backend.allocated);
		lib_ring_buffer_free(buf);
		kfree(buf);
	}
}

/**
 * lib_ring_buffer_write - write data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 */
void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
			    const void *src, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_copy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);

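/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * assuming 4 kB pages (PAGE_SHIFT == 12) and 16 kB subbuffers
 * (subbuf_size_order == 14), a write at offset == 0x5ff0 resolves to:
 *
 *	sbidx       = 0x5ff0 >> 14            = 1	(second subbuffer)
 *	index       = (0x5ff0 & 0x3fff) >> 12 = 1	(second page of it)
 *	page offset = 0x5ff0 & ~PAGE_MASK     = 0xff0
 *
 * so at most PAGE_SIZE - 0xff0 = 16 bytes are copied before the loop
 * above moves on to the next backend page.
 */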

/**
 * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @c : the byte to write
 * @len : length to write
 * @pagecpy : page size copied so far
 */
void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
			     size_t offset,
			     int c, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_memset(rpages->p[index].virt
					  + (offset & ~PAGE_MASK),
					  c, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);

/**
 * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 * @pad : character to use for padding
 */
void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
			     size_t offset, const char *src, size_t len,
			     size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	CHAN_WARN_ON(chanb, !len);
	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
			'\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);

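/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * the strcpy path (fast path plus this slow path) reserves the last byte of
 * the @len-byte write for the terminating '\0' and pads short strings with
 * @pad. For example, writing the string "hi" into an 8-byte reservation
 * with pad == '#' stores:
 *
 *	'h' 'i' '#' '#' '#' '#' '#' '\0'
 *
 * Once the source string ends, the remaining pages are padded without
 * touching @src again (the src_terminated flag above).
 */
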
/**
 * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 *
 * This function deals with userspace pointers, it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
					      size_t offset,
					      const void __user *src, size_t len,
					      size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int ret;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
							+ (offset & ~PAGE_MASK),
							src, pagecpy) != 0;
		if (ret > 0) {
			/* Copy failed. */
			_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
			break; /* stop copy */
		}
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);

/**
 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 * @pad : character to use for padding
 *
 * This function deals with userspace pointers, it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
		size_t offset, const char __user *src, size_t len,
		size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
			'\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);

/**
 * lib_ring_buffer_read - read data from a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index, pagecpy, orig_len;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	orig_len = len;
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
		       pagecpy);
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return orig_len;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read);

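/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * a reader is expected to own the subbuffer for the whole copy, along the
 * lines of the frontend API (error handling omitted; read_offset, dest and
 * len are caller-provided):
 *
 *	if (!lib_ring_buffer_get_next_subbuf(buf)) {
 *		lib_ring_buffer_read(&buf->backend, read_offset, dest, len);
 *		lib_ring_buffer_put_next_subbuf(buf);
 *	}
 */
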
/**
 * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination userspace address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * access_ok() must have been performed on dest addresses prior to calling
 * this function.
 * Returns -EFAULT on error, 0 if ok.
 */
int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
				   size_t offset, void __user *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		if (__copy_to_user(dest,
				   rpages->p[index].virt + (offset & ~PAGE_MASK),
				   pagecpy))
			return -EFAULT;
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);

/**
 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 *
 * Return string's length, or -EINVAL on error.
 * Should be protected by get_subbuf/put_subbuf.
 * Destination length should be at least 1 to hold '\0'.
 */
int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy, pagelen, strpagelen, orig_offset;
	char *str;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	orig_offset = offset;
	if (unlikely(!len))
		return -EINVAL;
	for (;;) {
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
		pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
		strpagelen = strnlen(str, pagelen);
		if (len) {
			pagecpy = min_t(size_t, len, strpagelen);
			if (dest) {
				memcpy(dest, str, pagecpy);
				dest += pagecpy;
			}
			len -= pagecpy;
		}
		offset += strpagelen;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		if (strpagelen < pagelen)
			break;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	if (dest && len)
		((char *)dest)[0] = 0;
	return offset - orig_offset;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);

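/*
 * Illustrative sketch (editor's addition, not part of the upstream file):
 * the return value is the length of the string stored in the buffer, even
 * when it does not fit in @dest. Reading a buffer containing "abcdef\0"
 * into a 3-byte destination copies "abc" (no terminating '\0' fits) and
 * still returns 6.
 */
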
/**
 * lib_ring_buffer_read_get_pfn - Get a page frame number to read from
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @virt : pointer to page address (output)
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the pointer to the page frame number unsigned long.
 */
unsigned long *lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb,
					    size_t offset, void ***virt)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	*virt = &rpages->p[index].virt;
	return &rpages->p[index].pfn;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);

/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to read/write directly
 * from/to this address, as long as the read/write is never bigger than a
 * page size.
 */
void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
					  size_t offset)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);

/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
				     size_t offset)
{
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);