/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/oom.h>

#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>

/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: buffer backend
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
				     struct lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	unsigned long i;

	num_pages = size >> PAGE_SHIFT;

	/*
	 * Verify that there are enough free pages available on the system
	 * for the current allocation request.
	 * si_mem_available() returns an estimate of the number of pages that
	 * can still be allocated without pushing the system into swap;
	 * refuse the request if it exceeds that estimate.
	 */
	if (num_pages >= si_mem_available())
		goto not_enough_pages;

	/*
	 * Set the current user thread as the first target of the OOM killer.
	 * If the estimate received by si_mem_available() was off, and we do
	 * end up running out of memory because of this buffer allocation, we
	 * want to kill the offending app first.
	 */
	set_current_oom_origin();

	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf; /* Add pages for reader */
		num_subbuf_alloc++;
	}

	pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
				   1 << INTERNODE_CACHE_SHIFT),
			     cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!pages))
		goto pages_error;

	bufb->array = kvmalloc_node(ALIGN(sizeof(*bufb->array)
					  * num_subbuf_alloc,
					  1 << INTERNODE_CACHE_SHIFT),
				    GFP_KERNEL | __GFP_NOWARN,
				    cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->array))
		goto array_error;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
					    GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
		if (unlikely(!pages[i]))
			goto depopulate;
	}
	bufb->num_pages_per_subbuf = num_pages_per_subbuf;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		bufb->array[i] =
			kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_pages) +
				sizeof(struct lib_ring_buffer_backend_page)
				* num_pages_per_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
		if (!bufb->array[i])
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	bufb->buf_wsb = kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_subbuffer)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_wsb))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Allocate subbuffer packet counter table */
	bufb->buf_cnt = kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_counts)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_cnt))
		goto free_wsb;

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < num_pages_per_subbuf; j++) {
			CHAN_WARN_ON(chanb, page_idx > num_pages);
			bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
			bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
			page_idx++;
		}
		if (config->output == RING_BUFFER_MMAP) {
			bufb->array[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	clear_current_oom_origin();
	vfree(pages);
	return 0;

free_wsb:
	kvfree(bufb->buf_wsb);
free_array:
	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
		kvfree(bufb->array[i]);
depopulate:
	/* Free all allocated pages */
	for (i = 0; (i < num_pages && pages[i]); i++)
		__free_page(pages[i]);
	kvfree(bufb->array);
array_error:
	vfree(pages);
pages_error:
	clear_current_oom_origin();
not_enough_pages:
	return -ENOMEM;
}

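/*
 * Worked example (illustrative sketch only, assuming 4 KiB pages): for a
 * buffer of size = 256 KiB with num_subbuf = 4, the arithmetic above gives
 *
 *	num_pages            = 256 KiB >> PAGE_SHIFT        = 64
 *	num_pages_per_subbuf = 64 >> get_count_order(4)     = 16
 *	extra_reader_sb      -> num_pages = 64 + 16         = 80
 *	num_subbuf_alloc     = 4 + 1                        = 5
 *
 * so the backend ends up with 5 sub-buffer page tables of 16 pages each,
 * the extra one being the sub-buffer handed to the reader.
 */
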
int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	bufb->chan = container_of(chanb, struct channel, backend);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb);
}

void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long i, j, num_subbuf_alloc;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	kvfree(bufb->buf_wsb);
	kvfree(bufb->buf_cnt);
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < bufb->num_pages_per_subbuf; j++)
			__free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
		kvfree(bufb->array[i]);
	}
	kvfree(bufb->array);
	bufb->allocated = 0;
}

void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &bufb->array[i]->records_commit, 0);
		v_set(config, &bufb->array[i]->records_unread, 0);
		bufb->array[i]->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}

/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	const struct lib_ring_buffer_config *config = &chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

/*
 * No need to implement a "dead" callback to do a buffer switch here,
 * because it will happen when tracing is stopped, or will be done by the
 * switch timer CPU DEAD callback.
 * We don't free buffers when a CPU goes away, because that would make
 * trace data vanish, which is unwanted.
 */
int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct channel_backend *chanb = container_of(node,
			struct channel_backend, cpuhp_prepare);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	buf = per_cpu_ptr(chanb->buf, cpu);
	ret = lib_ring_buffer_create(buf, chanb, cpu);
	if (ret) {
		printk(KERN_ERR
			"ring_buffer_cpu_hp_callback: cpu %d "
			"buffer creation failed\n", cpu);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

#ifdef CONFIG_HOTPLUG_CPU

/**
 * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
static
int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
				    unsigned long action,
				    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct channel_backend *chanb = container_of(nb, struct channel_backend,
						     cpu_hp_notifier);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		buf = per_cpu_ptr(chanb->buf, cpu);
		ret = lib_ring_buffer_create(buf, chanb, cpu);
		if (ret) {
			printk(KERN_ERR
				"ring_buffer_cpu_hp_callback: cpu %d "
				"buffer creation failed\n", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/*
		 * No need to do a buffer switch here, because it will happen
		 * when tracing is stopped, or will be done by the switch
		 * timer CPU DEAD callback.
		 */
		break;
	}
	return NOTIFY_OK;
}

#endif

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (>= PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 *
 * Returns 0 on success, a negative error code otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffer files will be named
 * name_0...name_N-1. File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;

	if (!name)
		return -EPERM;

	/* Check that the subbuffer size is at least as large as a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and the subbuffer size are
	 * nonzero powers of 2.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;
	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->priv = priv;
	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
		(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strlcpy(chanb->name, name, NAME_MAX);
	memcpy(&chanb->config, config, sizeof(chanb->config));

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
			return -ENOMEM;
	}

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		/* Allocating the buffer per-cpu structures */
		chanb->buf = alloc_percpu(struct lib_ring_buffer);
		if (!chanb->buf)
			goto free_cpumask;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
		ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
			&chanb->cpuhp_prepare.node);
		if (ret)
			goto free_bufs;
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

		{
			/*
			 * If CPU hotplug is not available and the ring buffer
			 * is allocated from an early initcall, it will not be
			 * notified of secondary CPUs. In that case, allocate
			 * for all possible CPUs.
			 */
#ifdef CONFIG_HOTPLUG_CPU
			/*
			 * The buf->backend.allocated test takes care of
			 * concurrent CPU hotplug.
			 * Our priority is higher than the frontend's, so we
			 * create the ring buffer before the timer is started.
			 */
			chanb->cpu_hp_notifier.notifier_call =
					lib_ring_buffer_cpu_hp_callback;
			chanb->cpu_hp_notifier.priority = 5;
			register_hotcpu_notifier(&chanb->cpu_hp_notifier);

			get_online_cpus();
			for_each_online_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							     chanb, i);
				if (ret)
					goto free_bufs; /* cpu hotplug locked */
			}
			put_online_cpus();
#else
			for_each_possible_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							     chanb, i);
				if (ret)
					goto free_bufs;
			}
#endif
		}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	} else {
		chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
		if (!chanb->buf)
			goto free_cpumask;
		ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		/*
		 * Teardown of the lttng_rb_hp_prepare instance on "add"
		 * error is handled within cpu hotplug; there is no teardown
		 * to do from the caller.
		 */
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
#ifdef CONFIG_HOTPLUG_CPU
		put_online_cpus();
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf =
				per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_percpu(chanb->buf);
	} else
		kfree(chanb->buf);
free_cpumask:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		free_cpumask_var(chanb->cpumask);
	return -ENOMEM;
}

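/*
 * Usage sketch (illustrative only; "my_channel", client_config and priv are
 * hypothetical placeholders, not values from this file): the frontend's
 * channel creation path is expected to call channel_backend_init() with
 * power-of-2 sizes, roughly:
 *
 *	ret = channel_backend_init(&chan->backend, "my_channel",
 *				   &client_config, priv,
 *				   2 * PAGE_SIZE, 8);
 *	if (ret)
 *		goto error;
 *
 * subbuf_size must be at least PAGE_SIZE, both sizes must be powers of 2,
 * and overwrite mode needs at least 2 sub-buffers, as checked above.
 */
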
/**
 * channel_backend_unregister_notifiers - unregister notifiers
 * @chanb: the channel backend
 *
 * Holds CPU hotplug.
 */
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		int ret;

		ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
				&chanb->cpuhp_prepare.node);
		WARN_ON(ret);
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	}
}

/**
 * channel_backend_free - destroy the channel backend
 * @chanb: the channel backend
 *
 * Destroys all channel buffers and frees the channel backend.
 */
void channel_backend_free(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned int i;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_cpumask_var(chanb->cpumask);
		free_percpu(chanb->buf);
	} else {
		struct lib_ring_buffer *buf = chanb->buf;

		CHAN_WARN_ON(chanb, !buf->backend.allocated);
		lib_ring_buffer_free(buf);
		kfree(buf);
	}
}

/**
 * lib_ring_buffer_write - write data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 */
void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
			    const void *src, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_copy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);

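/*
 * Worked example (illustrative, assuming 4 KiB pages and 8 KiB sub-buffers,
 * i.e. subbuf_size_order = 13): for offset = 0x2345,
 *
 *	sbidx = 0x2345 >> 13            = 1     (second sub-buffer)
 *	index = (0x2345 & 0x1fff) >> 12 = 0     (first page of that sub-buffer)
 *	offset & ~PAGE_MASK             = 0x345 (byte within the page)
 *
 * and pagecpy is capped at PAGE_SIZE - 0x345, so a single copy never crosses
 * a page boundary; the do/while loop above continues on the next page until
 * len is exhausted.
 */
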

/**
 * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @c : the byte to write
 * @len : length to write
 * @pagecpy : page size copied so far
 */
void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
			     size_t offset,
			     int c, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_memset(rpages->p[index].virt
					  + (offset & ~PAGE_MASK),
					  c, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);

/**
 * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 * @pad : character to use for padding
 */
void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
			     size_t offset, const char *src, size_t len,
			     size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	CHAN_WARN_ON(chanb, !len);
	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
				  '\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);

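/*
 * Worked example (illustrative): writing src = "ab" into a 6-byte string
 * field with pad = '#' is expected to store
 *
 *	'a' 'b' '#' '#' '#' '\0'
 *
 * lib_ring_buffer_do_strcpy() copies 2 bytes before reaching the source's
 * terminating NUL, the remaining 3 bytes of the field are filled with the
 * pad character, and the final byte is always forced to '\0' above.
 */
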
/**
 * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 *
 * This function deals with userspace pointers; it should never be called
 * without the src pointer having been checked with access_ok() first.
 */
void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
				      size_t offset,
				      const void __user *src, size_t len,
				      size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int ret;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
							+ (offset & ~PAGE_MASK),
							src, pagecpy) != 0;
		if (ret > 0) {
			/* Copy failed. */
			_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
			break; /* stop copy */
		}
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);

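/*
 * Illustrative caller pattern (a sketch, not a verbatim quote of the actual
 * write path): the user pointer is expected to be validated and page faults
 * disabled around the inatomic copy, roughly:
 *
 *	if (!access_ok(src, len))	// two-argument form on recent kernels
 *		return -EFAULT;
 *	pagefault_disable();
 *	_lib_ring_buffer_copy_from_user_inatomic(bufb, offset, src, len, 0);
 *	pagefault_enable();
 */
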
/**
 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 * @pad : character to use for padding
 *
 * This function deals with userspace pointers; it should never be called
 * without the src pointer having been checked with access_ok() first.
 */
void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
		size_t offset, const char __user *src, size_t len,
		size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
				  '\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);

/**
 * lib_ring_buffer_read - read data from a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index, pagecpy, orig_len;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	orig_len = len;
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
		       pagecpy);
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return orig_len;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read);

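/*
 * Usage sketch (illustrative; assumes the frontend's get_subbuf/put_subbuf
 * API referenced in the comment above): a reader typically brackets the copy
 * with sub-buffer ownership, roughly:
 *
 *	if (lib_ring_buffer_get_subbuf(buf, consumed))
 *		return -EAGAIN;
 *	read = lib_ring_buffer_read(&buf->backend, offset, dest, len);
 *	lib_ring_buffer_put_subbuf(buf);
 *
 * where buf, consumed, offset, dest and len are caller-provided.
 */
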
/**
 * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination userspace address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * access_ok() must have been performed on dest addresses prior to calling
 * this function.
 * Returns -EFAULT on error, 0 if ok.
 */
int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
				   size_t offset, void __user *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		if (__copy_to_user(dest,
				   rpages->p[index].virt + (offset & ~PAGE_MASK),
				   pagecpy))
			return -EFAULT;
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);

/**
 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 *
 * Returns the string's length, or -EINVAL on error.
 * Should be protected by get_subbuf/put_subbuf.
 * Destination length should be at least 1 to hold '\0'.
 */
int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy, pagelen, strpagelen, orig_offset;
	char *str;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	orig_offset = offset;
	if (unlikely(!len))
		return -EINVAL;
	for (;;) {
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
		pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
		strpagelen = strnlen(str, pagelen);
		if (len) {
			pagecpy = min_t(size_t, len, strpagelen);
			if (dest) {
				memcpy(dest, str, pagecpy);
				dest += pagecpy;
			}
			len -= pagecpy;
		}
		offset += strpagelen;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		if (strpagelen < pagelen)
			break;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	if (dest && len)
		((char *)dest)[0] = 0;
	return offset - orig_offset;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);

/**
 * lib_ring_buffer_read_get_pfn - Get a page frame number to read from
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @virt : pointer to page address (output)
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the pointer to the page frame number unsigned long.
 */
unsigned long *lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb,
					    size_t offset, void ***virt)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	*virt = &rpages->p[index].virt;
	return &rpages->p[index].pfn;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);

/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to read/write directly
 * from/to this address, as long as the read/write is never bigger than a
 * page size.
 */
void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
					  size_t offset)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);

/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
				     size_t offset)
{
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);