Fix: out of memory error handling
lttng-modules.git: lib/ringbuffer/ring_buffer_backend.c

/*
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <wrapper/vmalloc.h>	/* for wrapper_vmalloc_sync_all() */
#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>

/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the buffer backend
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
				     struct lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	unsigned long i;

	num_pages = size >> PAGE_SHIFT;
	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf; /* Add pages for reader */
		num_subbuf_alloc++;
	}

	pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
				   1 << INTERNODE_CACHE_SHIFT),
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!pages))
		goto pages_error;

	bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
					* num_subbuf_alloc,
					1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL | __GFP_NOWARN,
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->array))
		goto array_error;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
				GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
		if (unlikely(!pages[i]))
			goto depopulate;
	}
	bufb->num_pages_per_subbuf = num_pages_per_subbuf;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		bufb->array[i] =
			lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_pages) +
				sizeof(struct lib_ring_buffer_backend_page)
				* num_pages_per_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
		if (!bufb->array[i])
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_subbuffer)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_wsb))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Allocate subbuffer packet counter table */
	bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_counts)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_cnt))
		goto free_wsb;

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < num_pages_per_subbuf; j++) {
			CHAN_WARN_ON(chanb, page_idx > num_pages);
			bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
			bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
			page_idx++;
		}
		if (config->output == RING_BUFFER_MMAP) {
			bufb->array[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	/*
	 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
	 * will not fault.
	 */
	wrapper_vmalloc_sync_all();
	vfree(pages);
	return 0;

free_wsb:
	lttng_kvfree(bufb->buf_wsb);
free_array:
	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
		lttng_kvfree(bufb->array[i]);
depopulate:
	/* Free all allocated pages */
	for (i = 0; (i < num_pages && pages[i]); i++)
		__free_page(pages[i]);
	lttng_kvfree(bufb->array);
array_error:
	vfree(pages);
pages_error:
	return -ENOMEM;
}

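/*
 * Editor's illustration (hypothetical sketch, not part of the original
 * file): the allocation path above recovers from out-of-memory errors
 * with the usual kernel "goto unwind" pattern. Each allocation stage
 * gets an error label, and a failure jumps to the label that releases
 * everything allocated so far, in reverse order, before returning
 * -ENOMEM. A minimal, self-contained version of the same pattern:
 */
#if 0
static int example_staged_alloc(void **a, void **b, void **c)
{
	*a = kmalloc(64, GFP_KERNEL);
	if (!*a)
		goto a_error;
	*b = kmalloc(64, GFP_KERNEL);
	if (!*b)
		goto b_error;
	*c = kmalloc(64, GFP_KERNEL);
	if (!*c)
		goto c_error;
	return 0;

c_error:
	kfree(*b);
b_error:
	kfree(*a);
a_error:
	return -ENOMEM;
}
#endif
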
int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	bufb->chan = container_of(chanb, struct channel, backend);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb);
}

void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long i, j, num_subbuf_alloc;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	lttng_kvfree(bufb->buf_wsb);
	lttng_kvfree(bufb->buf_cnt);
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < bufb->num_pages_per_subbuf; j++)
			__free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
		lttng_kvfree(bufb->array[i]);
	}
	lttng_kvfree(bufb->array);
	bufb->allocated = 0;
}

void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &bufb->array[i]->records_commit, 0);
		v_set(config, &bufb->array[i]->records_unread, 0);
		bufb->array[i]->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}

/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	const struct lib_ring_buffer_config *config = &chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

/*
 * No need to implement a "dead" callback to do a buffer switch here,
 * because it will happen when tracing is stopped, or will be done by
 * the switch timer CPU DEAD callback.
 * We don't free buffers when a CPU goes away, because it would make
 * trace data vanish, which is unwanted.
 */
int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct channel_backend *chanb = container_of(node,
			struct channel_backend, cpuhp_prepare);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	buf = per_cpu_ptr(chanb->buf, cpu);
	ret = lib_ring_buffer_create(buf, chanb, cpu);
	if (ret) {
		printk(KERN_ERR
		  "ring_buffer_cpu_hp_callback: cpu %d "
		  "buffer creation failed\n", cpu);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

#ifdef CONFIG_HOTPLUG_CPU

/**
 * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
static
int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
				    unsigned long action,
				    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct channel_backend *chanb = container_of(nb, struct channel_backend,
						     cpu_hp_notifier);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		buf = per_cpu_ptr(chanb->buf, cpu);
		ret = lib_ring_buffer_create(buf, chanb, cpu);
		if (ret) {
			printk(KERN_ERR
			  "ring_buffer_cpu_hp_callback: cpu %d "
			  "buffer creation failed\n", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* No need to do a buffer switch here, because it will happen
		 * when tracing is stopped, or will be done by switch timer CPU
		 * DEAD callback. */
		break;
	}
	return NOTIFY_OK;
}

#endif

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

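/*
 * Editor's illustration (hypothetical sketch, not part of the original
 * file): on >= 4.10 kernels the prepare callback above is reached via
 * the multi-instance CPU hotplug state machine, whose state is set up
 * elsewhere in lttng-modules (lttng_rb_hp_prepare). The names below
 * are made up for illustration; the sketch only shows the general
 * shape of registering a dynamic multi-instance state and adding one
 * instance per channel (would need <linux/cpuhotplug.h>).
 */
#if 0
static enum cpuhp_state example_hp_state;

static int example_hp_prepare(unsigned int cpu, struct hlist_node *node)
{
	/* container_of(node, ...) back to the per-channel structure here. */
	return 0;
}

static int example_register(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN,
			"example/rb:prepare", example_hp_prepare, NULL);
	if (ret < 0)
		return ret;
	example_hp_state = ret;	/* dynamically allocated state */
	return 0;
}

/* Later, per channel: cpuhp_state_add_instance(example_hp_state, &node); */
#endif
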
/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (>= PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 *
 * Returns 0 on success, a negative error code otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffer files will be named
 * name_0...name_N-1. File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;

	if (!name)
		return -EPERM;

	/* Check that the subbuffer size is at least a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * power of 2 and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;
	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->priv = priv;
	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
		(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strlcpy(chanb->name, name, NAME_MAX);
	memcpy(&chanb->config, config, sizeof(chanb->config));

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
			return -ENOMEM;
	}

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		/* Allocating the buffer per-cpu structures */
		chanb->buf = alloc_percpu(struct lib_ring_buffer);
		if (!chanb->buf)
			goto free_cpumask;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
		ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
			&chanb->cpuhp_prepare.node);
		if (ret)
			goto free_bufs;
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

		{
			/*
			 * In case of non-hotplug cpu, if the ring-buffer is allocated
			 * in early initcall, it will not be notified of secondary cpus.
			 * In that off case, we need to allocate for all possible cpus.
			 */
#ifdef CONFIG_HOTPLUG_CPU
			/*
			 * buf->backend.allocated test takes care of concurrent CPU
			 * hotplug.
			 * Priority higher than frontend, so we create the ring buffer
			 * before we start the timer.
			 */
			chanb->cpu_hp_notifier.notifier_call =
					lib_ring_buffer_cpu_hp_callback;
			chanb->cpu_hp_notifier.priority = 5;
			register_hotcpu_notifier(&chanb->cpu_hp_notifier);

			get_online_cpus();
			for_each_online_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							 chanb, i);
				if (ret)
					goto free_bufs;	/* cpu hotplug locked */
			}
			put_online_cpus();
#else
			for_each_possible_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							 chanb, i);
				if (ret)
					goto free_bufs;
			}
#endif
		}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	} else {
		chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
		if (!chanb->buf)
			goto free_cpumask;
		ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		/*
		 * Teardown of lttng_rb_hp_prepare instance
		 * on "add" error is handled within cpu hotplug,
		 * no teardown to do from the caller.
		 */
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
#ifdef CONFIG_HOTPLUG_CPU
		put_online_cpus();
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf =
				per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_percpu(chanb->buf);
	} else
		kfree(chanb->buf);
free_cpumask:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		free_cpumask_var(chanb->cpumask);
	return -ENOMEM;
}

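/*
 * Editor's illustration (hypothetical sketch, not part of the original
 * file): the geometry bookkeeping above only needs the sizes to be
 * non-zero powers of two, which is what the "x & (x - 1)" checks
 * enforce. For example, with subbuf_size = 4096 and num_subbuf = 4:
 *
 *	buf_size          = 4 * 4096 = 16384
 *	subbuf_size_order = get_count_order(4096)  = 12
 *	num_subbuf_order  = get_count_order(4)     = 2
 *	buf_size_order    = get_count_order(16384) = 14
 */
#if 0
static bool example_is_pow2(size_t x)
{
	return x && !(x & (x - 1));	/* exactly one bit set */
}
#endif
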
/**
 * channel_backend_unregister_notifiers - unregister notifiers
 * @chanb: the channel backend
 *
 * Holds CPU hotplug.
 */
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		int ret;

		ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
				&chanb->cpuhp_prepare.node);
		WARN_ON(ret);
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	}
}

/**
 * channel_backend_free - destroy the channel
 * @chanb: the channel backend
 *
 * Destroys all channel buffers and frees the channel.
 */
void channel_backend_free(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned int i;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_cpumask_var(chanb->cpumask);
		free_percpu(chanb->buf);
	} else {
		struct lib_ring_buffer *buf = chanb->buf;

		CHAN_WARN_ON(chanb, !buf->backend.allocated);
		lib_ring_buffer_free(buf);
		kfree(buf);
	}
}

/**
 * lib_ring_buffer_write - write data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 */
void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
			    const void *src, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_copy(config,
					rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
					src, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);

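/*
 * Editor's illustration (hypothetical sketch, not part of the original
 * file): the copy loop above never crosses a page boundary in a single
 * copy; each iteration copies at most up to the end of the current
 * backend page, using
 *
 *	pagecpy = min(len, PAGE_SIZE - (offset & ~PAGE_MASK));
 *
 * With PAGE_SIZE = 4096, a 100-byte write starting at offset 4090 is
 * split into a 6-byte copy (bytes 4090..4095) followed by a 94-byte
 * copy at the start of the next page.
 */
#if 0
static size_t example_chunk(size_t offset, size_t len)
{
	return min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
}
#endif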

/**
 * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @c : the byte to write
 * @len : length to write
 * @pagecpy : page size copied so far
 */
void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
			     size_t offset,
			     int c, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_memset(rpages->p[index].virt
					  + (offset & ~PAGE_MASK),
					  c, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);

/**
 * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 * @pad : character to use for padding
 */
void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
			     size_t offset, const char *src, size_t len,
			     size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	CHAN_WARN_ON(chanb, !len);
	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy(config,
					rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
						pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
					pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
			'\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);

/**
 * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 *
 * This function deals with userspace pointers, it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
				      size_t offset,
				      const void __user *src, size_t len,
				      size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int ret;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
							+ (offset & ~PAGE_MASK),
							src, pagecpy) != 0;
		if (ret > 0) {
			/* Copy failed. */
			_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
			break;	/* stop copy */
		}
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);

/**
 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 * @pad : character to use for padding
 *
 * This function deals with userspace pointers, it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
		size_t offset, const char __user *src, size_t len,
		size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
					rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
						pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
					pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
			'\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);

/**
 * lib_ring_buffer_read - read data from ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index, pagecpy, orig_len;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	orig_len = len;
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
		       pagecpy);
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return orig_len;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read);

/**
 * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination userspace address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * access_ok() must have been performed on dest addresses prior to calling
 * this function.
 * Returns -EFAULT on error, 0 if ok.
 */
int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
				   size_t offset, void __user *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		if (__copy_to_user(dest,
			       rpages->p[index].virt + (offset & ~PAGE_MASK),
			       pagecpy))
			return -EFAULT;
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);

/**
 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 *
 * Return string's length, or -EINVAL on error.
 * Should be protected by get_subbuf/put_subbuf.
 * Destination length should be at least 1 to hold '\0'.
 */
int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy, pagelen, strpagelen, orig_offset;
	char *str;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	orig_offset = offset;
	if (unlikely(!len))
		return -EINVAL;
	for (;;) {
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
		pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
		strpagelen = strnlen(str, pagelen);
		if (len) {
			pagecpy = min_t(size_t, len, strpagelen);
			if (dest) {
				memcpy(dest, str, pagecpy);
				dest += pagecpy;
			}
			len -= pagecpy;
		}
		offset += strpagelen;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		if (strpagelen < pagelen)
			break;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	if (dest && len)
		((char *)dest)[0] = 0;
	return offset - orig_offset;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);

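/*
 * Editor's illustration (hypothetical sketch, not part of the original
 * file): the loop above walks the string one backend page at a time,
 * bounding strnlen() by the bytes left in the current page; the string
 * ends on the first page where strnlen() stops short of the page
 * boundary. The same idea over a flat buffer:
 */
#if 0
static size_t example_strlen_paged(const char *buf, size_t buf_size,
		size_t page_size)
{
	size_t off = 0;

	while (off < buf_size) {
		size_t pagelen = page_size - (off & (page_size - 1));
		size_t n = strnlen(buf + off, pagelen);

		off += n;
		if (n < pagelen)
			break;	/* hit the terminating '\0' */
	}
	return off;
}
#endif
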
/**
 * lib_ring_buffer_read_get_pfn - Get a page frame number to read from
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @virt : pointer to page address (output)
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the pointer to the page frame number unsigned long.
 */
unsigned long *lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb,
					    size_t offset, void ***virt)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	*virt = &rpages->p[index].virt;
	return &rpages->p[index].pfn;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);

/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to read/write directly
 * from/to this address, as long as the read/write is never bigger than a
 * page size.
 */
void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
					  size_t offset)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);

/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
				     size_t offset)
{
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);