Fix: implicit declarations caused by buffer size checks.
lttng-modules: lib/ringbuffer/ring_buffer_backend.c
/*
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <wrapper/mm.h>
#include <wrapper/vmalloc.h>	/* for wrapper_vmalloc_sync_all() */
#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>

/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: buffer backend
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
				     struct lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	unsigned long i;

	num_pages = size >> PAGE_SHIFT;

	/*
	 * Verify that there are enough free pages available on the system for
	 * the current allocation request.
	 * wrapper_check_enough_free_pages uses si_mem_available() if available
	 * and returns whether there should be enough free pages based on the
	 * current estimate.
	 */
	if (!wrapper_check_enough_free_pages(num_pages))
		goto not_enough_pages;

	/*
	 * Set the current user thread as the first target of the OOM killer.
	 * If the estimate received by si_mem_available() was off, and we do
	 * end up running out of memory because of this buffer allocation, we
	 * want to kill the offending app first.
	 */
	wrapper_set_current_oom_origin();

	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf; /* Add pages for reader */
		num_subbuf_alloc++;
	}

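	/*
	 * The pages pointer array is only needed while the buffer is being
	 * populated: it is allocated on the NUMA node of the buffer's CPU,
	 * with its size padded to the internode cache line, and is freed
	 * again once every page has been assigned to the page index below.
	 */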
	pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
				   1 << INTERNODE_CACHE_SHIFT),
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!pages))
		goto pages_error;

	bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
					  * num_subbuf_alloc,
					  1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL | __GFP_NOWARN,
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->array))
		goto array_error;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
					    GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
		if (unlikely(!pages[i]))
			goto depopulate;
	}
	bufb->num_pages_per_subbuf = num_pages_per_subbuf;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		bufb->array[i] =
			lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_pages) +
				sizeof(struct lib_ring_buffer_backend_page)
				* num_pages_per_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
		if (!bufb->array[i])
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_subbuffer)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_wsb))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);

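	/*
	 * The read-side table set up below points at the subbuffer the reader
	 * initially owns: with an extra reader subbuffer (overwrite mode) it
	 * is the last allocated one, otherwise it is simply subbuffer 0.
	 */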
	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Allocate subbuffer packet counter table */
	bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_counts)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_cnt))
		goto free_wsb;

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < num_pages_per_subbuf; j++) {
			CHAN_WARN_ON(chanb, page_idx > num_pages);
			bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
			bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
			page_idx++;
		}
		if (config->output == RING_BUFFER_MMAP) {
			bufb->array[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	/*
	 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
	 * will not fault.
	 */
	wrapper_vmalloc_sync_all();
	wrapper_clear_current_oom_origin();
	vfree(pages);
	return 0;

free_wsb:
	lttng_kvfree(bufb->buf_wsb);
free_array:
	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
		lttng_kvfree(bufb->array[i]);
depopulate:
	/* Free all allocated pages */
	for (i = 0; (i < num_pages && pages[i]); i++)
		__free_page(pages[i]);
	lttng_kvfree(bufb->array);
array_error:
	vfree(pages);
pages_error:
	wrapper_clear_current_oom_origin();
not_enough_pages:
	return -ENOMEM;
}

int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	bufb->chan = container_of(chanb, struct channel, backend);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb);
}

void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long i, j, num_subbuf_alloc;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	lttng_kvfree(bufb->buf_wsb);
	lttng_kvfree(bufb->buf_cnt);
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < bufb->num_pages_per_subbuf; j++)
			__free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
		lttng_kvfree(bufb->array[i]);
	}
	lttng_kvfree(bufb->array);
	bufb->allocated = 0;
}

void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &bufb->array[i]->records_commit, 0);
		v_set(config, &bufb->array[i]->records_unread, 0);
		bufb->array[i]->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}

/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	const struct lib_ring_buffer_config *config = &chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

/*
 * No need to implement a "dead" callback to do a buffer switch here,
 * because it will happen when tracing is stopped, or will be done by
 * the switch timer CPU DEAD callback.
 * We don't free buffers when CPUs go away, because it would make trace
 * data vanish, which is unwanted.
 */
int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct channel_backend *chanb = container_of(node,
			struct channel_backend, cpuhp_prepare);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	buf = per_cpu_ptr(chanb->buf, cpu);
	ret = lib_ring_buffer_create(buf, chanb, cpu);
	if (ret) {
		printk(KERN_ERR
		       "ring_buffer_cpu_hp_callback: cpu %d "
		       "buffer creation failed\n", cpu);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

#ifdef CONFIG_HOTPLUG_CPU

/**
 * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
static
int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
				    unsigned long action,
				    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct channel_backend *chanb = container_of(nb, struct channel_backend,
						     cpu_hp_notifier);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		buf = per_cpu_ptr(chanb->buf, cpu);
		ret = lib_ring_buffer_create(buf, chanb, cpu);
		if (ret) {
			printk(KERN_ERR
			       "ring_buffer_cpu_hp_callback: cpu %d "
			       "buffer creation failed\n", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* No need to do a buffer switch here, because it will happen
		 * when tracing is stopped, or will be done by the switch timer
		 * CPU DEAD callback. */
		break;
	}
	return NOTIFY_OK;
}

#endif

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (>= PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 *
 * Returns 0 on success, or a negative error value on failure.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffer files will be named
 * name_0...name_N-1. File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;

	if (!name)
		return -EPERM;

	/* Check that the subbuffer size is at least a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * power of 2 and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;
	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

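	/*
	 * subbuffer_id_check_index() is expected to verify that num_subbuf
	 * can be encoded in the index bits of a subbuffer id word.
	 */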
	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->priv = priv;
	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
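	/*
	 * Only overwrite ("flight recorder") mode uses an extra reader-owned
	 * subbuffer; lib_ring_buffer_backend_allocate() allocates it when
	 * extra_reader_sb is set.
	 */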
	chanb->extra_reader_sb =
		(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strlcpy(chanb->name, name, NAME_MAX);
	memcpy(&chanb->config, config, sizeof(chanb->config));

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
			return -ENOMEM;
	}

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		/* Allocating the buffer per-cpu structures */
		chanb->buf = alloc_percpu(struct lib_ring_buffer);
		if (!chanb->buf)
			goto free_cpumask;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
		ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
					       &chanb->cpuhp_prepare.node);
		if (ret)
			goto free_bufs;
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

		{
			/*
			 * In case of non-hotplug cpu, if the ring-buffer is allocated
			 * in early initcall, it will not be notified of secondary cpus.
			 * In that case, we need to allocate for all possible cpus.
			 */
#ifdef CONFIG_HOTPLUG_CPU
			/*
			 * buf->backend.allocated test takes care of concurrent CPU
			 * hotplug.
			 * Priority higher than frontend, so we create the ring buffer
			 * before we start the timer.
			 */
			chanb->cpu_hp_notifier.notifier_call =
					lib_ring_buffer_cpu_hp_callback;
			chanb->cpu_hp_notifier.priority = 5;
			register_hotcpu_notifier(&chanb->cpu_hp_notifier);

			get_online_cpus();
			for_each_online_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							     chanb, i);
				if (ret)
					goto free_bufs;	/* cpu hotplug locked */
			}
			put_online_cpus();
#else
			for_each_possible_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							     chanb, i);
				if (ret)
					goto free_bufs;
			}
#endif
		}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	} else {
		chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
		if (!chanb->buf)
			goto free_cpumask;
		ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		/*
		 * Teardown of lttng_rb_hp_prepare instance
		 * on "add" error is handled within cpu hotplug,
		 * no teardown to do from the caller.
		 */
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
#ifdef CONFIG_HOTPLUG_CPU
		put_online_cpus();
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf =
				per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_percpu(chanb->buf);
	} else
		kfree(chanb->buf);
free_cpumask:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		free_cpumask_var(chanb->cpumask);
	return -ENOMEM;
}

/**
 * channel_backend_unregister_notifiers - unregister notifiers
 * @chanb: the channel backend
 *
 * Holds CPU hotplug.
 */
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		int ret;

		ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
						  &chanb->cpuhp_prepare.node);
		WARN_ON(ret);
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	}
}

/**
 * channel_backend_free - destroy the channel backend
 * @chanb: the channel backend
 *
 * Destroys all channel buffers and frees the channel backend.
 */
void channel_backend_free(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned int i;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_cpumask_var(chanb->cpumask);
		free_percpu(chanb->buf);
	} else {
		struct lib_ring_buffer *buf = chanb->buf;

		CHAN_WARN_ON(chanb, !buf->backend.allocated);
		lib_ring_buffer_free(buf);
		kfree(buf);
	}
}

/**
 * lib_ring_buffer_write - write data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : number of bytes already copied
 */
void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
			    const void *src, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

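	/*
	 * On entry, @pagecpy bytes have already been copied (typically by the
	 * caller's inline fast path); skip over them, then keep copying one
	 * page at a time until @len bytes have been written in total.
	 */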
	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_copy(config,
					rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
					src, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);

/**
 * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @c : the byte to write
 * @len : length to write
 * @pagecpy : number of bytes already copied
 */
void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
			     size_t offset,
			     int c, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_memset(rpages->p[index].virt
					  + (offset & ~PAGE_MASK),
					  c, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);

/**
 * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : number of bytes already copied
 * @pad : character to use for padding
 */
void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
			     size_t offset, const char *src, size_t len,
			     size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

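	/*
	 * The string record spans @len bytes in total, counting the @pagecpy
	 * bytes already written by the caller: bytes from @src up to its
	 * terminating NUL, then @pad padding if @src is shorter, and always a
	 * final '\0'.
	 */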
	CHAN_WARN_ON(chanb, !len);
	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy(config,
					rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
						pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
					pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
				  '\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);

/**
 * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : number of bytes already copied
 *
 * This function deals with userspace pointers; it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
					      size_t offset,
					      const void __user *src, size_t len,
					      size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int ret;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
							+ (offset & ~PAGE_MASK),
							src, pagecpy) != 0;
		if (ret > 0) {
			/* Copy failed: fill the rest of the record with zeroes. */
			_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
			break;	/* stop copy */
		}
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);

/**
 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : number of bytes already copied
 * @pad : character to use for padding
 *
 * This function deals with userspace pointers; it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
		size_t offset, const char __user *src, size_t len,
		size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
					rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
						pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
					pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
				  '\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);

/**
 * lib_ring_buffer_read - read data from a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index, pagecpy, orig_len;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	orig_len = len;
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
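	/*
	 * Copy page by page from the read-side subbuffer, recomputing the
	 * page index whenever the copy crosses a page boundary.
	 */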
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
		       pagecpy);
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return orig_len;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read);

/**
 * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination userspace address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * access_ok() must have been performed on dest addresses prior to calling
 * this function.
 * Returns -EFAULT on error, 0 if ok.
 */
int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
				   size_t offset, void __user *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		if (__copy_to_user(dest,
				   rpages->p[index].virt + (offset & ~PAGE_MASK),
				   pagecpy))
			return -EFAULT;
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);

/**
 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 *
 * Returns the string's length, or -EINVAL on error.
 * Should be protected by get_subbuf/put_subbuf.
 * Destination length should be at least 1 to hold '\0'.
 */
int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy, pagelen, strpagelen, orig_offset;
	char *str;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	orig_offset = offset;
	if (unlikely(!len))
		return -EINVAL;
	for (;;) {
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
		pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
		strpagelen = strnlen(str, pagelen);
		if (len) {
			pagecpy = min_t(size_t, len, strpagelen);
			if (dest) {
				memcpy(dest, str, pagecpy);
				dest += pagecpy;
			}
			len -= pagecpy;
		}
		offset += strpagelen;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		if (strpagelen < pagelen)
			break;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	if (dest && len)
		((char *)dest)[0] = 0;
	return offset - orig_offset;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);

/**
 * lib_ring_buffer_read_get_pfn - Get a page frame number to read from
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @virt : pointer to page address (output)
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the pointer to the page frame number unsigned long.
 */
unsigned long *lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb,
					    size_t offset, void ***virt)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	*virt = &rpages->p[index].virt;
	return &rpages->p[index].pfn;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);

/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to read/write directly
 * from/to this address, as long as the read/write is never bigger than a
 * page size.
 */
void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
					  size_t offset)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);

/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
				     size_t offset)
{
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);