Prevent allocation of buffers if exceeding available memory
lib/ringbuffer/ring_buffer_backend.c

/*
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/oom.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <wrapper/vmalloc.h>	/* for wrapper_vmalloc_sync_all() */
#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>

/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: buffer backend
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
				     struct lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	unsigned long i;

	num_pages = size >> PAGE_SHIFT;

	/*
	 * Verify that the number of pages requested for that buffer is smaller
	 * than the number of available pages on the system. si_mem_available()
	 * returns an _estimate_ of the number of available pages.
	 */
	if (num_pages > si_mem_available())
		goto not_enough_pages;

	/*
	 * Set the current user thread as the first target of the OOM killer.
	 * If the estimate received by si_mem_available() was off, and we do
	 * end up running out of memory because of this buffer allocation, we
	 * want to kill the offending app first.
	 */
	set_current_oom_origin();

	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf; /* Add pages for reader */
		num_subbuf_alloc++;
	}

	pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
				   1 << INTERNODE_CACHE_SHIFT),
			     cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!pages))
		goto pages_error;

	bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
						* num_subbuf_alloc,
					  1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL | __GFP_NOWARN,
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->array))
		goto array_error;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
				GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
		if (unlikely(!pages[i]))
			goto depopulate;
	}
	bufb->num_pages_per_subbuf = num_pages_per_subbuf;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		bufb->array[i] =
			lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_pages) +
				sizeof(struct lib_ring_buffer_backend_page)
				* num_pages_per_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
		if (!bufb->array[i])
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_subbuffer)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_wsb))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Allocate subbuffer packet counter table */
	bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_counts)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_cnt))
		goto free_wsb;

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < num_pages_per_subbuf; j++) {
			CHAN_WARN_ON(chanb, page_idx > num_pages);
			bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
			bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
			page_idx++;
		}
		if (config->output == RING_BUFFER_MMAP) {
			bufb->array[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	/*
	 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
	 * will not fault.
	 */
	wrapper_vmalloc_sync_all();
	clear_current_oom_origin();
	vfree(pages);
	return 0;

free_wsb:
	lttng_kvfree(bufb->buf_wsb);
free_array:
	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
		lttng_kvfree(bufb->array[i]);
depopulate:
	/* Free all allocated pages */
	for (i = 0; (i < num_pages && pages[i]); i++)
		__free_page(pages[i]);
	lttng_kvfree(bufb->array);
array_error:
	vfree(pages);
pages_error:
	clear_current_oom_origin();
not_enough_pages:
	return -ENOMEM;
}

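/*
 * Editor's note, illustration only (not part of the upstream file): the
 * crux of the "prevent allocation of buffers if exceeding available
 * memory" change above is the following guard pattern, shown here in
 * isolation with a hypothetical nr_pages request. si_mem_available(),
 * set_current_oom_origin() and clear_current_oom_origin() are the real
 * kernel helpers used by lib_ring_buffer_backend_allocate().
 *
 *	if (nr_pages > si_mem_available())
 *		return -ENOMEM;		(refuse clearly oversized requests)
 *	set_current_oom_origin();	(if we still hit OOM, kill this task first)
 *	... allocate the buffer pages ...
 *	clear_current_oom_origin();	(allocation finished, or rolled back)
 */
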
int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	bufb->chan = container_of(chanb, struct channel, backend);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb);
}

void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long i, j, num_subbuf_alloc;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	lttng_kvfree(bufb->buf_wsb);
	lttng_kvfree(bufb->buf_cnt);
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < bufb->num_pages_per_subbuf; j++)
			__free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
		lttng_kvfree(bufb->array[i]);
	}
	lttng_kvfree(bufb->array);
	bufb->allocated = 0;
}

void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &bufb->array[i]->records_commit, 0);
		v_set(config, &bufb->array[i]->records_unread, 0);
		bufb->array[i]->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}

/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	const struct lib_ring_buffer_config *config = &chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

/*
 * No need to implement a "dead" callback to do a buffer switch here,
 * because it will happen when tracing is stopped, or will be done by
 * switch timer CPU DEAD callback.
 * We don't free buffers when CPUs go away, because it would make trace
 * data vanish, which is unwanted.
 */
int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct channel_backend *chanb = container_of(node,
			struct channel_backend, cpuhp_prepare);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	buf = per_cpu_ptr(chanb->buf, cpu);
	ret = lib_ring_buffer_create(buf, chanb, cpu);
	if (ret) {
		printk(KERN_ERR
		  "ring_buffer_cpu_hp_callback: cpu %d "
		  "buffer creation failed\n", cpu);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

#ifdef CONFIG_HOTPLUG_CPU

/**
 * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
static
int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
				    unsigned long action,
				    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct channel_backend *chanb = container_of(nb, struct channel_backend,
						     cpu_hp_notifier);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		buf = per_cpu_ptr(chanb->buf, cpu);
		ret = lib_ring_buffer_create(buf, chanb, cpu);
		if (ret) {
			printk(KERN_ERR
			  "ring_buffer_cpu_hp_callback: cpu %d "
			  "buffer creation failed\n", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/* No need to do a buffer switch here, because it will happen
		 * when tracing is stopped, or will be done by switch timer CPU
		 * DEAD callback. */
		break;
	}
	return NOTIFY_OK;
}

#endif

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (>= PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 *
 * Returns 0 on success, a negative error code otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffer files will be named
 * name_0...name_N-1. File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;

	if (!name)
		return -EPERM;

	/* Check that the subbuffer size is at least a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * power of 2 and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;
	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->priv = priv;
	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
			(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strlcpy(chanb->name, name, NAME_MAX);
	memcpy(&chanb->config, config, sizeof(chanb->config));

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
			return -ENOMEM;
	}

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		/* Allocating the buffer per-cpu structures */
		chanb->buf = alloc_percpu(struct lib_ring_buffer);
		if (!chanb->buf)
			goto free_cpumask;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
		ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
			&chanb->cpuhp_prepare.node);
		if (ret)
			goto free_bufs;
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

		{
			/*
			 * In case of non-hotplug cpu, if the ring-buffer is allocated
			 * in early initcall, it will not be notified of secondary cpus.
			 * In that off case, we need to allocate for all possible cpus.
			 */
#ifdef CONFIG_HOTPLUG_CPU
			/*
			 * buf->backend.allocated test takes care of concurrent CPU
			 * hotplug.
			 * Priority higher than frontend, so we create the ring buffer
			 * before we start the timer.
			 */
			chanb->cpu_hp_notifier.notifier_call =
					lib_ring_buffer_cpu_hp_callback;
			chanb->cpu_hp_notifier.priority = 5;
			register_hotcpu_notifier(&chanb->cpu_hp_notifier);

			get_online_cpus();
			for_each_online_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							     chanb, i);
				if (ret)
					goto free_bufs;	/* cpu hotplug locked */
			}
			put_online_cpus();
#else
			for_each_possible_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							     chanb, i);
				if (ret)
					goto free_bufs;
			}
#endif
		}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	} else {
		chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
		if (!chanb->buf)
			goto free_cpumask;
		ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		/*
		 * Teardown of lttng_rb_hp_prepare instance
		 * on "add" error is handled within cpu hotplug,
		 * no teardown to do from the caller.
		 */
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
#ifdef CONFIG_HOTPLUG_CPU
		put_online_cpus();
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf =
				per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_percpu(chanb->buf);
	} else
		kfree(chanb->buf);
free_cpumask:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		free_cpumask_var(chanb->cpumask);
	return -ENOMEM;
}

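/*
 * Editor's note, illustration only (not part of the upstream file):
 * channel_backend_init() rejects geometries that are not powers of two,
 * so a caller may round its requested sizes first. A minimal sketch,
 * assuming hypothetical requested values req_subbuf_size/req_num_subbuf;
 * roundup_pow_of_two() and max_t() are standard kernel helpers.
 *
 *	subbuf_size = max_t(size_t, PAGE_SIZE,
 *			    roundup_pow_of_two(req_subbuf_size));
 *	num_subbuf = roundup_pow_of_two(req_num_subbuf);
 *	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
 *		num_subbuf = 2;		(overwrite mode needs >= 2 sub-buffers)
 *	ret = channel_backend_init(chanb, name, config, priv,
 *				   subbuf_size, num_subbuf);
 */
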
/**
 * channel_backend_unregister_notifiers - unregister notifiers
 * @chanb: the channel backend
 *
 * Holds CPU hotplug.
 */
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		int ret;

		ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
				&chanb->cpuhp_prepare.node);
		WARN_ON(ret);
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	}
}

/**
 * channel_backend_free - destroy the channel
 * @chanb: the channel backend
 *
 * Destroys all channel buffers and frees the channel.
 */
void channel_backend_free(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned int i;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_cpumask_var(chanb->cpumask);
		free_percpu(chanb->buf);
	} else {
		struct lib_ring_buffer *buf = chanb->buf;

		CHAN_WARN_ON(chanb, !buf->backend.allocated);
		lib_ring_buffer_free(buf);
		kfree(buf);
	}
}

/**
 * lib_ring_buffer_write - write data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 */
void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
			    const void *src, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_copy(config,
					rpages->p[index].virt
					    + (offset & ~PAGE_MASK),
					src, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);

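/*
 * Editor's note, worked example only (not part of the upstream file):
 * _lib_ring_buffer_write() above and the copy routines that follow all
 * decompose a buffer offset into a sub-buffer index, a page index within
 * the sub-buffer, and a byte offset within the page, and clamp each copy
 * to the end of the current page. With hypothetical values
 * subbuf_size = 64 kB (subbuf_size_order = 16), PAGE_SIZE = 4 kB
 * (PAGE_SHIFT = 12) and offset = 0x1A345:
 *
 *	sbidx = offset >> chanb->subbuf_size_order;		0x1A345 >> 16 = 1
 *	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
 *								0xA345 >> 12 = 0xA
 *	byte  = offset & ~PAGE_MASK;				= 0x345
 *	pagecpy = min_t(size_t, len, PAGE_SIZE - byte);		at most 0xCBB bytes
 *
 * The target page is then bufb->array[sb_bindex]->p[index].virt, where
 * sb_bindex comes from subbuffer_id_get_index(config, bufb->buf_wsb[sbidx].id).
 */
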

/**
 * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @c : the byte to write
 * @len : length to write
 * @pagecpy : page size copied so far
 */
void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
			     size_t offset,
			     int c, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_memset(rpages->p[index].virt
					  + (offset & ~PAGE_MASK),
					  c, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);

/**
 * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 * @pad : character to use for padding
 */
void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
			size_t offset, const char *src, size_t len,
			size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	CHAN_WARN_ON(chanb, !len);
	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
			'\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);

/**
 * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 *
 * This function deals with userspace pointers; it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
				      size_t offset,
				      const void __user *src, size_t len,
				      size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int ret;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
				&& subbuffer_id_is_noref(config, id));
		ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
							+ (offset & ~PAGE_MASK),
							src, pagecpy) != 0;
		if (ret > 0) {
			/* Copy failed. */
			_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
			break; /* stop copy */
		}
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);

/**
 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 * @pad : character to use for padding
 *
 * This function deals with userspace pointers; it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
		size_t offset, const char __user *src, size_t len,
		size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
				&& subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
			'\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);

/**
 * lib_ring_buffer_read - read data from a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index, pagecpy, orig_len;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	orig_len = len;
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
		       pagecpy);
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return orig_len;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read);

/**
 * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination userspace address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * access_ok() must have been performed on dest addresses prior to calling
 * this function.
 * Returns -EFAULT on error, 0 if ok.
 */
int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
				   size_t offset, void __user *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		if (__copy_to_user(dest,
				   rpages->p[index].virt + (offset & ~PAGE_MASK),
				   pagecpy))
			return -EFAULT;
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);

/**
 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 *
 * Return string's length, or -EINVAL on error.
 * Should be protected by get_subbuf/put_subbuf.
 * Destination length should be at least 1 to hold '\0'.
 */
int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy, pagelen, strpagelen, orig_offset;
	char *str;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	orig_offset = offset;
	if (unlikely(!len))
		return -EINVAL;
	for (;;) {
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
		pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
		strpagelen = strnlen(str, pagelen);
		if (len) {
			pagecpy = min_t(size_t, len, strpagelen);
			if (dest) {
				memcpy(dest, str, pagecpy);
				dest += pagecpy;
			}
			len -= pagecpy;
		}
		offset += strpagelen;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		if (strpagelen < pagelen)
			break;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	if (dest && len)
		((char *)dest)[0] = 0;
	return offset - orig_offset;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);

/**
 * lib_ring_buffer_read_get_pfn - Get a page frame number to read from
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @virt : pointer to page address (output)
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the pointer to the page frame number unsigned long.
 */
unsigned long *lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb,
					    size_t offset, void ***virt)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	*virt = &rpages->p[index].virt;
	return &rpages->p[index].pfn;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);

/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to read/write directly
 * from/to this address, as long as the read/write is never bigger than a
 * page size.
 */
void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
					  size_t offset)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);

/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
				     size_t offset)
{
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);