/* SPDX-License-Identifier: (GPL-2.0 OR LGPL-2.1)
 *
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/oom.h>
#include <linux/cpu.h>
#include <linux/mm.h>	/* for si_mem_available() */
#include <linux/vmalloc.h>

#include <wrapper/vmalloc.h>	/* for wrapper_vmalloc_sync_all() */
#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>
/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the buffer struct
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
				     struct lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	unsigned long i;

	num_pages = size >> PAGE_SHIFT;

	/*
	 * Verify that the number of pages requested for that buffer is smaller
	 * than the number of available pages on the system. si_mem_available()
	 * returns an _estimate_ of the number of available pages.
	 */
	if (num_pages > si_mem_available())
		goto not_enough_pages;

	/*
	 * Set the current user thread as the first target of the OOM killer.
	 * If the estimate received by si_mem_available() was off, and we do
	 * end up running out of memory because of this buffer allocation, we
	 * want to kill the offending app first.
	 */
	set_current_oom_origin();

	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf; /* Add pages for reader */
		num_subbuf_alloc++;
	}

	pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
				   1 << INTERNODE_CACHE_SHIFT),
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!pages))
		goto pages_error;

	bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
					 * num_subbuf_alloc,
				  1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL | __GFP_NOWARN,
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->array))
		goto array_error;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
				GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
		if (unlikely(!pages[i]))
			goto depopulate;
	}
	bufb->num_pages_per_subbuf = num_pages_per_subbuf;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		bufb->array[i] =
			lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_pages) +
				sizeof(struct lib_ring_buffer_backend_page)
				* num_pages_per_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
		if (!bufb->array[i])
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_subbuffer)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_wsb))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Allocate subbuffer packet counter table */
	bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_counts)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_cnt))
		goto free_wsb;

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < num_pages_per_subbuf; j++) {
			CHAN_WARN_ON(chanb, page_idx > num_pages);
			bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
			bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
			page_idx++;
		}
		if (config->output == RING_BUFFER_MMAP) {
			bufb->array[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	/*
	 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
	 * will not fault.
	 */
	wrapper_vmalloc_sync_all();
	clear_current_oom_origin();
	vfree(pages);
	return 0;

free_wsb:
	lttng_kvfree(bufb->buf_wsb);
free_array:
	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
		lttng_kvfree(bufb->array[i]);
depopulate:
	/* Free all allocated pages */
	for (i = 0; (i < num_pages && pages[i]); i++)
		__free_page(pages[i]);
	lttng_kvfree(bufb->array);
array_error:
	vfree(pages);
pages_error:
	clear_current_oom_origin();
not_enough_pages:
	return -ENOMEM;
}
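/*
 * Geometry sketch (illustrative numbers, assuming 4 kB pages): a 64 kB
 * buffer split into 4 sub-buffers gives
 *
 *	num_pages            = 65536 >> PAGE_SHIFT      = 16
 *	num_pages_per_subbuf = 16 >> get_count_order(4) = 4
 *
 * and, when extra_reader_sb is set (overwrite mode), one extra
 * sub-buffer's worth of pages (4 here) is allocated so the reader always
 * owns a spare sub-buffer to read from.
 */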
int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	bufb->chan = container_of(chanb, struct channel, backend);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb);
}
void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long i, j, num_subbuf_alloc;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	lttng_kvfree(bufb->buf_wsb);
	lttng_kvfree(bufb->buf_cnt);
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < bufb->num_pages_per_subbuf; j++)
			__free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
		lttng_kvfree(bufb->array[i]);
	}
	lttng_kvfree(bufb->array);
	bufb->allocated = 0;
}
void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &bufb->array[i]->records_commit, 0);
		v_set(config, &bufb->array[i]->records_unread, 0);
		bufb->array[i]->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}
/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	const struct lib_ring_buffer_config *config = &chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

/*
 * No need to implement a "dead" callback to do a buffer switch here,
 * because it will happen when tracing is stopped, or will be done by
 * switch timer CPU DEAD callback.
 * We don't free buffers when CPUs go away, because it would make trace
 * data vanish, which is unwanted.
 */
int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct channel_backend *chanb = container_of(node,
			struct channel_backend, cpuhp_prepare);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	buf = per_cpu_ptr(chanb->buf, cpu);
	ret = lib_ring_buffer_create(buf, chanb, cpu);
	if (ret) {
		printk(KERN_ERR
		  "ring_buffer_cpu_hp_callback: cpu %d "
		  "buffer creation failed\n", cpu);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

#ifdef CONFIG_HOTPLUG_CPU

/**
 * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
static
int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
				    unsigned long action,
				    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct channel_backend *chanb = container_of(nb, struct channel_backend,
						     cpu_hp_notifier);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		buf = per_cpu_ptr(chanb->buf, cpu);
		ret = lib_ring_buffer_create(buf, chanb, cpu);
		if (ret) {
			printk(KERN_ERR
			  "ring_buffer_cpu_hp_callback: cpu %d "
			  "buffer creation failed\n", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/*
		 * No need to do a buffer switch here, because it will happen
		 * when tracing is stopped, or will be done by switch timer CPU
		 * DEAD callback.
		 */
		break;
	}
	return NOTIFY_OK;
}

#endif

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (>= PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 *
 * Returns 0 on success, a negative error value otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffer files will be named
 * name_0...name_N-1. File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;

	if (!name)
		return -EPERM;

	/* Check that the subbuffer size is at least as large as a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * power of 2 and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;
	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->priv = priv;
	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
			(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strlcpy(chanb->name, name, NAME_MAX);
	memcpy(&chanb->config, config, sizeof(chanb->config));

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
			return -ENOMEM;
	}

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		/* Allocating the buffer per-cpu structures */
		chanb->buf = alloc_percpu(struct lib_ring_buffer);
		if (!chanb->buf)
			goto free_cpumask;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
		ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
			&chanb->cpuhp_prepare.node);
		if (ret)
			goto free_bufs;
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		{
			/*
			 * In case of non-hotplug cpu, if the ring-buffer is allocated
			 * in early initcall, it will not be notified of secondary cpus.
			 * In that case, we need to allocate for all possible cpus.
			 */
#ifdef CONFIG_HOTPLUG_CPU
			/*
			 * buf->backend.allocated test takes care of concurrent CPU
			 * hotplug.
			 * Priority higher than frontend, so we create the ring buffer
			 * before we start the timer.
			 */
			chanb->cpu_hp_notifier.notifier_call =
					lib_ring_buffer_cpu_hp_callback;
			chanb->cpu_hp_notifier.priority = 5;
			register_hotcpu_notifier(&chanb->cpu_hp_notifier);

			get_online_cpus();
			for_each_online_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							     chanb, i);
				if (ret)
					goto free_bufs;	/* cpu hotplug locked */
			}
			put_online_cpus();
#else
			for_each_possible_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							     chanb, i);
				if (ret)
					goto free_bufs;
			}
#endif
		}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	} else {
		chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
		if (!chanb->buf)
			goto free_cpumask;
		ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		/*
		 * Teardown of lttng_rb_hp_prepare instance
		 * on "add" error is handled within cpu hotplug,
		 * no teardown to do from the caller.
		 */
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
#ifdef CONFIG_HOTPLUG_CPU
		put_online_cpus();
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf =
				per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_percpu(chanb->buf);
	} else
		kfree(chanb->buf);
free_cpumask:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		free_cpumask_var(chanb->cpumask);
	return -ENOMEM;
}
/**
 * channel_backend_unregister_notifiers - unregister notifiers
 * @chanb: the channel backend
 *
 * Holds CPU hotplug.
 */
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		int ret;

		ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
				&chanb->cpuhp_prepare.node);
		WARN_ON(ret);
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	}
}
/**
 * channel_backend_free - destroy the channel
 * @chanb: the channel backend
 *
 * Destroys all channel buffers and frees the channel.
 */
void channel_backend_free(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned int i;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_cpumask_var(chanb->cpumask);
		free_percpu(chanb->buf);
	} else {
		struct lib_ring_buffer *buf = chanb->buf;

		CHAN_WARN_ON(chanb, !buf->backend.allocated);
		lib_ring_buffer_free(buf);
		kfree(buf);
	}
}
/**
 * lib_ring_buffer_write - write data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : bytes already copied on the current page
 */
void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
			    const void *src, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_copy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
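/*
 * Relationship to the fast path (a sketch mirroring the inline wrapper
 * in wrapper/ringbuffer/backend.h, shown here under that assumption):
 * when the record fits in the current page, the inline copy handles it;
 * otherwise everything is delegated to the slow path above with
 * pagecpy = 0, and the do/while loop walks the record page by page.
 *
 *	if (likely(pagecpy == len))
 *		lib_ring_buffer_do_copy(config, dest, src, len);
 *	else
 *		_lib_ring_buffer_write(bufb, offset, src, len, 0);
 */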
/**
 * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @c : the byte to write
 * @len : length to write
 * @pagecpy : bytes already copied on the current page
 */
void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
			     size_t offset,
			     int c, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_memset(rpages->p[index].virt
					  + (offset & ~PAGE_MASK),
					  c, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
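/*
 * Usage note: besides client-initiated clears, this slow path is used
 * within this file to zero-pad a reserved slot when a user-space copy
 * faults mid-record (see _lib_ring_buffer_copy_from_user_inatomic()
 * below):
 *
 *	_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
 */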
/**
 * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : bytes already copied on the current page
 * @pad : character to use for padding
 */
void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
			size_t offset, const char *src, size_t len,
			size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	CHAN_WARN_ON(chanb, !len);
	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
			'\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);
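/*
 * Semantics sketch (illustrative values): writing the string "hi" into a
 * 6-byte slot with pad character '#' stores the bytes
 * 'h' 'i' '#' '#' '#' '\0', so the record occupies exactly "len" bytes
 * no matter how short the source string is.
 *
 *	_lib_ring_buffer_strcpy(bufb, offset, "hi", 6, 0, '#');
 */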
/**
 * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : bytes already copied on the current page
 *
 * This function deals with userspace pointers, it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
					      size_t offset,
					      const void __user *src, size_t len,
					      size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int ret;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
							+ (offset & ~PAGE_MASK),
							src, pagecpy) != 0;
		if (ret > 0) {
			/* Copy failed: zero-fill the rest of the record. */
			_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
			break; /* stop copy */
		}
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);
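/*
 * Illustrative caller-side validation (hypothetical; access_ok() took a
 * VERIFY_READ argument on the kernel versions this file targets): the
 * src pointer must be checked before reaching this slow path.
 *
 *	if (!access_ok(VERIFY_READ, src, len))
 *		return -EFAULT;
 *	_lib_ring_buffer_copy_from_user_inatomic(bufb, offset, src, len, 0);
 */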
/**
 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : bytes already copied on the current page
 * @pad : character to use for padding
 *
 * This function deals with userspace pointers, it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
		size_t offset, const char __user *src, size_t len,
		size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
			'\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);
/**
 * lib_ring_buffer_read - read data from ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index, pagecpy, orig_len;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	orig_len = len;
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
		       pagecpy);
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return orig_len;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read);
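/*
 * Reader-side sketch (hypothetical, simplified): reads are bracketed by
 * the frontend's get_subbuf()/put_subbuf() so the writer cannot reclaim
 * the sub-buffer while it is being read.
 *
 *	if (lib_ring_buffer_get_subbuf(buf, consumed))
 *		return -EAGAIN;
 *	lib_ring_buffer_read(&buf->backend, read_offset, dest, len);
 *	lib_ring_buffer_put_subbuf(buf);
 */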
/**
 * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination userspace address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * access_ok() must have been performed on dest addresses prior to calling
 * this function.
 * Returns -EFAULT on error, 0 if ok.
 */
int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
				   size_t offset, void __user *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		if (__copy_to_user(dest,
				   rpages->p[index].virt + (offset & ~PAGE_MASK),
				   pagecpy))
			return -EFAULT;
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);
/**
 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 *
 * Return string's length, or -EINVAL on error.
 * Should be protected by get_subbuf/put_subbuf.
 * Destination length should be at least 1 to hold '\0'.
 */
int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy, pagelen, strpagelen, orig_offset;
	char *str;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	orig_offset = offset;
	for (;;) {
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
		pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
		strpagelen = strnlen(str, pagelen);
		if (len) {
			pagecpy = min_t(size_t, len, strpagelen);
			if (dest) {
				memcpy(dest, str, pagecpy);
				dest += pagecpy;
			}
			len -= pagecpy;
		}
		offset += strpagelen;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		if (strpagelen < pagelen)
			break;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	if (dest && len)
		((char *)dest)[0] = 0;
	return offset - orig_offset;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);
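/*
 * Usage sketch (hypothetical): read back a NUL-terminated string
 * recorded at "offset". The destination should be sized to hold the
 * string and its terminator; the return value is the string's length in
 * the buffer, which may exceed the number of bytes copied.
 *
 *	char name[NAME_MAX];
 *	int slen;
 *
 *	slen = lib_ring_buffer_read_cstr(&buf->backend, offset, name,
 *					 sizeof(name));
 */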
/**
 * lib_ring_buffer_read_get_pfn - Get a page frame number to read from
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @virt : pointer to page address (output)
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the pointer to the page frame number unsigned long.
 */
unsigned long *lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb,
					    size_t offset, void ***virt)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	*virt = &rpages->p[index].virt;
	return &rpages->p[index].pfn;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);
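/*
 * Sketch (hypothetical consumer, e.g. an mmap fault handler): the
 * returned pointer refers to the backend's pfn slot and can be resolved
 * to a struct page.
 *
 *	unsigned long *pfnp;
 *	void **virt;
 *	struct page *page;
 *
 *	pfnp = lib_ring_buffer_read_get_pfn(&buf->backend, offset, &virt);
 *	page = pfn_to_page(*pfnp);
 */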
/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to read/write directly
 * from/to this address, as long as the read/write is never bigger than a
 * page size.
 */
void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
					  size_t offset)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);
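/*
 * Usage sketch (hypothetical; "struct packet_header" is an illustrative
 * client type): fetch the header of the sub-buffer being read. Per the
 * comment above, accesses through this pointer must stay within a page.
 *
 *	struct packet_header *header =
 *		lib_ring_buffer_read_offset_address(&buf->backend, 0);
 */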
/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
				     size_t offset)
{
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);