/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/oom.h>

#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>

/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: the buffer struct
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
				     struct lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	struct page **pages;
	unsigned long i;

	num_pages = size >> PAGE_SHIFT;

	/*
	 * Verify that there are enough free pages available on the system
	 * for the current allocation request. si_mem_available() gives an
	 * estimate of the memory available for a new allocation; if the
	 * request reaches it, bail out rather than risk triggering an OOM.
	 */
	if (num_pages >= si_mem_available())
		goto not_enough_pages;

	/*
	 * Set the current user thread as the first target of the OOM killer.
	 * If the estimate received by si_mem_available() was off, and we do
	 * end up running out of memory because of this buffer allocation, we
	 * want to kill the offending app first.
	 */
	set_current_oom_origin();

	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;

	if (extra_reader_sb) {
		num_pages += num_pages_per_subbuf; /* Add pages for reader */
		num_subbuf_alloc++;
	}

	pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
				   1 << INTERNODE_CACHE_SHIFT),
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!pages))
		goto pages_error;

	bufb->array = kvmalloc_node(ALIGN(sizeof(*bufb->array)
					  * num_subbuf_alloc,
				    1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL | __GFP_NOWARN,
			cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->array))
		goto array_error;

	for (i = 0; i < num_pages; i++) {
		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
				GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
		if (unlikely(!pages[i]))
			goto depopulate;
	}
	bufb->num_pages_per_subbuf = num_pages_per_subbuf;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		bufb->array[i] = kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_pages) +
				sizeof(struct lib_ring_buffer_backend_page)
				* num_pages_per_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
		if (!bufb->array[i])
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	bufb->buf_wsb = kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_subbuffer)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_wsb))
		goto free_array;

	for (i = 0; i < num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	/* Allocate subbuffer packet counter table */
	bufb->buf_cnt = kvzalloc_node(ALIGN(
				sizeof(struct lib_ring_buffer_backend_counts)
				* num_subbuf,
				1 << INTERNODE_CACHE_SHIFT),
				GFP_KERNEL | __GFP_NOWARN,
				cpu_to_node(max(bufb->cpu, 0)));
	if (unlikely(!bufb->buf_cnt))
		goto free_wsb;

	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < num_pages_per_subbuf; j++) {
			CHAN_WARN_ON(chanb, page_idx > num_pages);
			bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
			bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
			page_idx++;
		}
		if (config->output == RING_BUFFER_MMAP) {
			bufb->array[i]->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}

	clear_current_oom_origin();
	vfree(pages);
	return 0;

free_wsb:
	kvfree(bufb->buf_wsb);
free_array:
	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
		kvfree(bufb->array[i]);
depopulate:
	/* Free all allocated pages */
	for (i = 0; (i < num_pages && pages[i]); i++)
		__free_page(pages[i]);
	kvfree(bufb->array);
array_error:
	vfree(pages);
pages_error:
	clear_current_oom_origin();
not_enough_pages:
	return -ENOMEM;
}
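
/*
 * Layout sketch for the allocation above (illustrative values, not from the
 * original file): with PAGE_SIZE = 4 kB, subbuf_size = 16 kB and
 * num_subbuf = 4, a request of size = 64 kB gives:
 *
 *	num_pages = size >> PAGE_SHIFT;				// 16 pages
 *	num_pages_per_subbuf =
 *		num_pages >> get_count_order(num_subbuf);	// 16 >> 2 = 4
 *
 * When extra_reader_sb is set (overwrite mode), one more sub-buffer (4 more
 * pages here) is allocated and handed to the reader, so a sub-buffer can be
 * read while the writer keeps exchanging ids among its own num_subbuf
 * sub-buffers.
 */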

int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	bufb->chan = container_of(chanb, struct channel, backend);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb);
}

void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	unsigned long i, j, num_subbuf_alloc;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	kvfree(bufb->buf_wsb);
	kvfree(bufb->buf_cnt);
	for (i = 0; i < num_subbuf_alloc; i++) {
		for (j = 0; j < bufb->num_pages_per_subbuf; j++)
			__free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
		kvfree(bufb->array[i]);
	}
	kvfree(bufb->array);
	bufb->allocated = 0;
}

void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++)
		bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &bufb->array[i]->records_commit, 0);
		v_set(config, &bufb->array[i]->records_unread, 0);
		bufb->array[i]->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}

/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	const struct lib_ring_buffer_config *config = &chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

/*
 * No need to implement a "dead" callback to do a buffer switch here,
 * because it will happen when tracing is stopped, or will be done by
 * switch timer CPU DEAD callback.
 * We don't free buffers when CPUs go away, because it would make trace
 * data vanish, which is unwanted.
 */
int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
		struct lttng_cpuhp_node *node)
{
	struct channel_backend *chanb = container_of(node,
			struct channel_backend, cpuhp_prepare);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	buf = per_cpu_ptr(chanb->buf, cpu);
	ret = lib_ring_buffer_create(buf, chanb, cpu);
	if (ret) {
		printk(KERN_ERR
			"ring_buffer_cpu_hp_callback: cpu %d "
			"buffer creation failed\n", cpu);
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

#ifdef CONFIG_HOTPLUG_CPU

/**
 * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
static
int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
				    unsigned long action,
				    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct channel_backend *chanb = container_of(nb, struct channel_backend,
						     cpu_hp_notifier);
	const struct lib_ring_buffer_config *config = &chanb->config;
	struct lib_ring_buffer *buf;
	int ret;

	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		buf = per_cpu_ptr(chanb->buf, cpu);
		ret = lib_ring_buffer_create(buf, chanb, cpu);
		if (ret) {
			printk(KERN_ERR
				"ring_buffer_cpu_hp_callback: cpu %d "
				"buffer creation failed\n", cpu);
			return NOTIFY_BAD;
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/*
		 * No need to do a buffer switch here, because it will happen
		 * when tracing is stopped, or will be done by switch timer
		 * CPU DEAD callback.
		 */
		break;
	}
	return NOTIFY_OK;
}

#endif /* CONFIG_HOTPLUG_CPU */

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (>= PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 *
 * Returns 0 on success, a negative error value otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffer files will be named
 * name_0...name_N-1. File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size, size_t num_subbuf)
{
	struct channel *chan = container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;

	if (!name)
		return -EPERM;

	/* Check that the subbuffer size is larger than a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and subbuffer size are
	 * power of 2 and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;

	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->priv = priv;
	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
		(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strlcpy(chanb->name, name, NAME_MAX);
	memcpy(&chanb->config, config, sizeof(chanb->config));

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
			return -ENOMEM;
	}

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		/* Allocating the buffer per-cpu structures */
		chanb->buf = alloc_percpu(struct lib_ring_buffer);
		if (!chanb->buf)
			goto free_cpumask;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
		ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
			&chanb->cpuhp_prepare.node);
		if (ret)
			goto free_bufs;
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		{
			/*
			 * In case of non-hotplug cpu, if the ring-buffer is
			 * allocated in early initcall, it will not be
			 * notified of secondary cpus. In that case, we need
			 * to allocate for all possible cpus.
			 */
#ifdef CONFIG_HOTPLUG_CPU
			/*
			 * buf->backend.allocated test takes care of
			 * concurrent CPU hotplug.
			 * Priority higher than frontend, so we create the
			 * ring buffer before we start the timer.
			 */
			chanb->cpu_hp_notifier.notifier_call =
					lib_ring_buffer_cpu_hp_callback;
			chanb->cpu_hp_notifier.priority = 5;
			register_hotcpu_notifier(&chanb->cpu_hp_notifier);

			get_online_cpus();
			for_each_online_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							     chanb, i);
				if (ret)
					goto free_bufs;	/* cpu hotplug locked */
			}
			put_online_cpus();
#else
			for_each_possible_cpu(i) {
				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
							     chanb, i);
				if (ret)
					goto free_bufs;
			}
#endif
		}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	} else {
		chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
		if (!chanb->buf)
			goto free_cpumask;
		ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		/*
		 * Teardown of lttng_rb_hp_prepare instance
		 * on "add" error is handled within cpu hotplug,
		 * no teardown to do from the caller.
		 */
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
#ifdef CONFIG_HOTPLUG_CPU
		put_online_cpus();
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf =
				per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_percpu(chanb->buf);
	} else
		kfree(chanb->buf);
free_cpumask:
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		free_cpumask_var(chanb->cpumask);
	return -ENOMEM;
}
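
/*
 * Minimal usage sketch for channel_backend_init() (hypothetical caller code,
 * not part of this file; "my_config" and "my_priv" are assumed to exist on
 * the client side):
 *
 *	ret = channel_backend_init(&chan->backend, "my_chan", &my_config,
 *				   my_priv, 4 * PAGE_SIZE, 8);
 *	if (ret)
 *		return ret;	// -EINVAL, -EPERM or -ENOMEM
 *
 * Both 4 * PAGE_SIZE and 8 satisfy the power-of-2 and minimum-size checks
 * performed above.
 */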

/**
 * channel_backend_unregister_notifiers - unregister notifiers
 * @chanb: channel backend
 */
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
		int ret;

		ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
				&chanb->cpuhp_prepare.node);
		WARN_ON(ret);
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
	}
}

/**
 * channel_backend_free - destroy the channel
 * @chanb: channel backend
 *
 * Destroys all channel buffers and frees the channel.
 */
void channel_backend_free(struct channel_backend *chanb)
{
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned int i;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(i) {
			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

			if (!buf->backend.allocated)
				continue;
			lib_ring_buffer_free(buf);
		}
		free_cpumask_var(chanb->cpumask);
		free_percpu(chanb->buf);
	} else {
		struct lib_ring_buffer *buf = chanb->buf;

		CHAN_WARN_ON(chanb, !buf->backend.allocated);
		lib_ring_buffer_free(buf);
		kfree(buf);
	}
}

/**
 * lib_ring_buffer_write - write data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 */
void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
			    const void *src, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_copy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
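
/*
 * Worked example of the offset decomposition used by the copy loops in this
 * file (numbers assumed for illustration): with subbuf_size = 16 kB
 * (subbuf_size_order = 14), PAGE_SIZE = 4 kB and offset = 0x5008:
 *
 *	sbidx = offset >> chanb->subbuf_size_order;	// 0x5008 >> 14 = 1
 *	index = (offset & (chanb->subbuf_size - 1))
 *			>> PAGE_SHIFT;			// 0x1008 >> 12 = 1
 *	pageoff = offset & ~PAGE_MASK;			// 0x008
 *
 * i.e. byte 8 of page 1 of sub-buffer 1. Each iteration copies at most up
 * to the end of the current page (pagecpy), then recomputes the triplet.
 */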

/**
 * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @c : the byte to write
 * @len : length to write
 * @pagecpy : page size copied so far
 */
void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
			     size_t offset,
			     int c, size_t len, size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	do {
		len -= pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		lib_ring_buffer_do_memset(rpages->p[index].virt
					  + (offset & ~PAGE_MASK),
					  c, pagecpy);
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);

/**
 * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 * @pad : character to use for padding
 */
void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
			     size_t offset, const char *src, size_t len,
			     size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	CHAN_WARN_ON(chanb, !len);
	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
			'\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);
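
/*
 * Padding semantics of the strcpy helper above, on assumed values: writing
 * src = "ab" with pad = '#' into a 6-byte reservation copies "ab", fills the
 * next 3 bytes with '#', then writes the final '\0', producing "ab###\0".
 * Once the source terminates, every remaining page of the reservation is
 * filled with the pad character, so the record keeps its reserved length.
 */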

/**
 * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 *
 * This function deals with userspace pointers; it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
					      size_t offset,
					      const void __user *src, size_t len,
					      size_t pagecpy)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int ret;

	do {
		len -= pagecpy;
		src += pagecpy;
		offset += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
							+ (offset & ~PAGE_MASK),
						src, pagecpy) != 0;
		if (ret > 0) {
			/* Copy failed: zero-fill the rest of the record. */
			_lib_ring_buffer_memset(bufb, offset, 0, len, 0);
			break; /* stop copy */
		}
	} while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);

/**
 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 * @pad : character to use for padding
 *
 * This function deals with userspace pointers; it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
		size_t offset, const char __user *src, size_t len,
		size_t pagecpy, int pad)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;
	int src_terminated = 0;

	offset += pagecpy;
	do {
		len -= pagecpy;
		if (!src_terminated)
			src += pagecpy;
		sbidx = offset >> chanb->subbuf_size_order;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

		/*
		 * Underlying layer should never ask for writes across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_wsb[sbidx].id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));

		if (likely(!src_terminated)) {
			size_t count, to_copy;

			to_copy = pagecpy;
			if (pagecpy == len)
				to_copy--;	/* Final '\0' */
			count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
					rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					src, to_copy);
			offset += count;
			/* Padding */
			if (unlikely(count < to_copy)) {
				size_t pad_len = to_copy - count;

				/* Next pages will have padding */
				src_terminated = 1;
				lib_ring_buffer_do_memset(rpages->p[index].virt
						+ (offset & ~PAGE_MASK),
					pad, pad_len);
				offset += pad_len;
			}
		} else {
			size_t pad_len;

			pad_len = pagecpy;
			if (pagecpy == len)
				pad_len--;	/* Final '\0' */
			lib_ring_buffer_do_memset(rpages->p[index].virt
					+ (offset & ~PAGE_MASK),
				pad, pad_len);
			offset += pad_len;
		}
	} while (unlikely(len != pagecpy));
	/* Ending '\0' */
	lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
			'\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);

/**
 * lib_ring_buffer_read - read data from a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index, pagecpy, orig_len;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	orig_len = len;
	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
		       pagecpy);
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return orig_len;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read);
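
/*
 * Read-side usage sketch (hypothetical consumer code, not part of this
 * file): reads must be bracketed by get_subbuf/put_subbuf from the frontend
 * so that the read-side sub-buffer (buf_rsb) stays exclusively owned by the
 * reader during the copy:
 *
 *	if (!lib_ring_buffer_get_subbuf(buf, consumed)) {
 *		lib_ring_buffer_read(&buf->backend, read_offset, dest, count);
 *		lib_ring_buffer_put_subbuf(buf);
 *	}
 */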

/**
 * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination userspace address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * access_ok() must have been performed on dest addresses prior to calling
 * this function.
 * Returns -EFAULT on error, 0 if ok.
 */
int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
				   size_t offset, void __user *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	if (unlikely(!len))
		return 0;
	for (;;) {
		pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		if (__copy_to_user(dest,
				   rpages->p[index].virt + (offset & ~PAGE_MASK),
				   pagecpy))
			return -EFAULT;
		len -= pagecpy;
		if (likely(!len))
			break;
		dest += pagecpy;
		offset += pagecpy;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);

/**
 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 *
 * Return string's length, or -EINVAL on error.
 * Should be protected by get_subbuf/put_subbuf.
 * Destination length should be at least 1 to hold '\0'.
 */
int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len)
{
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	size_t index;
	ssize_t pagecpy, pagelen, strpagelen, orig_offset;
	char *str;
	struct lib_ring_buffer_backend_pages *rpages;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	orig_offset = offset;
	if (unlikely(!len))
		return -EINVAL;
	for (;;) {
		id = bufb->buf_rsb.id;
		sb_bindex = subbuffer_id_get_index(config, id);
		rpages = bufb->array[sb_bindex];
		CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
			     && subbuffer_id_is_noref(config, id));
		str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
		pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
		strpagelen = strnlen(str, pagelen);
		if (len) {
			pagecpy = min_t(size_t, len, strpagelen);
			if (dest) {
				memcpy(dest, str, pagecpy);
				dest += pagecpy;
			}
			len -= pagecpy;
		}
		offset += strpagelen;
		index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
		if (strpagelen < pagelen)
			break;
		/*
		 * Underlying layer should never ask for reads across
		 * subbuffers.
		 */
		CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	}
	if (dest && len)
		((char *)dest)[0] = 0;
	return offset - orig_offset;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);

/**
 * lib_ring_buffer_read_get_pfn - Get a page frame number to read from
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @virt : pointer to page address (output)
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the pointer to the page frame number unsigned long.
 */
unsigned long *lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb,
					    size_t offset, void ***virt)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	*virt = &rpages->p[index].virt;
	return &rpages->p[index].pfn;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);

/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to read/write directly
 * from/to this address, as long as the read/write is never bigger than a
 * page size.
 */
void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
					  size_t offset)
{
	size_t index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);

/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
				     size_t offset)
{
	size_t sbidx, index;
	struct lib_ring_buffer_backend_pages *rpages;
	struct channel_backend *chanb = &bufb->chan->backend;
	const struct lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);
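
/*
 * Note: the two *_offset_address() helpers above differ only in the
 * sub-buffer table they consult: lib_ring_buffer_read_offset_address() goes
 * through the read-side entry (buf_rsb), while lib_ring_buffer_offset_address()
 * goes through the write-side table (buf_wsb). A typical (illustrative) use
 * is reaching the current sub-buffer header, which always starts
 * page-aligned; "struct packet_header" is an assumed client-defined type:
 *
 *	struct packet_header *hdr =
 *		lib_ring_buffer_offset_address(&buf->backend, sb_offset);
 *
 * where sb_offset is the byte offset of the sub-buffer's start within the
 * buffer.
 */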