/*
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/vmalloc.h>

#include <wrapper/mm.h>
#include <wrapper/vmalloc.h>	/* for wrapper_vmalloc_sync_all() */
#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>
/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @buf: the buffer struct
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: need extra subbuffer for reader
 */
static
int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config,
                                     struct lib_ring_buffer_backend *bufb,
                                     size_t size, size_t num_subbuf,
                                     int extra_reader_sb)
{
        struct channel_backend *chanb = &bufb->chan->backend;
        unsigned long j, num_pages, num_pages_per_subbuf, page_idx = 0;
        unsigned long subbuf_size, mmap_offset = 0;
        unsigned long num_subbuf_alloc;
        struct page **pages;
        unsigned long i;

        num_pages = size >> PAGE_SHIFT;

        /*
         * Verify that there is enough free pages available on the system for
         * the current allocation request.
         * wrapper_check_enough_free_pages uses si_mem_available() if available
         * and returns if there should be enough free pages based on the
         * current estimate.
         */
        if (!wrapper_check_enough_free_pages(num_pages))
                goto not_enough_pages;

        /*
         * Set the current user thread as the first target of the OOM killer.
         * If the estimate received by si_mem_available() was off, and we do
         * end up running out of memory because of this buffer allocation, we
         * want to kill the offending app first.
         */
        wrapper_set_current_oom_origin();

        num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
        subbuf_size = chanb->subbuf_size;
        num_subbuf_alloc = num_subbuf;

        if (extra_reader_sb) {
                num_pages += num_pages_per_subbuf; /* Add pages for reader */
                num_subbuf_alloc++;
        }

        pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
                                   1 << INTERNODE_CACHE_SHIFT),
                        cpu_to_node(max(bufb->cpu, 0)));
        if (unlikely(!pages))
                goto pages_error;

        bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
                                                * num_subbuf_alloc,
                                          1 << INTERNODE_CACHE_SHIFT),
                        GFP_KERNEL | __GFP_NOWARN,
                        cpu_to_node(max(bufb->cpu, 0)));
        if (unlikely(!bufb->array))
                goto array_error;

        for (i = 0; i < num_pages; i++) {
                pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
                                GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
                if (unlikely(!pages[i]))
                        goto depopulate;
        }
        bufb->num_pages_per_subbuf = num_pages_per_subbuf;

        /* Allocate backend pages array elements */
        for (i = 0; i < num_subbuf_alloc; i++) {
                bufb->array[i] =
                        lttng_kvzalloc_node(ALIGN(
                                sizeof(struct lib_ring_buffer_backend_pages) +
                                sizeof(struct lib_ring_buffer_backend_page)
                                * num_pages_per_subbuf,
                                1 << INTERNODE_CACHE_SHIFT),
                                GFP_KERNEL | __GFP_NOWARN,
                                cpu_to_node(max(bufb->cpu, 0)));
                if (!bufb->array[i])
                        goto free_array;
        }

        /* Allocate write-side subbuffer table */
        bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
                                sizeof(struct lib_ring_buffer_backend_subbuffer)
                                * num_subbuf,
                                1 << INTERNODE_CACHE_SHIFT),
                                GFP_KERNEL | __GFP_NOWARN,
                                cpu_to_node(max(bufb->cpu, 0)));
        if (unlikely(!bufb->buf_wsb))
                goto free_array;

        for (i = 0; i < num_subbuf; i++)
                bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);

        /* Assign read-side subbuffer table */
        if (extra_reader_sb)
                bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
                                                num_subbuf_alloc - 1);
        else
                bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

        /* Allocate subbuffer packet counter table */
        bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
                                sizeof(struct lib_ring_buffer_backend_counts)
                                * num_subbuf,
                                1 << INTERNODE_CACHE_SHIFT),
                                GFP_KERNEL | __GFP_NOWARN,
                                cpu_to_node(max(bufb->cpu, 0)));
        if (unlikely(!bufb->buf_cnt))
                goto free_wsb;

        /* Assign pages to page index */
        for (i = 0; i < num_subbuf_alloc; i++) {
                for (j = 0; j < num_pages_per_subbuf; j++) {
                        CHAN_WARN_ON(chanb, page_idx > num_pages);
                        bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
                        bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
                        page_idx++;
                }
                if (config->output == RING_BUFFER_MMAP) {
                        bufb->array[i]->mmap_offset = mmap_offset;
                        mmap_offset += subbuf_size;
                }
        }

        /*
         * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
         * will not fault.
         */
        wrapper_vmalloc_sync_all();
        wrapper_clear_current_oom_origin();
        vfree(pages);
        return 0;

free_wsb:
        lttng_kvfree(bufb->buf_wsb);
free_array:
        for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
                lttng_kvfree(bufb->array[i]);
depopulate:
        /* Free all allocated pages */
        for (i = 0; (i < num_pages && pages[i]); i++)
                __free_page(pages[i]);
        lttng_kvfree(bufb->array);
array_error:
        vfree(pages);
pages_error:
        wrapper_clear_current_oom_origin();
not_enough_pages:
        return -ENOMEM;
}
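
/*
 * Illustrative sketch of the allocation above (hypothetical sizes, not part
 * of the upstream code): with size = 64 * PAGE_SIZE and num_subbuf = 4,
 *
 *      num_pages            = 64
 *      num_pages_per_subbuf = 64 >> get_count_order(4) = 16
 *      extra_reader_sb      -> num_pages = 80, num_subbuf_alloc = 5
 *
 * The writer owns subbuffers 0..3, while the fifth subbuffer is initially
 * referenced by buf_rsb so a reader can exchange it against a full subbuffer
 * without stopping the writer.
 */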
int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
                                   struct channel_backend *chanb, int cpu)
{
        const struct lib_ring_buffer_config *config = &chanb->config;

        bufb->chan = container_of(chanb, struct channel, backend);
        bufb->cpu = cpu;

        return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
                                                chanb->num_subbuf,
                                                chanb->extra_reader_sb);
}
void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
{
        struct channel_backend *chanb = &bufb->chan->backend;
        unsigned long i, j, num_subbuf_alloc;

        num_subbuf_alloc = chanb->num_subbuf;
        if (chanb->extra_reader_sb)
                num_subbuf_alloc++;

        lttng_kvfree(bufb->buf_wsb);
        lttng_kvfree(bufb->buf_cnt);
        for (i = 0; i < num_subbuf_alloc; i++) {
                for (j = 0; j < bufb->num_pages_per_subbuf; j++)
                        __free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
                lttng_kvfree(bufb->array[i]);
        }
        lttng_kvfree(bufb->array);
}
void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
        struct channel_backend *chanb = &bufb->chan->backend;
        const struct lib_ring_buffer_config *config = &chanb->config;
        unsigned long num_subbuf_alloc;
        unsigned int i;

        num_subbuf_alloc = chanb->num_subbuf;
        if (chanb->extra_reader_sb)
                num_subbuf_alloc++;

        for (i = 0; i < chanb->num_subbuf; i++)
                bufb->buf_wsb[i].id = subbuffer_id(config, 0, 1, i);
        if (chanb->extra_reader_sb)
                bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
                                                num_subbuf_alloc - 1);
        else
                bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

        for (i = 0; i < num_subbuf_alloc; i++) {
                /* Don't reset mmap_offset */
                v_set(config, &bufb->array[i]->records_commit, 0);
                v_set(config, &bufb->array[i]->records_unread, 0);
                bufb->array[i]->data_size = 0;
                /* Don't reset backend page and virt addresses */
        }
        /* Don't reset num_pages_per_subbuf, cpu, allocated */
        v_set(config, &bufb->records_read, 0);
}
/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
        struct channel *chan = container_of(chanb, struct channel, backend);
        const struct lib_ring_buffer_config *config = &chanb->config;

        /*
         * Don't reset buf_size, subbuf_size, subbuf_size_order,
         * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
         * priv, notifiers, config, cpumask and name.
         */
        chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))

/*
 * No need to implement a "dead" callback to do a buffer switch here,
 * because it will happen when tracing is stopped, or will be done by
 * switch timer CPU DEAD callback.
 * We don't free buffers when CPU go away, because it would make trace
 * data vanish, which is unwanted.
 */
int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
                struct lttng_cpuhp_node *node)
{
        struct channel_backend *chanb = container_of(node,
                        struct channel_backend, cpuhp_prepare);
        const struct lib_ring_buffer_config *config = &chanb->config;
        struct lib_ring_buffer *buf;
        int ret;

        CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

        buf = per_cpu_ptr(chanb->buf, cpu);
        ret = lib_ring_buffer_create(buf, chanb, cpu);
        if (ret) {
                printk(KERN_ERR
                  "ring_buffer_cpu_hp_callback: cpu %d "
                  "buffer creation failed\n", cpu);
                return ret;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */

#ifdef CONFIG_HOTPLUG_CPU

/**
 * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
 * @nb: notifier block
 * @action: hotplug action to take
 * @hcpu: CPU number
 *
 * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
static
int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
                                    unsigned long action,
                                    void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct channel_backend *chanb = container_of(nb, struct channel_backend,
                                                     cpu_hp_notifier);
        const struct lib_ring_buffer_config *config = &chanb->config;
        struct lib_ring_buffer *buf;
        int ret;

        CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                buf = per_cpu_ptr(chanb->buf, cpu);
                ret = lib_ring_buffer_create(buf, chanb, cpu);
                if (ret) {
                        printk(KERN_ERR
                          "ring_buffer_cpu_hp_callback: cpu %d "
                          "buffer creation failed\n", cpu);
                        return NOTIFY_BAD;
                }
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                /* No need to do a buffer switch here, because it will happen
                 * when tracing is stopped, or will be done by switch timer CPU
                 * DEAD callback. */
                break;
        }
        return NOTIFY_OK;
}

#endif

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @priv: client private data
 * @subbuf_size: size of sub-buffers (> PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 *
 * Returns 0 on success, a negative error value otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. The created channel buffer files will be named
 * name_0...name_N-1. File permissions will be %S_IRUSR.
 *
 * Called with CPU hotplug disabled.
 */
int channel_backend_init(struct channel_backend *chanb,
                         const char *name,
                         const struct lib_ring_buffer_config *config,
                         void *priv, size_t subbuf_size, size_t num_subbuf)
{
        struct channel *chan = container_of(chanb, struct channel, backend);
        unsigned int i;
        int ret;

        if (!name)
                return -EPERM;

        /* Check that the subbuffer size is larger than a page. */
        if (subbuf_size < PAGE_SIZE)
                return -EINVAL;

        /*
         * Make sure the number of subbuffers and subbuffer size are
         * power of 2 and nonzero.
         */
        if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
                return -EINVAL;
        if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
                return -EINVAL;
        /*
         * Overwrite mode buffers require at least 2 subbuffers per
         * buffer.
         */
        if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
                return -EINVAL;

        ret = subbuffer_id_check_index(config, num_subbuf);
        if (ret)
                return ret;

        chanb->priv = priv;
        chanb->buf_size = num_subbuf * subbuf_size;
        chanb->subbuf_size = subbuf_size;
        chanb->buf_size_order = get_count_order(chanb->buf_size);
        chanb->subbuf_size_order = get_count_order(subbuf_size);
        chanb->num_subbuf_order = get_count_order(num_subbuf);
        chanb->extra_reader_sb =
                        (config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
        chanb->num_subbuf = num_subbuf;
        strlcpy(chanb->name, name, NAME_MAX);
        memcpy(&chanb->config, config, sizeof(chanb->config));

        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
                if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
                        return -ENOMEM;
        }

        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
                /* Allocating the buffer per-cpu structures */
                chanb->buf = alloc_percpu(struct lib_ring_buffer);
                if (!chanb->buf)
                        goto free_cpumask;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
                chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
                ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
                        &chanb->cpuhp_prepare.node);
                if (ret)
                        goto free_bufs;
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
                /*
                 * In case of non-hotplug cpu, if the ring-buffer is allocated
                 * in early initcall, it will not be notified of secondary cpus.
                 * In that off case, we need to allocate for all possible cpus.
                 */
#ifdef CONFIG_HOTPLUG_CPU
                /*
                 * buf->backend.allocated test takes care of concurrent CPU
                 * hotplug.
                 * Priority higher than frontend, so we create the ring buffer
                 * before we start the timer.
                 */
                chanb->cpu_hp_notifier.notifier_call =
                                lib_ring_buffer_cpu_hp_callback;
                chanb->cpu_hp_notifier.priority = 5;
                register_hotcpu_notifier(&chanb->cpu_hp_notifier);

                get_online_cpus();
                for_each_online_cpu(i) {
                        ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
                                                     chanb, i);
                        if (ret)
                                goto free_bufs;	/* cpu hotplug locked */
                }
                put_online_cpus();
#else
                for_each_possible_cpu(i) {
                        ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
                                                     chanb, i);
                        if (ret)
                                goto free_bufs;
                }
#endif
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
        } else {
                chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
                if (!chanb->buf)
                        goto free_cpumask;
                ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
                if (ret)
                        goto free_bufs;
        }
        chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

        return 0;

free_bufs:
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
                /*
                 * Teardown of lttng_rb_hp_prepare instance
                 * on "add" error is handled within cpu hotplug,
                 * no teardown to do from the caller.
                 */
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
#ifdef CONFIG_HOTPLUG_CPU
                put_online_cpus();
                unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
                for_each_possible_cpu(i) {
                        struct lib_ring_buffer *buf =
                                per_cpu_ptr(chanb->buf, i);

                        if (!buf->backend.allocated)
                                continue;
                        lib_ring_buffer_free(buf);
                }
                free_percpu(chanb->buf);
        } else
                kfree(chanb->buf);
free_cpumask:
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
                free_cpumask_var(chanb->cpumask);
        return -ENOMEM;
}
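
/*
 * Usage sketch (hypothetical client code, not part of this file): a ring
 * buffer client would pick power-of-two geometry honouring the checks above
 * and call channel_backend_init() from its channel creation path:
 *
 *      size_t subbuf_size = 8 * PAGE_SIZE;     (power of 2, larger than a page)
 *      size_t num_subbuf = 4;                  (power of 2, >= 2 in overwrite mode)
 *      int ret;
 *
 *      ret = channel_backend_init(&chan->backend, "my_channel",
 *                                 &client_config, priv,
 *                                 subbuf_size, num_subbuf);
 *      if (ret)
 *              return ret;
 *
 * "chan", "client_config" and "priv" are placeholders for the client's own
 * channel, configuration and private data.
 */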
/**
 * channel_backend_unregister_notifiers - unregister notifiers
 * @chanb: the channel backend
 */
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
        const struct lib_ring_buffer_config *config = &chanb->config;

        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
                int ret;

                ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
                                &chanb->cpuhp_prepare.node);
                WARN_ON(ret);
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
                unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
        }
}
/**
 * channel_backend_free - destroy the channel
 * @chanb: the channel backend
 *
 * Destroy all channel buffers and frees the channel.
 */
void channel_backend_free(struct channel_backend *chanb)
{
        const struct lib_ring_buffer_config *config = &chanb->config;
        unsigned int i;

        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
                for_each_possible_cpu(i) {
                        struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);

                        if (!buf->backend.allocated)
                                continue;
                        lib_ring_buffer_free(buf);
                }
                free_cpumask_var(chanb->cpumask);
                free_percpu(chanb->buf);
        } else {
                struct lib_ring_buffer *buf = chanb->buf;

                CHAN_WARN_ON(chanb, !buf->backend.allocated);
                lib_ring_buffer_free(buf);
                kfree(buf);
        }
}
/**
 * lib_ring_buffer_write - write data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 */
void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
                            const void *src, size_t len, size_t pagecpy)
{
        struct channel_backend *chanb = &bufb->chan->backend;
        const struct lib_ring_buffer_config *config = &chanb->config;
        size_t sbidx, index;
        struct lib_ring_buffer_backend_pages *rpages;
        unsigned long sb_bindex, id;

        do {
                len -= pagecpy;
                src += pagecpy;
                offset += pagecpy;
                sbidx = offset >> chanb->subbuf_size_order;
                index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

                /*
                 * Underlying layer should never ask for writes across
                 * subbuffers.
                 */
                CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

                pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
                id = bufb->buf_wsb[sbidx].id;
                sb_bindex = subbuffer_id_get_index(config, id);
                rpages = bufb->array[sb_bindex];
                CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
                             && subbuffer_id_is_noref(config, id));
                lib_ring_buffer_do_copy(config,
                                        rpages->p[index].virt
                                                + (offset & ~PAGE_MASK),
                                        src, pagecpy);
        } while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
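
/*
 * Worked example for the page-splitting loop above (hypothetical values,
 * assuming PAGE_SIZE = 4096 and the slow path entered with pagecpy = 0):
 * writing len = 200 bytes at offset = 4000 first copies
 * min(200, 4096 - 4000) = 96 bytes up to the end of the current page, then
 * advances offset to 4096 and copies the remaining 104 bytes at the start of
 * the next page. The loop exits once len == pagecpy; the comment and
 * CHAN_WARN_ON above document that the underlying layer never asks for
 * writes across subbuffers.
 */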
/**
 * lib_ring_buffer_memset - write len bytes of c to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @c : the byte to write
 * @len : length to write
 * @pagecpy : page size copied so far
 */
void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
                             size_t offset,
                             int c, size_t len, size_t pagecpy)
{
        struct channel_backend *chanb = &bufb->chan->backend;
        const struct lib_ring_buffer_config *config = &chanb->config;
        size_t sbidx, index;
        struct lib_ring_buffer_backend_pages *rpages;
        unsigned long sb_bindex, id;

        do {
                len -= pagecpy;
                offset += pagecpy;
                sbidx = offset >> chanb->subbuf_size_order;
                index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

                /*
                 * Underlying layer should never ask for writes across
                 * subbuffers.
                 */
                CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

                pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
                id = bufb->buf_wsb[sbidx].id;
                sb_bindex = subbuffer_id_get_index(config, id);
                rpages = bufb->array[sb_bindex];
                CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
                             && subbuffer_id_is_noref(config, id));
                lib_ring_buffer_do_memset(rpages->p[index].virt
                                                + (offset & ~PAGE_MASK),
                                          c, pagecpy);
        } while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
/**
 * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 * @pad : character to use for padding
 */
void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
                             size_t offset, const char *src, size_t len,
                             size_t pagecpy, int pad)
{
        struct channel_backend *chanb = &bufb->chan->backend;
        const struct lib_ring_buffer_config *config = &chanb->config;
        size_t sbidx, index;
        struct lib_ring_buffer_backend_pages *rpages;
        unsigned long sb_bindex, id;
        int src_terminated = 0;

        CHAN_WARN_ON(chanb, !len);
        offset += pagecpy;
        do {
                len -= pagecpy;
                if (!src_terminated)
                        src += pagecpy;
                sbidx = offset >> chanb->subbuf_size_order;
                index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

                /*
                 * Underlying layer should never ask for writes across
                 * subbuffers.
                 */
                CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

                pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
                id = bufb->buf_wsb[sbidx].id;
                sb_bindex = subbuffer_id_get_index(config, id);
                rpages = bufb->array[sb_bindex];
                CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
                             && subbuffer_id_is_noref(config, id));

                if (likely(!src_terminated)) {
                        size_t count, to_copy;

                        to_copy = pagecpy;
                        if (pagecpy == len)
                                to_copy--;	/* Final '\0' */
                        count = lib_ring_buffer_do_strcpy(config,
                                        rpages->p[index].virt
                                                + (offset & ~PAGE_MASK),
                                        src, to_copy);
                        offset += count;
                        /* Padding */
                        if (unlikely(count < to_copy)) {
                                size_t pad_len = to_copy - count;

                                /* Next pages will have padding */
                                src_terminated = 1;
                                lib_ring_buffer_do_memset(rpages->p[index].virt
                                                + (offset & ~PAGE_MASK),
                                        pad, pad_len);
                                offset += pad_len;
                        }
                } else {
                        size_t pad_len;

                        pad_len = pagecpy;
                        if (pagecpy == len)
                                pad_len--;	/* Final '\0' */
                        lib_ring_buffer_do_memset(rpages->p[index].virt
                                        + (offset & ~PAGE_MASK),
                                pad, pad_len);
                        offset += pad_len;
                }
        } while (unlikely(len != pagecpy));
        /* Ending '\0' */
        lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
                        '\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);
/**
 * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 *
 * This function deals with userspace pointers, it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
                                              size_t offset,
                                              const void __user *src, size_t len,
                                              size_t pagecpy)
{
        struct channel_backend *chanb = &bufb->chan->backend;
        const struct lib_ring_buffer_config *config = &chanb->config;
        size_t sbidx, index;
        struct lib_ring_buffer_backend_pages *rpages;
        unsigned long sb_bindex, id;
        int ret;

        do {
                len -= pagecpy;
                src += pagecpy;
                offset += pagecpy;
                sbidx = offset >> chanb->subbuf_size_order;
                index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

                /*
                 * Underlying layer should never ask for writes across
                 * subbuffers.
                 */
                CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

                pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
                id = bufb->buf_wsb[sbidx].id;
                sb_bindex = subbuffer_id_get_index(config, id);
                rpages = bufb->array[sb_bindex];
                CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
                             && subbuffer_id_is_noref(config, id));
                ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
                                                + (offset & ~PAGE_MASK),
                                                src, pagecpy) != 0;
                if (ret > 0) {
                        /* Copy failed. */
                        _lib_ring_buffer_memset(bufb, offset, 0, len, 0);
                        break; /* stop copy */
                }
        } while (unlikely(len != pagecpy));
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);
/**
 * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @src : source address
 * @len : length to write
 * @pagecpy : page size copied so far
 * @pad : character to use for padding
 *
 * This function deals with userspace pointers, it should never be called
 * directly without having the src pointer checked with access_ok()
 * previously.
 */
void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
                size_t offset, const char __user *src, size_t len,
                size_t pagecpy, int pad)
{
        struct channel_backend *chanb = &bufb->chan->backend;
        const struct lib_ring_buffer_config *config = &chanb->config;
        size_t sbidx, index;
        struct lib_ring_buffer_backend_pages *rpages;
        unsigned long sb_bindex, id;
        int src_terminated = 0;

        offset += pagecpy;
        do {
                len -= pagecpy;
                if (!src_terminated)
                        src += pagecpy;
                sbidx = offset >> chanb->subbuf_size_order;
                index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;

                /*
                 * Underlying layer should never ask for writes across
                 * subbuffers.
                 */
                CHAN_WARN_ON(chanb, offset >= chanb->buf_size);

                pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
                id = bufb->buf_wsb[sbidx].id;
                sb_bindex = subbuffer_id_get_index(config, id);
                rpages = bufb->array[sb_bindex];
                CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
                             && subbuffer_id_is_noref(config, id));

                if (likely(!src_terminated)) {
                        size_t count, to_copy;

                        to_copy = pagecpy;
                        if (pagecpy == len)
                                to_copy--;	/* Final '\0' */
                        count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
                                        rpages->p[index].virt
                                                + (offset & ~PAGE_MASK),
                                        src, to_copy);
                        offset += count;
                        /* Padding */
                        if (unlikely(count < to_copy)) {
                                size_t pad_len = to_copy - count;

                                /* Next pages will have padding */
                                src_terminated = 1;
                                lib_ring_buffer_do_memset(rpages->p[index].virt
                                                + (offset & ~PAGE_MASK),
                                        pad, pad_len);
                                offset += pad_len;
                        }
                } else {
                        size_t pad_len;

                        pad_len = pagecpy;
                        if (pagecpy == len)
                                pad_len--;	/* Final '\0' */
                        lib_ring_buffer_do_memset(rpages->p[index].virt
                                        + (offset & ~PAGE_MASK),
                                pad, pad_len);
                        offset += pad_len;
                }
        } while (unlikely(len != pagecpy));
        /* Ending '\0' */
        lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
                        '\0', 1);
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);
/**
 * lib_ring_buffer_read - read data from ring_buffer_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
                            void *dest, size_t len)
{
        struct channel_backend *chanb = &bufb->chan->backend;
        const struct lib_ring_buffer_config *config = &chanb->config;
        size_t index, pagecpy, orig_len;
        struct lib_ring_buffer_backend_pages *rpages;
        unsigned long sb_bindex, id;

        orig_len = len;
        offset &= chanb->buf_size - 1;
        index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
        if (unlikely(!len))
                return 0;
        for (;;) {
                pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
                id = bufb->buf_rsb.id;
                sb_bindex = subbuffer_id_get_index(config, id);
                rpages = bufb->array[sb_bindex];
                CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
                             && subbuffer_id_is_noref(config, id));
                memcpy(dest, rpages->p[index].virt + (offset & ~PAGE_MASK),
                       pagecpy);
                len -= pagecpy;
                if (likely(!len))
                        break;
                dest += pagecpy;
                offset += pagecpy;
                index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
                /*
                 * Underlying layer should never ask for reads across
                 * subbuffers.
                 */
                CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
        }
        return orig_len;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read);
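
/*
 * Usage sketch (hypothetical reader code, not part of this file): as stated
 * in the comment above, reads must be bracketed by get_subbuf()/put_subbuf()
 * so the subbuffer cannot be reclaimed while it is being read:
 *
 *      if (lib_ring_buffer_get_subbuf(buf, consumed))
 *              return -EAGAIN;
 *      copied = lib_ring_buffer_read(&buf->backend, read_offset, dest, len);
 *      lib_ring_buffer_put_subbuf(buf);
 *
 * "consumed", "read_offset", "dest" and "len" are placeholders derived from
 * the reader's own consumer state; the get/put helpers live in the frontend.
 */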
/**
 * __lib_ring_buffer_copy_to_user - read data from ring_buffer to userspace
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination userspace address
 * @len : length to copy to destination
 *
 * Should be protected by get_subbuf/put_subbuf.
 * access_ok() must have been performed on dest addresses prior to call this
 * function.
 * Returns -EFAULT on error, 0 if ok.
 */
int __lib_ring_buffer_copy_to_user(struct lib_ring_buffer_backend *bufb,
                                   size_t offset, void __user *dest, size_t len)
{
        struct channel_backend *chanb = &bufb->chan->backend;
        const struct lib_ring_buffer_config *config = &chanb->config;
        size_t index;
        ssize_t pagecpy;
        struct lib_ring_buffer_backend_pages *rpages;
        unsigned long sb_bindex, id;

        offset &= chanb->buf_size - 1;
        index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
        if (unlikely(!len))
                return 0;
        for (;;) {
                pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
                id = bufb->buf_rsb.id;
                sb_bindex = subbuffer_id_get_index(config, id);
                rpages = bufb->array[sb_bindex];
                CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
                             && subbuffer_id_is_noref(config, id));
                if (__copy_to_user(dest,
                               rpages->p[index].virt + (offset & ~PAGE_MASK),
                               pagecpy))
                        return -EFAULT;
                len -= pagecpy;
                if (likely(!len))
                        break;
                dest += pagecpy;
                offset += pagecpy;
                index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
                /*
                 * Underlying layer should never ask for reads across
                 * subbuffers.
                 */
                CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
        }
        return 0;
}
EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);
/**
 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 *
 * Return string's length, or -EINVAL on error.
 * Should be protected by get_subbuf/put_subbuf.
 * Destination length should be at least 1 to hold '\0'.
 */
int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
                              void *dest, size_t len)
{
        struct channel_backend *chanb = &bufb->chan->backend;
        const struct lib_ring_buffer_config *config = &chanb->config;
        size_t index;
        ssize_t pagecpy, pagelen, strpagelen, orig_offset;
        char *str;
        struct lib_ring_buffer_backend_pages *rpages;
        unsigned long sb_bindex, id;

        offset &= chanb->buf_size - 1;
        index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
        orig_offset = offset;
        if (unlikely(!len))
                return -EINVAL;
        for (;;) {
                id = bufb->buf_rsb.id;
                sb_bindex = subbuffer_id_get_index(config, id);
                rpages = bufb->array[sb_bindex];
                CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
                             && subbuffer_id_is_noref(config, id));
                str = (char *)rpages->p[index].virt + (offset & ~PAGE_MASK);
                pagelen = PAGE_SIZE - (offset & ~PAGE_MASK);
                strpagelen = strnlen(str, pagelen);
                if (len) {
                        pagecpy = min_t(size_t, len, strpagelen);
                        if (dest) {
                                memcpy(dest, str, pagecpy);
                                dest += pagecpy;
                        }
                        len -= pagecpy;
                }
                offset += strpagelen;
                index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
                if (strpagelen < pagelen)
                        break;
                /*
                 * Underlying layer should never ask for reads across
                 * subbuffers.
                 */
                CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
        }
        if (dest && len)
                ((char *)dest)[0] = 0;
        return offset - orig_offset;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);
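
/*
 * Behaviour sketch (hypothetical values): with a 16-byte dest and the string
 * "abc" stored at offset, lib_ring_buffer_read_cstr() copies the three
 * characters, NUL-terminates dest, and returns 3, the length of the string
 * read from the buffer.
 */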
/**
 * lib_ring_buffer_read_get_pfn - Get a page frame number to read from
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @virt : pointer to page address (output)
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the pointer to the page frame number unsigned long.
 */
unsigned long *lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb,
                                            size_t offset, void ***virt)
{
        size_t index;
        struct lib_ring_buffer_backend_pages *rpages;
        struct channel_backend *chanb = &bufb->chan->backend;
        const struct lib_ring_buffer_config *config = &chanb->config;
        unsigned long sb_bindex, id;

        offset &= chanb->buf_size - 1;
        index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
        id = bufb->buf_rsb.id;
        sb_bindex = subbuffer_id_get_index(config, id);
        rpages = bufb->array[sb_bindex];
        CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
                     && subbuffer_id_is_noref(config, id));
        *virt = &rpages->p[index].virt;
        return &rpages->p[index].pfn;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);
/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to read/write directly
 * from/to this address, as long as the read/write is never bigger than a
 * page size.
 */
void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
                                          size_t offset)
{
        size_t index;
        struct lib_ring_buffer_backend_pages *rpages;
        struct channel_backend *chanb = &bufb->chan->backend;
        const struct lib_ring_buffer_config *config = &chanb->config;
        unsigned long sb_bindex, id;

        offset &= chanb->buf_size - 1;
        index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
        id = bufb->buf_rsb.id;
        sb_bindex = subbuffer_id_get_index(config, id);
        rpages = bufb->array[sb_bindex];
        CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
                     && subbuffer_id_is_noref(config, id));
        return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_read_offset_address);
/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lib_ring_buffer_backend *bufb,
                                     size_t offset)
{
        size_t sbidx, index;
        struct lib_ring_buffer_backend_pages *rpages;
        struct channel_backend *chanb = &bufb->chan->backend;
        const struct lib_ring_buffer_config *config = &chanb->config;
        unsigned long sb_bindex, id;

        offset &= chanb->buf_size - 1;
        sbidx = offset >> chanb->subbuf_size_order;
        index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
        id = bufb->buf_wsb[sbidx].id;
        sb_bindex = subbuffer_id_get_index(config, id);
        rpages = bufb->array[sb_bindex];
        CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
                     && subbuffer_id_is_noref(config, id));
        return rpages->p[index].virt + (offset & ~PAGE_MASK);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_offset_address);