/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (C) 2002-2005 - Tom Zanussi <zanussi@us.ibm.com>, IBM Corp
 * Copyright (C) 1999-2005 - Karim Yaghmour <karim@opersys.com>
 * Copyright (C) 2008-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Re-using code from kernel/relay.c, which is why it is licensed under
 * the GPL.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/version.h>

#include <wrapper/splice.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>
#include <wrapper/ringbuffer/vfs.h>

/* Compile-time toggle for the debug printouts used below. */
#if 0
#define printk_dbg(fmt, args...) printk(fmt, args)
#else
#define printk_dbg(fmt, args...)
#endif
/* The ring buffer file descriptors are not seekable. */
loff_t vfs_lib_ring_buffer_no_llseek(struct file *file, loff_t offset,
		int origin)
{
	return -ESPIPE;
}
EXPORT_SYMBOL_GPL(vfs_lib_ring_buffer_no_llseek);
/*
 * Release pages from the buffer so splice pipe_to_file can move them.
 * Called after the pipe has been populated with buffer pages.
 */
static void lib_ring_buffer_pipe_buf_release(struct pipe_inode_info *pipe,
					     struct pipe_buffer *pbuf)
{
	__free_page(pbuf->page);
}
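
/*
 * Pipe buffer operations for pages spliced out of the ring buffer. The
 * ->release hook frees the page once the pipe consumer is done with it:
 * ownership of the page was transferred to the pipe by subbuf_splice_actor(),
 * which swapped a fresh page into the ring buffer backend in its place. The
 * version guards below only set members that still exist on older kernels
 * (->map/->unmap were removed in Linux 3.15, ->can_merge in Linux 5.1).
 */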
static const struct pipe_buf_operations ring_buffer_pipe_buf_ops = {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0))
	.can_merge = 0,
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0))
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
#endif
	.confirm = generic_pipe_buf_confirm,
	.release = lib_ring_buffer_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};
/*
 * Page release operation after splice pipe_to_file ends.
 */
static void lib_ring_buffer_page_release(struct splice_pipe_desc *spd,
					 unsigned int i)
{
	__free_page(spd->pages[i]);
}
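
/*
 * Zero-copy scheme used by the splice actor below: for each page of the
 * sub-buffer being read, the real buffer page is handed to the pipe and a
 * freshly allocated, zeroed page is swapped into the ring buffer backend in
 * its place. The page given to the pipe is then owned by the pipe and is
 * freed by lib_ring_buffer_pipe_buf_release() once the consumer is done with
 * it, or by lib_ring_buffer_page_release() for pages that could not be
 * pushed into the pipe, so the sub-buffer data is never copied.
 */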
/*
 * subbuf_splice_actor - splice up to one subbuf's worth of data
 */
static int subbuf_splice_actor(struct file *in,
			       loff_t *ppos,
			       struct pipe_inode_info *pipe,
			       size_t len,
			       unsigned int flags,
			       struct lib_ring_buffer *buf)
{
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	unsigned int poff, subbuf_pages, nr_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.nr_pages = 0,
		.partial = partial,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0))
		.flags = flags,
#endif
		.ops = &ring_buffer_pipe_buf_ops,
		.spd_release = lib_ring_buffer_page_release,
	};
	unsigned long consumed_old, roffset;
	unsigned long bytes_avail;

	/*
	 * Check that a GET_SUBBUF ioctl has been done before.
	 */
	WARN_ON(atomic_long_read(&buf->active_readers) != 1);
	consumed_old = lib_ring_buffer_get_consumed(config, buf);
	consumed_old += *ppos;

	/*
	 * Adjust read len, if longer than what is available.
	 * Max read size is 1 subbuffer due to get_subbuf/put_subbuf for
	 * protection.
	 */
	bytes_avail = chan->backend.subbuf_size;
	WARN_ON(bytes_avail > chan->backend.buf_size);
	len = min_t(size_t, len, bytes_avail);
	subbuf_pages = bytes_avail >> PAGE_SHIFT;
	nr_pages = min_t(unsigned int, subbuf_pages, PIPE_DEF_BUFFERS);
	roffset = consumed_old & PAGE_MASK;
	poff = consumed_old & ~PAGE_MASK;
	printk_dbg(KERN_DEBUG "SPLICE actor len %zu pos %zd write_pos %ld\n",
		   len, (ssize_t)*ppos, lib_ring_buffer_get_offset(config, buf));

	for (; spd.nr_pages < nr_pages; spd.nr_pages++) {
		unsigned int this_len;
		unsigned long *pfnp, new_pfn;
		struct page *new_page;
		void **virt;

		if (!len)
			break;
		printk_dbg(KERN_DEBUG "SPLICE actor loop len %zu roffset %ld\n",
			   len, roffset);

		/*
		 * We have to replace the page we are moving into the splice
		 * pipe: the pipe consumer frees it through the ->release
		 * operation, so the ring buffer backend must be given a
		 * fresh zeroed page at the same position.
		 */
		new_page = alloc_pages_node(cpu_to_node(max(buf->backend.cpu,
							    0)),
					    GFP_KERNEL | __GFP_ZERO, 0);
		if (!new_page)
			break;
		new_pfn = page_to_pfn(new_page);
		this_len = PAGE_SIZE - poff;
		pfnp = lib_ring_buffer_read_get_pfn(&buf->backend, roffset, &virt);
		spd.pages[spd.nr_pages] = pfn_to_page(*pfnp);
		*pfnp = new_pfn;
		*virt = page_address(new_page);
		spd.partial[spd.nr_pages].offset = poff;
		spd.partial[spd.nr_pages].len = this_len;

		poff = 0;
		roffset += PAGE_SIZE;
		len -= this_len;
	}

	if (!spd.nr_pages)
		return 0;

	return wrapper_splice_to_pipe(pipe, &spd);
}
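
/*
 * Splice sub-buffer data from a ring buffer into a pipe. Only valid for
 * channels configured with RING_BUFFER_SPLICE output; *ppos and len must be
 * page-aligned. Returns the number of bytes spliced, 0 if no data was
 * available, or a negative error code (-EAGAIN with SPLICE_F_NONBLOCK when
 * nothing is ready).
 */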
ssize_t lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe, size_t len,
				    unsigned int flags,
				    struct lib_ring_buffer *buf)
{
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	ssize_t spliced;
	int ret;

	if (config->output != RING_BUFFER_SPLICE)
		return -EINVAL;

	/*
	 * We require ppos and length to be page-aligned for performance reasons
	 * (no page copy). Size is known using the ioctl
	 * RING_BUFFER_GET_PADDED_SUBBUF_SIZE, which is page-size padded.
	 * We fail when the ppos or len passed is not page-sized, because splice
	 * is not allowed to copy more than the length passed as parameter (so
	 * the ABI does not let us silently copy more than requested to include
	 * padding).
	 */
	if (*ppos != PAGE_ALIGN(*ppos) || len != PAGE_ALIGN(len))
		return -EINVAL;

	ret = 0;
	spliced = 0;

	printk_dbg(KERN_DEBUG "SPLICE read len %zu pos %zd\n", len,
		   (ssize_t)*ppos);
	while (len && !spliced) {
		ret = subbuf_splice_actor(in, ppos, pipe, len, flags, buf);
		printk_dbg(KERN_DEBUG "SPLICE read loop ret %d\n", ret);
		if (ret < 0)
			break;
		else if (!ret) {
			if (flags & SPLICE_F_NONBLOCK)
				ret = -EAGAIN;
			break;
		}

		*ppos += ret;
		if (ret > len)
			len = 0;
		else
			len -= ret;
		spliced += ret;
	}

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_splice_read);
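
/*
 * For reference, a sketch of how a user-space consumer is expected to drive
 * this entry point on a stream file descriptor. The ioctl names are assumed
 * to match the ring buffer VFS ABI declared in wrapper/ringbuffer/vfs.h;
 * short writes and error handling are omitted, so treat this as an
 * illustration rather than a definitive client implementation:
 *
 *	ioctl(stream_fd, RING_BUFFER_GET_SUBBUF, ...);
 *	ioctl(stream_fd, RING_BUFFER_GET_PADDED_SUBBUF_SIZE, &len);
 *	splice(stream_fd, &offset, pipe_fd[1], NULL, len, SPLICE_F_MOVE);
 *	splice(pipe_fd[0], NULL, out_fd, NULL, len, SPLICE_F_MOVE);
 *	ioctl(stream_fd, RING_BUFFER_PUT_SUBBUF, ...);
 *
 * Both the offset and len stay page-aligned because the padded sub-buffer
 * size is page-size padded, as required by the check in
 * lib_ring_buffer_splice_read() above.
 */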
ssize_t vfs_lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
					struct pipe_inode_info *pipe, size_t len,
					unsigned int flags)
{
	struct lib_ring_buffer *buf = in->private_data;

	return lib_ring_buffer_splice_read(in, ppos, pipe, len, flags, buf);
}
EXPORT_SYMBOL_GPL(vfs_lib_ring_buffer_splice_read);