Cleanup: Move lib/ringbuffer/ headers to include/ringbuffer/
[lttng-modules.git] / lib / ringbuffer / ring_buffer_splice.c
1 /* SPDX-License-Identifier: GPL-2.0-only
2 *
3 * ring_buffer_splice.c
4 *
5 * Copyright (C) 2002-2005 - Tom Zanussi <zanussi@us.ibm.com>, IBM Corp
6 * Copyright (C) 1999-2005 - Karim Yaghmour <karim@opersys.com>
7 * Copyright (C) 2008-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Re-using code from kernel/relay.c, which is why it is licensed under
10 * the GPL-2.0.
11 */
12
13 #include <linux/module.h>
14 #include <linux/fs.h>
15 #include <linux/version.h>
16
17 #include <wrapper/splice.h>
18 #include <ringbuffer/backend.h>
19 #include <ringbuffer/frontend.h>
20 #include <ringbuffer/vfs.h>
21
/*
 * Debug printout helper, compiled out by default so the fast path carries
 * no cost. Flip "#if 0" to "#if 1" to enable verbose splice tracing via
 * printk.
 */
#if 0
#define printk_dbg(fmt, args...) printk(fmt, args)
#else
#define printk_dbg(fmt, args...)
#endif
27
28 loff_t vfs_lib_ring_buffer_no_llseek(struct file *file, loff_t offset,
29 int origin)
30 {
31 return -ESPIPE;
32 }
33 EXPORT_SYMBOL_GPL(vfs_lib_ring_buffer_no_llseek);
34
35 /*
36 * Release pages from the buffer so splice pipe_to_file can move them.
37 * Called after the pipe has been populated with buffer pages.
38 */
39 static void lib_ring_buffer_pipe_buf_release(struct pipe_inode_info *pipe,
40 struct pipe_buffer *pbuf)
41 {
42 __free_page(pbuf->page);
43 }
44
/*
 * Operations attached to ring buffer pages while they sit in the splice
 * pipe. Version guards track fields added/removed across kernel releases.
 */
static const struct pipe_buf_operations ring_buffer_pipe_buf_ops = {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0))
	.can_merge = 0,			/* field removed in kernel 5.1 */
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0))
	.map = generic_pipe_buf_map,	/* map/unmap removed in kernel 3.15 */
	.unmap = generic_pipe_buf_unmap,
#endif
	.confirm = generic_pipe_buf_confirm,
	.release = lib_ring_buffer_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};
58
59 /*
60 * Page release operation after splice pipe_to_file ends.
61 */
62 static void lib_ring_buffer_page_release(struct splice_pipe_desc *spd,
63 unsigned int i)
64 {
65 __free_page(spd->pages[i]);
66 }
67
/*
 * subbuf_splice_actor - splice up to one subbuf's worth of data
 *
 * Moves ring buffer pages into @pipe, replacing each moved page with a
 * freshly allocated zeroed page so the buffer stays usable while the pipe
 * owns the originals. Expects the caller (a prior GET_SUBBUF ioctl) to hold
 * the sub-buffer for reading.
 *
 * Returns the number of bytes pushed into the pipe, 0 if no page could be
 * moved, or a negative error from wrapper_splice_to_pipe().
 */
static int subbuf_splice_actor(struct file *in,
			       loff_t *ppos,
			       struct pipe_inode_info *pipe,
			       size_t len,
			       unsigned int flags,
			       struct lib_ring_buffer *buf)
{
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	unsigned int poff, subbuf_pages, nr_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.nr_pages = 0,
		.partial = partial,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0))
		.flags = flags,		/* spd.flags removed in kernel 4.12 */
#endif
		.ops = &ring_buffer_pipe_buf_ops,
		.spd_release = lib_ring_buffer_page_release,
	};
	unsigned long consumed_old, roffset;
	unsigned long bytes_avail;

	/*
	 * Check that a GET_SUBBUF ioctl has been done before.
	 */
	WARN_ON(atomic_long_read(&buf->active_readers) != 1);
	/* Absolute read position: consumed counter plus file offset. */
	consumed_old = lib_ring_buffer_get_consumed(config, buf);
	consumed_old += *ppos;

	/*
	 * Adjust read len, if longer than what is available.
	 * Max read size is 1 subbuffer due to get_subbuf/put_subbuf for
	 * protection.
	 */
	bytes_avail = chan->backend.subbuf_size;
	WARN_ON(bytes_avail > chan->backend.buf_size);
	len = min_t(size_t, len, bytes_avail);
	subbuf_pages = bytes_avail >> PAGE_SHIFT;
	nr_pages = min_t(unsigned int, subbuf_pages, PIPE_DEF_BUFFERS);
	/* Split position into page-aligned offset and intra-page offset. */
	roffset = consumed_old & PAGE_MASK;
	poff = consumed_old & ~PAGE_MASK;
	printk_dbg(KERN_DEBUG "SPLICE actor len %zu pos %zd write_pos %ld\n",
		   len, (ssize_t)*ppos, lib_ring_buffer_get_offset(config, buf));

	/* Move one buffer page per iteration, up to nr_pages or len bytes. */
	for (; spd.nr_pages < nr_pages; spd.nr_pages++) {
		unsigned int this_len;
		unsigned long *pfnp, new_pfn;
		struct page *new_page;
		void **virt;

		if (!len)
			break;
		printk_dbg(KERN_DEBUG "SPLICE actor loop len %zu roffset %ld\n",
			   len, roffset);

		/*
		 * We have to replace the page we are moving into the splice
		 * pipe.
		 */
		new_page = alloc_pages_node(cpu_to_node(max(buf->backend.cpu,
						0)),
				GFP_KERNEL | __GFP_ZERO, 0);
		if (!new_page)
			break;
		new_pfn = page_to_pfn(new_page);
		/*
		 * NOTE(review): this_len may exceed the remaining len on the
		 * last page; len -= this_len below would then wrap. Callers
		 * pass page-aligned len (checked in splice_read) and poff is
		 * presumably 0 for aligned sub-buffers — TODO confirm.
		 */
		this_len = PAGE_SIZE - poff;
		/* Swap the buffer's pfn/virt slot to point at the new page. */
		pfnp = lib_ring_buffer_read_get_pfn(&buf->backend, roffset, &virt);
		spd.pages[spd.nr_pages] = pfn_to_page(*pfnp);
		*pfnp = new_pfn;
		*virt = page_address(new_page);
		spd.partial[spd.nr_pages].offset = poff;
		spd.partial[spd.nr_pages].len = this_len;

		/* Only the first page can start mid-page. */
		poff = 0;
		roffset += PAGE_SIZE;
		len -= this_len;
	}

	if (!spd.nr_pages)
		return 0;

	return wrapper_splice_to_pipe(pipe, &spd);
}
157
158 ssize_t lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
159 struct pipe_inode_info *pipe, size_t len,
160 unsigned int flags,
161 struct lib_ring_buffer *buf)
162 {
163 struct channel *chan = buf->backend.chan;
164 const struct lib_ring_buffer_config *config = &chan->backend.config;
165 ssize_t spliced;
166 int ret;
167
168 if (config->output != RING_BUFFER_SPLICE)
169 return -EINVAL;
170
171 /*
172 * We require ppos and length to be page-aligned for performance reasons
173 * (no page copy). Size is known using the ioctl
174 * RING_BUFFER_GET_PADDED_SUBBUF_SIZE, which is page-size padded.
175 * We fail when the ppos or len passed is not page-sized, because splice
176 * is not allowed to copy more than the length passed as parameter (so
177 * the ABI does not let us silently copy more than requested to include
178 * padding).
179 */
180 if (*ppos != PAGE_ALIGN(*ppos) || len != PAGE_ALIGN(len))
181 return -EINVAL;
182
183 ret = 0;
184 spliced = 0;
185
186 printk_dbg(KERN_DEBUG "SPLICE read len %zu pos %zd\n", len,
187 (ssize_t)*ppos);
188 while (len && !spliced) {
189 ret = subbuf_splice_actor(in, ppos, pipe, len, flags, buf);
190 printk_dbg(KERN_DEBUG "SPLICE read loop ret %d\n", ret);
191 if (ret < 0)
192 break;
193 else if (!ret) {
194 if (flags & SPLICE_F_NONBLOCK)
195 ret = -EAGAIN;
196 break;
197 }
198
199 *ppos += ret;
200 if (ret > len)
201 len = 0;
202 else
203 len -= ret;
204 spliced += ret;
205 }
206
207 if (spliced)
208 return spliced;
209
210 return ret;
211 }
212 EXPORT_SYMBOL_GPL(lib_ring_buffer_splice_read);
213
214 ssize_t vfs_lib_ring_buffer_splice_read(struct file *in, loff_t *ppos,
215 struct pipe_inode_info *pipe, size_t len,
216 unsigned int flags)
217 {
218 struct lib_ring_buffer *buf = in->private_data;
219
220 return lib_ring_buffer_splice_read(in, ppos, pipe, len, flags, buf);
221 }
222 EXPORT_SYMBOL_GPL(vfs_lib_ring_buffer_splice_read);
This page took 0.033545 seconds and 4 git commands to generate.