Fix: ensure power of 2 check handles 64-bit size_t entirely
[lttng-modules.git] / lib / ringbuffer / ring_buffer_vfs.c
CommitLineData
f3bc08c5
MD
1/*
2 * ring_buffer_vfs.c
3 *
4 * Copyright (C) 2009-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * Ring Buffer VFS file operations.
7 *
8 * Dual LGPL v2.1/GPL v2 license.
9 */
10
11#include <linux/module.h>
12#include <linux/fs.h>
13#include <linux/compat.h>
14
15#include "../../wrapper/ringbuffer/backend.h"
16#include "../../wrapper/ringbuffer/frontend.h"
17#include "../../wrapper/ringbuffer/vfs.h"
18#include "../../wrapper/poll.h"
19
/*
 * Copy an unsigned long value to the user-space address passed as the raw
 * ioctl "arg". Returns 0 on success, -EFAULT on inaccessible user memory
 * (put_user() semantics).
 */
static int put_ulong(unsigned long val, unsigned long arg)
{
	return put_user(val, (unsigned long __user *)arg);
}
24
#ifdef CONFIG_COMPAT
/*
 * Compat (32-bit userland on 64-bit kernel) counterpart of put_ulong():
 * the ioctl "arg" is a 32-bit user pointer, so it must be translated with
 * compat_ptr(), and the value written is a 32-bit compat_ulong_t.
 */
static int compat_put_ulong(compat_ulong_t val, unsigned long arg)
{
	return put_user(val, (compat_ulong_t __user *)compat_ptr(arg));
}
#endif
31
32/**
33 * lib_ring_buffer_open - ring buffer open file operation
34 * @inode: opened inode
35 * @file: opened file
36 *
37 * Open implementation. Makes sure only one open instance of a buffer is
38 * done at a given moment.
39 */
40int lib_ring_buffer_open(struct inode *inode, struct file *file)
41{
42 struct lib_ring_buffer *buf = inode->i_private;
43 int ret;
44
733ce41d
MD
45 if (!buf)
46 return -EINVAL;
47
f3bc08c5
MD
48 ret = lib_ring_buffer_open_read(buf);
49 if (ret)
50 return ret;
51
52 file->private_data = buf;
53 ret = nonseekable_open(inode, file);
54 if (ret)
55 goto release_read;
56 return 0;
57
58release_read:
59 lib_ring_buffer_release_read(buf);
60 return ret;
61}
62
/**
 * lib_ring_buffer_release - ring buffer release file operation
 * @inode: opened inode
 * @file: opened file
 *
 * Release implementation. Drops the read-side reference taken by
 * lib_ring_buffer_open(). Always succeeds.
 */
int lib_ring_buffer_release(struct inode *inode, struct file *file)
{
	struct lib_ring_buffer *buf = file->private_data;

	lib_ring_buffer_release_read(buf);

	return 0;
}
78
79/**
80 * lib_ring_buffer_poll - ring buffer poll file operation
81 * @filp: the file
82 * @wait: poll table
83 *
84 * Poll implementation.
85 */
86unsigned int lib_ring_buffer_poll(struct file *filp, poll_table *wait)
87{
88 unsigned int mask = 0;
89 struct lib_ring_buffer *buf = filp->private_data;
90 struct channel *chan = buf->backend.chan;
5a8fd222 91 const struct lib_ring_buffer_config *config = &chan->backend.config;
254ec7bc 92 int finalized, disabled;
f3bc08c5
MD
93
94 if (filp->f_mode & FMODE_READ) {
a33e44a6 95 poll_wait_set_exclusive(wait);
f3bc08c5
MD
96 poll_wait(filp, &buf->read_wait, wait);
97
98 finalized = lib_ring_buffer_is_finalized(config, buf);
254ec7bc
MD
99 disabled = lib_ring_buffer_channel_is_disabled(chan);
100
f3bc08c5
MD
101 /*
102 * lib_ring_buffer_is_finalized() contains a smp_rmb() ordering
103 * finalized load before offsets loads.
104 */
105 WARN_ON(atomic_long_read(&buf->active_readers) != 1);
106retry:
254ec7bc
MD
107 if (disabled)
108 return POLLERR;
109
f3bc08c5
MD
110 if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf), chan)
111 - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf), chan)
112 == 0) {
113 if (finalized)
114 return POLLHUP;
115 else {
116 /*
117 * The memory barriers
118 * __wait_event()/wake_up_interruptible() take
119 * care of "raw_spin_is_locked" memory ordering.
120 */
121 if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
122 goto retry;
123 else
124 return 0;
125 }
126 } else {
127 if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf),
128 chan)
129 - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf),
130 chan)
131 >= chan->backend.buf_size)
132 return POLLPRI | POLLRDBAND;
133 else
134 return POLLIN | POLLRDNORM;
135 }
136 }
137 return mask;
138}
139
/**
 * lib_ring_buffer_ioctl - control ring buffer reader synchronization
 *
 * @filp: the file
 * @cmd: the command
 * @arg: command arg
 *
 * This ioctl implements commands necessary for producer/consumer
 * and flight recorder reader interaction :
 * RING_BUFFER_GET_NEXT_SUBBUF
 *   Get the next sub-buffer that can be read. It never blocks.
 * RING_BUFFER_PUT_NEXT_SUBBUF
 *   Release the currently read sub-buffer.
 * RING_BUFFER_GET_SUBBUF_SIZE
 *   returns the size of the current sub-buffer.
 * RING_BUFFER_GET_MAX_SUBBUF_SIZE
 *   returns the maximum size for sub-buffers.
 * RING_BUFFER_GET_NUM_SUBBUF
 *   returns the number of reader-visible sub-buffers in the per cpu
 *   channel (for mmap).
 * RING_BUFFER_GET_MMAP_READ_OFFSET
 *   returns the offset of the subbuffer belonging to the reader.
 *   Should only be used for mmap clients.
 */
long lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct lib_ring_buffer *buf = filp->private_data;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;

	/* Reject all commands once the channel has been disabled. */
	if (lib_ring_buffer_channel_is_disabled(chan))
		return -EIO;

	switch (cmd) {
	case RING_BUFFER_SNAPSHOT:
		/* Capture consistent consumed/produced positions. */
		return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
					    &buf->prod_snapshot);
	case RING_BUFFER_SNAPSHOT_GET_CONSUMED:
		return put_ulong(buf->cons_snapshot, arg);
	case RING_BUFFER_SNAPSHOT_GET_PRODUCED:
		return put_ulong(buf->prod_snapshot, arg);
	case RING_BUFFER_GET_SUBBUF:
	{
		unsigned long uconsume;
		long ret;

		/* Caller supplies the consumed count it wants to read at. */
		ret = get_user(uconsume, (unsigned long __user *) arg);
		if (ret)
			return ret; /* will return -EFAULT */
		ret = lib_ring_buffer_get_subbuf(buf, uconsume);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_PUT_SUBBUF:
		lib_ring_buffer_put_subbuf(buf);
		return 0;

	case RING_BUFFER_GET_NEXT_SUBBUF:
	{
		long ret;

		ret = lib_ring_buffer_get_next_subbuf(buf);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_PUT_NEXT_SUBBUF:
		lib_ring_buffer_put_next_subbuf(buf);
		return 0;
	case RING_BUFFER_GET_SUBBUF_SIZE:
		return put_ulong(lib_ring_buffer_get_read_data_size(config, buf),
				 arg);
	case RING_BUFFER_GET_PADDED_SUBBUF_SIZE:
	{
		unsigned long size;

		/* Data size rounded up to full pages, for mmap consumers. */
		size = lib_ring_buffer_get_read_data_size(config, buf);
		size = PAGE_ALIGN(size);
		return put_ulong(size, arg);
	}
	case RING_BUFFER_GET_MAX_SUBBUF_SIZE:
		return put_ulong(chan->backend.subbuf_size, arg);
	case RING_BUFFER_GET_MMAP_LEN:
	{
		unsigned long mmap_buf_len;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		mmap_buf_len = chan->backend.buf_size;
		if (chan->backend.extra_reader_sb)
			mmap_buf_len += chan->backend.subbuf_size;
		/* mmap length is passed around as a (signed) int. */
		if (mmap_buf_len > INT_MAX)
			return -EFBIG;
		return put_ulong(mmap_buf_len, arg);
	}
	case RING_BUFFER_GET_MMAP_READ_OFFSET:
	{
		unsigned long sb_bindex;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		/* Translate the reader sub-buffer id into its mmap offset. */
		sb_bindex = subbuffer_id_get_index(config,
						   buf->backend.buf_rsb.id);
		return put_ulong(buf->backend.array[sb_bindex]->mmap_offset,
				 arg);
	}
	case RING_BUFFER_FLUSH:
		/* Force a sub-buffer switch so pending data becomes readable. */
		lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}
258
#ifdef CONFIG_COMPAT
/**
 * lib_ring_buffer_compat_ioctl - 32-bit compat variant of
 * lib_ring_buffer_ioctl
 * @filp: the file
 * @cmd: the command
 * @arg: command arg (32-bit user pointer, must go through compat_ptr())
 *
 * Same command set as lib_ring_buffer_ioctl(), but all values returned
 * to userland are written as 32-bit compat_ulong_t through
 * compat_put_ulong(), after checking they fit (-EFBIG/-EINVAL otherwise).
 */
long lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	struct lib_ring_buffer *buf = filp->private_data;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;

	/* Reject all commands once the channel has been disabled. */
	if (lib_ring_buffer_channel_is_disabled(chan))
		return -EIO;

	switch (cmd) {
	case RING_BUFFER_SNAPSHOT:
		return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
						&buf->prod_snapshot);
	case RING_BUFFER_SNAPSHOT_GET_CONSUMED:
		return compat_put_ulong(buf->cons_snapshot, arg);
	case RING_BUFFER_SNAPSHOT_GET_PRODUCED:
		return compat_put_ulong(buf->prod_snapshot, arg);
	case RING_BUFFER_GET_SUBBUF:
	{
		__u32 uconsume;
		unsigned long consume;
		long ret;

		ret = get_user(uconsume, (__u32 __user *) arg);
		if (ret)
			return ret; /* will return -EFAULT */
		/*
		 * Userland only provides the low 32 bits of the consumed
		 * count; combine them with the high bits of the kernel-side
		 * snapshot to rebuild the full value.
		 */
		consume = buf->cons_snapshot;
		consume &= ~0xFFFFFFFFL;
		consume |= uconsume;
		ret = lib_ring_buffer_get_subbuf(buf, consume);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_PUT_SUBBUF:
		lib_ring_buffer_put_subbuf(buf);
		return 0;

	case RING_BUFFER_GET_NEXT_SUBBUF:
	{
		long ret;

		ret = lib_ring_buffer_get_next_subbuf(buf);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_PUT_NEXT_SUBBUF:
		lib_ring_buffer_put_next_subbuf(buf);
		return 0;
	case RING_BUFFER_GET_SUBBUF_SIZE:
	{
		unsigned long data_size;

		data_size = lib_ring_buffer_get_read_data_size(config, buf);
		if (data_size > UINT_MAX)
			return -EFBIG;
		/*
		 * Fix: use compat_put_ulong(), not put_ulong(): arg is a
		 * 32-bit user pointer and userland expects a 32-bit value.
		 */
		return compat_put_ulong(data_size, arg);
	}
	case RING_BUFFER_GET_PADDED_SUBBUF_SIZE:
	{
		unsigned long size;

		size = lib_ring_buffer_get_read_data_size(config, buf);
		size = PAGE_ALIGN(size);
		if (size > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(size, arg);
	}
	case RING_BUFFER_GET_MAX_SUBBUF_SIZE:
		if (chan->backend.subbuf_size > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(chan->backend.subbuf_size, arg);
	case RING_BUFFER_GET_MMAP_LEN:
	{
		unsigned long mmap_buf_len;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		mmap_buf_len = chan->backend.buf_size;
		if (chan->backend.extra_reader_sb)
			mmap_buf_len += chan->backend.subbuf_size;
		if (mmap_buf_len > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(mmap_buf_len, arg);
	}
	case RING_BUFFER_GET_MMAP_READ_OFFSET:
	{
		unsigned long sb_bindex, read_offset;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		sb_bindex = subbuffer_id_get_index(config,
						   buf->backend.buf_rsb.id);
		read_offset = buf->backend.array[sb_bindex]->mmap_offset;
		if (read_offset > UINT_MAX)
			return -EINVAL;
		return compat_put_ulong(read_offset, arg);
	}
	case RING_BUFFER_FLUSH:
		lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif
372
/*
 * VFS file operations exported for ring buffer "stream" files.
 * Read access is splice/mmap based (no .read): data is consumed via
 * the ioctl get/put sub-buffer protocol combined with splice_read or mmap.
 */
const struct file_operations lib_ring_buffer_file_operations = {
	.owner = THIS_MODULE,
	.open = lib_ring_buffer_open,
	.release = lib_ring_buffer_release,
	.poll = lib_ring_buffer_poll,
	.splice_read = lib_ring_buffer_splice_read,
	.mmap = lib_ring_buffer_mmap,
	.unlocked_ioctl = lib_ring_buffer_ioctl,
	.llseek = lib_ring_buffer_no_llseek,
#ifdef CONFIG_COMPAT
	.compat_ioctl = lib_ring_buffer_compat_ioctl,
#endif
};
EXPORT_SYMBOL_GPL(lib_ring_buffer_file_operations);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Ring Buffer Library VFS");
This page took 0.039572 seconds and 4 git commands to generate.