Fix: don't perform extra flush on metadata channel
[lttng-modules.git] / lib / ringbuffer / ring_buffer_vfs.c
1 /*
2 * ring_buffer_vfs.c
3 *
4 * Ring Buffer VFS file operations.
5 *
6 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #include <linux/module.h>
24 #include <linux/fs.h>
25 #include <linux/compat.h>
26
27 #include <wrapper/ringbuffer/backend.h>
28 #include <wrapper/ringbuffer/frontend.h>
29 #include <wrapper/ringbuffer/vfs.h>
30 #include <wrapper/poll.h>
31 #include <lttng-tracer.h>
32
33 static int put_ulong(unsigned long val, unsigned long arg)
34 {
35 return put_user(val, (unsigned long __user *)arg);
36 }
37
#ifdef CONFIG_COMPAT
/* 32-bit variant: decode the compat user pointer, then copy "val" out. */
static int compat_put_ulong(compat_ulong_t val, unsigned long arg)
{
	compat_ulong_t __user *uptr = compat_ptr(arg);

	return put_user(val, uptr);
}
#endif
44
45 /*
46 * This is not used by anonymous file descriptors. This code is left
47 * there if we ever want to implement an inode with open() operation.
48 */
49 int lib_ring_buffer_open(struct inode *inode, struct file *file,
50 struct lib_ring_buffer *buf)
51 {
52 int ret;
53
54 if (!buf)
55 return -EINVAL;
56
57 ret = lib_ring_buffer_open_read(buf);
58 if (ret)
59 return ret;
60
61 ret = nonseekable_open(inode, file);
62 if (ret)
63 goto release_read;
64 return 0;
65
66 release_read:
67 lib_ring_buffer_release_read(buf);
68 return ret;
69 }
70 EXPORT_SYMBOL_GPL(lib_ring_buffer_open);
71
72 /**
73 * vfs_lib_ring_buffer_open - ring buffer open file operation
74 * @inode: opened inode
75 * @file: opened file
76 *
77 * Open implementation. Makes sure only one open instance of a buffer is
78 * done at a given moment.
79 */
80 static
81 int vfs_lib_ring_buffer_open(struct inode *inode, struct file *file)
82 {
83 struct lib_ring_buffer *buf = inode->i_private;
84
85 file->private_data = buf;
86 return lib_ring_buffer_open(inode, file, buf);
87 }
88
/* Drop the read-side reference taken by lib_ring_buffer_open(). */
int lib_ring_buffer_release(struct inode *inode, struct file *file,
		struct lib_ring_buffer *buf)
{
	lib_ring_buffer_release_read(buf);
	return 0;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_release);
97
98 /**
99 * vfs_lib_ring_buffer_release - ring buffer release file operation
100 * @inode: opened inode
101 * @file: opened file
102 *
103 * Release implementation.
104 */
105 static
106 int vfs_lib_ring_buffer_release(struct inode *inode, struct file *file)
107 {
108 struct lib_ring_buffer *buf = file->private_data;
109
110 return lib_ring_buffer_release(inode, file, buf);
111 }
112
/**
 * lib_ring_buffer_poll - register on the read wait queue and compute readiness
 * @filp: the file
 * @wait: poll table
 * @buf: ring buffer being polled
 *
 * Returns POLLERR when the channel is disabled, POLLHUP when the buffer
 * is empty and finalized, POLLPRI | POLLRDBAND when at least a full
 * buffer's worth of data is pending, POLLIN | POLLRDNORM when some data
 * is readable, 0 otherwise.
 */
unsigned int lib_ring_buffer_poll(struct file *filp, poll_table *wait,
		struct lib_ring_buffer *buf)
{
	unsigned int mask = 0;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	int finalized, disabled;

	if (filp->f_mode & FMODE_READ) {
		/* Only wake one reader at a time (exclusive wait). */
		poll_wait_set_exclusive(wait);
		poll_wait(filp, &buf->read_wait, wait);

		finalized = lib_ring_buffer_is_finalized(config, buf);
		disabled = lib_ring_buffer_channel_is_disabled(chan);

		/*
		 * lib_ring_buffer_is_finalized() contains a smp_rmb() ordering
		 * finalized load before offsets loads.
		 */
		WARN_ON(atomic_long_read(&buf->active_readers) != 1);
retry:
		if (disabled)
			return POLLERR;

		/* Empty when produced and consumed sub-buffer counts match. */
		if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf), chan)
		  - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf), chan)
		  == 0) {
			if (finalized)
				return POLLHUP;
			else {
				/*
				 * The memory barriers
				 * __wait_event()/wake_up_interruptible() take
				 * care of "raw_spin_is_locked" memory ordering.
				 */
				if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
					goto retry;
				else
					return 0;
			}
		} else {
			/*
			 * Data pending: high-priority readability when the
			 * produced-consumed delta reaches the whole buffer
			 * size, normal readability otherwise.
			 */
			if (subbuf_trunc(lib_ring_buffer_get_offset(config, buf),
					chan)
			  - subbuf_trunc(lib_ring_buffer_get_consumed(config, buf),
					chan)
			  >= chan->backend.buf_size)
				return POLLPRI | POLLRDBAND;
			else
				return POLLIN | POLLRDNORM;
		}
	}
	return mask;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_poll);
167
168 /**
169 * vfs_lib_ring_buffer_poll - ring buffer poll file operation
170 * @filp: the file
171 * @wait: poll table
172 *
173 * Poll implementation.
174 */
175 static
176 unsigned int vfs_lib_ring_buffer_poll(struct file *filp, poll_table *wait)
177 {
178 struct lib_ring_buffer *buf = filp->private_data;
179
180 return lib_ring_buffer_poll(filp, wait, buf);
181 }
182
/**
 * lib_ring_buffer_ioctl - ring buffer reader/consumer control commands
 * @filp: the file
 * @cmd: the command
 * @arg: command argument (value or user-space pointer, depending on @cmd)
 * @buf: ring buffer the command operates on
 *
 * Returns 0 or a non-negative value on success, a negative error code
 * on failure: -EIO when the channel is disabled, -EFAULT on user copy
 * failure, -EINVAL/-EFBIG for mmap queries, -ENOIOCTLCMD for unknown
 * commands.
 */
long lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg, struct lib_ring_buffer *buf)
{
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;

	if (lib_ring_buffer_channel_is_disabled(chan))
		return -EIO;

	switch (cmd) {
	case RING_BUFFER_SNAPSHOT:
		/*
		 * First, ensure we perform a "final" flush onto the
		 * stream. This will ensure we create a packet of
		 * padding if we encounter an empty packet. This ensures
		 * the time-stamps right before the snapshot is used as
		 * end of packet timestamp.
		 */
		if (!buf->quiescent)
			lib_ring_buffer_switch_remote_empty(buf);
		return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
			    &buf->prod_snapshot);
	case RING_BUFFER_SNAPSHOT_GET_CONSUMED:
		/* Copy out the consumed position saved by SNAPSHOT. */
		return put_ulong(buf->cons_snapshot, arg);
	case RING_BUFFER_SNAPSHOT_GET_PRODUCED:
		/* Copy out the produced position saved by SNAPSHOT. */
		return put_ulong(buf->prod_snapshot, arg);
	case RING_BUFFER_GET_SUBBUF:
	{
		unsigned long uconsume;
		long ret;

		/* Consumed position to acquire is read from user-space. */
		ret = get_user(uconsume, (unsigned long __user *) arg);
		if (ret)
			return ret; /* will return -EFAULT */
		ret = lib_ring_buffer_get_subbuf(buf, uconsume);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_PUT_SUBBUF:
		lib_ring_buffer_put_subbuf(buf);
		return 0;

	case RING_BUFFER_GET_NEXT_SUBBUF:
	{
		long ret;

		ret = lib_ring_buffer_get_next_subbuf(buf);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_PUT_NEXT_SUBBUF:
		lib_ring_buffer_put_next_subbuf(buf);
		return 0;
	case RING_BUFFER_GET_SUBBUF_SIZE:
		/* Data size of the sub-buffer currently owned by the reader. */
		return put_ulong(lib_ring_buffer_get_read_data_size(config, buf),
				 arg);
	case RING_BUFFER_GET_PADDED_SUBBUF_SIZE:
	{
		unsigned long size;

		size = lib_ring_buffer_get_read_data_size(config, buf);
		/* Round the data size up to a full page. */
		size = PAGE_ALIGN(size);
		return put_ulong(size, arg);
	}
	case RING_BUFFER_GET_MAX_SUBBUF_SIZE:
		return put_ulong(chan->backend.subbuf_size, arg);
	case RING_BUFFER_GET_MMAP_LEN:
	{
		unsigned long mmap_buf_len;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		mmap_buf_len = chan->backend.buf_size;
		/* The extra reader sub-buffer, when present, is mapped too. */
		if (chan->backend.extra_reader_sb)
			mmap_buf_len += chan->backend.subbuf_size;
		if (mmap_buf_len > INT_MAX)
			return -EFBIG;
		return put_ulong(mmap_buf_len, arg);
	}
	case RING_BUFFER_GET_MMAP_READ_OFFSET:
	{
		unsigned long sb_bindex;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		/* Offset of the sub-buffer currently owned by the reader. */
		sb_bindex = subbuffer_id_get_index(config,
						   buf->backend.buf_rsb.id);
		return put_ulong(buf->backend.array[sb_bindex]->mmap_offset,
				 arg);
	}
	case RING_BUFFER_FLUSH:
		lib_ring_buffer_switch_remote(buf);
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_ioctl);
287
288 /**
289 * vfs_lib_ring_buffer_ioctl - control ring buffer reader synchronization
290 *
291 * @filp: the file
292 * @cmd: the command
293 * @arg: command arg
294 *
295 * This ioctl implements commands necessary for producer/consumer
296 * and flight recorder reader interaction :
297 * RING_BUFFER_GET_NEXT_SUBBUF
298 * Get the next sub-buffer that can be read. It never blocks.
299 * RING_BUFFER_PUT_NEXT_SUBBUF
300 * Release the currently read sub-buffer.
301 * RING_BUFFER_GET_SUBBUF_SIZE
302 * returns the size of the current sub-buffer.
303 * RING_BUFFER_GET_MAX_SUBBUF_SIZE
304 * returns the maximum size for sub-buffers.
305 * RING_BUFFER_GET_NUM_SUBBUF
306 * returns the number of reader-visible sub-buffers in the per cpu
307 * channel (for mmap).
308 * RING_BUFFER_GET_MMAP_READ_OFFSET
309 * returns the offset of the subbuffer belonging to the reader.
310 * Should only be used for mmap clients.
311 */
312 static
313 long vfs_lib_ring_buffer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
314 {
315 struct lib_ring_buffer *buf = filp->private_data;
316
317 return lib_ring_buffer_ioctl(filp, cmd, arg, buf);
318 }
319
320 #ifdef CONFIG_COMPAT
/**
 * lib_ring_buffer_compat_ioctl - 32-bit compat ring buffer control commands
 * @filp: the file
 * @cmd: the command
 * @arg: command argument (value or compat user-space pointer)
 * @buf: ring buffer the command operates on
 *
 * Mirrors lib_ring_buffer_ioctl() for 32-bit user-space on a 64-bit
 * kernel: sizes and offsets that do not fit in 32 bits yield -EFBIG
 * (or -EINVAL for the mmap read offset). Also returns -EIO when the
 * channel is disabled and -ENOIOCTLCMD for unknown commands.
 */
long lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
		unsigned long arg, struct lib_ring_buffer *buf)
{
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;

	if (lib_ring_buffer_channel_is_disabled(chan))
		return -EIO;

	switch (cmd) {
	case RING_BUFFER_COMPAT_SNAPSHOT:
		/*
		 * First, ensure we perform a "final" flush onto the
		 * stream. This will ensure we create a packet of
		 * padding if we encounter an empty packet. This ensures
		 * the time-stamps right before the snapshot is used as
		 * end of packet timestamp.
		 */
		if (!buf->quiescent)
			lib_ring_buffer_switch_remote_empty(buf);
		return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
			    &buf->prod_snapshot);
	case RING_BUFFER_COMPAT_SNAPSHOT_GET_CONSUMED:
		/* Copy out the consumed position saved by SNAPSHOT. */
		return compat_put_ulong(buf->cons_snapshot, arg);
	case RING_BUFFER_COMPAT_SNAPSHOT_GET_PRODUCED:
		/* Copy out the produced position saved by SNAPSHOT. */
		return compat_put_ulong(buf->prod_snapshot, arg);
	case RING_BUFFER_COMPAT_GET_SUBBUF:
	{
		__u32 uconsume;
		unsigned long consume;
		long ret;

		ret = get_user(uconsume, (__u32 __user *) arg);
		if (ret)
			return ret; /* will return -EFAULT */
		/*
		 * Rebuild the full consumed position: upper bits come
		 * from the last consumed snapshot, lower 32 bits from
		 * the value provided by 32-bit user-space.
		 */
		consume = buf->cons_snapshot;
		consume &= ~0xFFFFFFFFL;
		consume |= uconsume;
		ret = lib_ring_buffer_get_subbuf(buf, consume);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_COMPAT_PUT_SUBBUF:
		lib_ring_buffer_put_subbuf(buf);
		return 0;

	case RING_BUFFER_COMPAT_GET_NEXT_SUBBUF:
	{
		long ret;

		ret = lib_ring_buffer_get_next_subbuf(buf);
		if (!ret) {
			/* Set file position to zero at each successful "get" */
			filp->f_pos = 0;
		}
		return ret;
	}
	case RING_BUFFER_COMPAT_PUT_NEXT_SUBBUF:
		lib_ring_buffer_put_next_subbuf(buf);
		return 0;
	case RING_BUFFER_COMPAT_GET_SUBBUF_SIZE:
	{
		unsigned long data_size;

		data_size = lib_ring_buffer_get_read_data_size(config, buf);
		/* Refuse sizes that do not fit in 32 bits. */
		if (data_size > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(data_size, arg);
	}
	case RING_BUFFER_COMPAT_GET_PADDED_SUBBUF_SIZE:
	{
		unsigned long size;

		size = lib_ring_buffer_get_read_data_size(config, buf);
		/* Round the data size up to a full page. */
		size = PAGE_ALIGN(size);
		if (size > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(size, arg);
	}
	case RING_BUFFER_COMPAT_GET_MAX_SUBBUF_SIZE:
		if (chan->backend.subbuf_size > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(chan->backend.subbuf_size, arg);
	case RING_BUFFER_COMPAT_GET_MMAP_LEN:
	{
		unsigned long mmap_buf_len;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		mmap_buf_len = chan->backend.buf_size;
		/* The extra reader sub-buffer, when present, is mapped too. */
		if (chan->backend.extra_reader_sb)
			mmap_buf_len += chan->backend.subbuf_size;
		if (mmap_buf_len > UINT_MAX)
			return -EFBIG;
		return compat_put_ulong(mmap_buf_len, arg);
	}
	case RING_BUFFER_COMPAT_GET_MMAP_READ_OFFSET:
	{
		unsigned long sb_bindex, read_offset;

		if (config->output != RING_BUFFER_MMAP)
			return -EINVAL;
		/* Offset of the sub-buffer currently owned by the reader. */
		sb_bindex = subbuffer_id_get_index(config,
						   buf->backend.buf_rsb.id);
		read_offset = buf->backend.array[sb_bindex]->mmap_offset;
		if (read_offset > UINT_MAX)
			return -EINVAL;
		return compat_put_ulong(read_offset, arg);
	}
	case RING_BUFFER_COMPAT_FLUSH:
		lib_ring_buffer_switch_remote(buf);
		return 0;
	default:
		return -ENOIOCTLCMD;
	}
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_compat_ioctl);
441
442 static
443 long vfs_lib_ring_buffer_compat_ioctl(struct file *filp, unsigned int cmd,
444 unsigned long arg)
445 {
446 struct lib_ring_buffer *buf = filp->private_data;
447
448 return lib_ring_buffer_compat_ioctl(filp, cmd, arg, buf);
449 }
450 #endif
451
/*
 * VFS operations exposed on a ring buffer stream file descriptor.
 * splice_read, mmap and llseek handlers are declared in the included
 * ring buffer headers (not defined in this file).
 */
const struct file_operations lib_ring_buffer_file_operations = {
	.owner = THIS_MODULE,
	.open = vfs_lib_ring_buffer_open,
	.release = vfs_lib_ring_buffer_release,
	.poll = vfs_lib_ring_buffer_poll,
	.splice_read = vfs_lib_ring_buffer_splice_read,
	.mmap = vfs_lib_ring_buffer_mmap,
	.unlocked_ioctl = vfs_lib_ring_buffer_ioctl,
	.llseek = vfs_lib_ring_buffer_no_llseek,
#ifdef CONFIG_COMPAT
	.compat_ioctl = vfs_lib_ring_buffer_compat_ioctl,
#endif
};
EXPORT_SYMBOL_GPL(lib_ring_buffer_file_operations);
466
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Ring Buffer Library VFS");
/* Version string is assembled from the LTTng modules version macros. */
MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION) "."
	__stringify(LTTNG_MODULES_MINOR_VERSION) "."
	__stringify(LTTNG_MODULES_PATCHLEVEL_VERSION)
	LTTNG_MODULES_EXTRAVERSION);
This page took 0.039304 seconds and 5 git commands to generate.