/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * Ring Buffer VFS file operations.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/compat.h>

#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>
#include <wrapper/ringbuffer/vfs.h>
#include <lttng-tracer.h>
19 static int put_ulong(unsigned long val
, unsigned long arg
)
21 return put_user(val
, (unsigned long __user
*)arg
);
25 static int compat_put_ulong(compat_ulong_t val
, unsigned long arg
)
27 return put_user(val
, (compat_ulong_t __user
*)compat_ptr(arg
));
32 * This is not used by anonymous file descriptors. This code is left
33 * there if we ever want to implement an inode with open() operation.
35 int lib_ring_buffer_open(struct inode
*inode
, struct file
*file
,
36 struct lib_ring_buffer
*buf
)
43 ret
= lib_ring_buffer_open_read(buf
);
47 ret
= nonseekable_open(inode
, file
);
53 lib_ring_buffer_release_read(buf
);
56 EXPORT_SYMBOL_GPL(lib_ring_buffer_open
);
59 * vfs_lib_ring_buffer_open - ring buffer open file operation
60 * @inode: opened inode
63 * Open implementation. Makes sure only one open instance of a buffer is
64 * done at a given moment.
67 int vfs_lib_ring_buffer_open(struct inode
*inode
, struct file
*file
)
69 struct lib_ring_buffer
*buf
= inode
->i_private
;
71 file
->private_data
= buf
;
72 return lib_ring_buffer_open(inode
, file
, buf
);
/*
 * Drop the read-side reference taken by lib_ring_buffer_open().
 * Always succeeds.
 */
int lib_ring_buffer_release(struct inode *inode, struct file *file,
		struct lib_ring_buffer *buf)
{
	lib_ring_buffer_release_read(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_release);
85 * vfs_lib_ring_buffer_release - ring buffer release file operation
86 * @inode: opened inode
89 * Release implementation.
92 int vfs_lib_ring_buffer_release(struct inode
*inode
, struct file
*file
)
94 struct lib_ring_buffer
*buf
= file
->private_data
;
96 return lib_ring_buffer_release(inode
, file
, buf
);
99 unsigned int lib_ring_buffer_poll(struct file
*filp
, poll_table
*wait
,
100 struct lib_ring_buffer
*buf
)
102 unsigned int mask
= 0;
103 struct channel
*chan
= buf
->backend
.chan
;
104 const struct lib_ring_buffer_config
*config
= &chan
->backend
.config
;
105 int finalized
, disabled
;
107 if (filp
->f_mode
& FMODE_READ
) {
108 poll_wait(filp
, &buf
->read_wait
, wait
);
110 finalized
= lib_ring_buffer_is_finalized(config
, buf
);
111 disabled
= lib_ring_buffer_channel_is_disabled(chan
);
114 * lib_ring_buffer_is_finalized() contains a smp_rmb() ordering
115 * finalized load before offsets loads.
117 WARN_ON(atomic_long_read(&buf
->active_readers
) != 1);
122 if (subbuf_trunc(lib_ring_buffer_get_offset(config
, buf
), chan
)
123 - subbuf_trunc(lib_ring_buffer_get_consumed(config
, buf
), chan
)
129 * The memory barriers
130 * __wait_event()/wake_up_interruptible() take
131 * care of "raw_spin_is_locked" memory ordering.
133 if (raw_spin_is_locked(&buf
->raw_tick_nohz_spinlock
))
139 if (subbuf_trunc(lib_ring_buffer_get_offset(config
, buf
),
141 - subbuf_trunc(lib_ring_buffer_get_consumed(config
, buf
),
143 >= chan
->backend
.buf_size
)
144 return POLLPRI
| POLLRDBAND
;
146 return POLLIN
| POLLRDNORM
;
151 EXPORT_SYMBOL_GPL(lib_ring_buffer_poll
);
154 * vfs_lib_ring_buffer_poll - ring buffer poll file operation
158 * Poll implementation.
161 unsigned int vfs_lib_ring_buffer_poll(struct file
*filp
, poll_table
*wait
)
163 struct lib_ring_buffer
*buf
= filp
->private_data
;
165 return lib_ring_buffer_poll(filp
, wait
, buf
);
168 long lib_ring_buffer_ioctl(struct file
*filp
, unsigned int cmd
,
169 unsigned long arg
, struct lib_ring_buffer
*buf
)
171 struct channel
*chan
= buf
->backend
.chan
;
172 const struct lib_ring_buffer_config
*config
= &chan
->backend
.config
;
174 if (lib_ring_buffer_channel_is_disabled(chan
))
178 case RING_BUFFER_SNAPSHOT
:
179 return lib_ring_buffer_snapshot(buf
, &buf
->cons_snapshot
,
180 &buf
->prod_snapshot
);
181 case RING_BUFFER_SNAPSHOT_SAMPLE_POSITIONS
:
182 return lib_ring_buffer_snapshot_sample_positions(buf
,
183 &buf
->cons_snapshot
, &buf
->prod_snapshot
);
184 case RING_BUFFER_SNAPSHOT_GET_CONSUMED
:
185 return put_ulong(buf
->cons_snapshot
, arg
);
186 case RING_BUFFER_SNAPSHOT_GET_PRODUCED
:
187 return put_ulong(buf
->prod_snapshot
, arg
);
188 case RING_BUFFER_GET_SUBBUF
:
190 unsigned long uconsume
;
193 ret
= get_user(uconsume
, (unsigned long __user
*) arg
);
195 return ret
; /* will return -EFAULT */
196 ret
= lib_ring_buffer_get_subbuf(buf
, uconsume
);
198 /* Set file position to zero at each successful "get" */
203 case RING_BUFFER_PUT_SUBBUF
:
204 lib_ring_buffer_put_subbuf(buf
);
207 case RING_BUFFER_GET_NEXT_SUBBUF
:
211 ret
= lib_ring_buffer_get_next_subbuf(buf
);
213 /* Set file position to zero at each successful "get" */
218 case RING_BUFFER_PUT_NEXT_SUBBUF
:
219 lib_ring_buffer_put_next_subbuf(buf
);
221 case RING_BUFFER_GET_SUBBUF_SIZE
:
222 return put_ulong(lib_ring_buffer_get_read_data_size(config
, buf
),
224 case RING_BUFFER_GET_PADDED_SUBBUF_SIZE
:
228 size
= lib_ring_buffer_get_read_data_size(config
, buf
);
229 size
= PAGE_ALIGN(size
);
230 return put_ulong(size
, arg
);
232 case RING_BUFFER_GET_MAX_SUBBUF_SIZE
:
233 return put_ulong(chan
->backend
.subbuf_size
, arg
);
234 case RING_BUFFER_GET_MMAP_LEN
:
236 unsigned long mmap_buf_len
;
238 if (config
->output
!= RING_BUFFER_MMAP
)
240 mmap_buf_len
= chan
->backend
.buf_size
;
241 if (chan
->backend
.extra_reader_sb
)
242 mmap_buf_len
+= chan
->backend
.subbuf_size
;
243 if (mmap_buf_len
> INT_MAX
)
245 return put_ulong(mmap_buf_len
, arg
);
247 case RING_BUFFER_GET_MMAP_READ_OFFSET
:
249 unsigned long sb_bindex
;
251 if (config
->output
!= RING_BUFFER_MMAP
)
253 sb_bindex
= subbuffer_id_get_index(config
,
254 buf
->backend
.buf_rsb
.id
);
255 return put_ulong(buf
->backend
.array
[sb_bindex
]->mmap_offset
,
258 case RING_BUFFER_FLUSH
:
259 lib_ring_buffer_switch_remote(buf
);
261 case RING_BUFFER_FLUSH_EMPTY
:
262 lib_ring_buffer_switch_remote_empty(buf
);
264 case RING_BUFFER_CLEAR
:
265 lib_ring_buffer_clear(buf
);
271 EXPORT_SYMBOL_GPL(lib_ring_buffer_ioctl
);
274 * vfs_lib_ring_buffer_ioctl - control ring buffer reader synchronization
280 * This ioctl implements commands necessary for producer/consumer
281 * and flight recorder reader interaction :
282 * RING_BUFFER_GET_NEXT_SUBBUF
283 * Get the next sub-buffer that can be read. It never blocks.
284 * RING_BUFFER_PUT_NEXT_SUBBUF
285 * Release the currently read sub-buffer.
286 * RING_BUFFER_GET_SUBBUF_SIZE
287 * returns the size of the current sub-buffer.
288 * RING_BUFFER_GET_MAX_SUBBUF_SIZE
289 * returns the maximum size for sub-buffers.
290 * RING_BUFFER_GET_NUM_SUBBUF
291 * returns the number of reader-visible sub-buffers in the per cpu
292 * channel (for mmap).
293 * RING_BUFFER_GET_MMAP_READ_OFFSET
294 * returns the offset of the subbuffer belonging to the reader.
295 * Should only be used for mmap clients.
298 long vfs_lib_ring_buffer_ioctl(struct file
*filp
, unsigned int cmd
, unsigned long arg
)
300 struct lib_ring_buffer
*buf
= filp
->private_data
;
302 return lib_ring_buffer_ioctl(filp
, cmd
, arg
, buf
);
306 long lib_ring_buffer_compat_ioctl(struct file
*filp
, unsigned int cmd
,
307 unsigned long arg
, struct lib_ring_buffer
*buf
)
309 struct channel
*chan
= buf
->backend
.chan
;
310 const struct lib_ring_buffer_config
*config
= &chan
->backend
.config
;
312 if (lib_ring_buffer_channel_is_disabled(chan
))
316 case RING_BUFFER_COMPAT_SNAPSHOT
:
317 return lib_ring_buffer_snapshot(buf
, &buf
->cons_snapshot
,
318 &buf
->prod_snapshot
);
319 case RING_BUFFER_COMPAT_SNAPSHOT_SAMPLE_POSITIONS
:
320 return lib_ring_buffer_snapshot_sample_positions(buf
,
321 &buf
->cons_snapshot
, &buf
->prod_snapshot
);
322 case RING_BUFFER_COMPAT_SNAPSHOT_GET_CONSUMED
:
323 return compat_put_ulong(buf
->cons_snapshot
, arg
);
324 case RING_BUFFER_COMPAT_SNAPSHOT_GET_PRODUCED
:
325 return compat_put_ulong(buf
->prod_snapshot
, arg
);
326 case RING_BUFFER_COMPAT_GET_SUBBUF
:
329 unsigned long consume
;
332 ret
= get_user(uconsume
, (__u32 __user
*) arg
);
334 return ret
; /* will return -EFAULT */
335 consume
= buf
->cons_snapshot
;
336 consume
&= ~0xFFFFFFFFL
;
338 ret
= lib_ring_buffer_get_subbuf(buf
, consume
);
340 /* Set file position to zero at each successful "get" */
345 case RING_BUFFER_COMPAT_PUT_SUBBUF
:
346 lib_ring_buffer_put_subbuf(buf
);
349 case RING_BUFFER_COMPAT_GET_NEXT_SUBBUF
:
353 ret
= lib_ring_buffer_get_next_subbuf(buf
);
355 /* Set file position to zero at each successful "get" */
360 case RING_BUFFER_COMPAT_PUT_NEXT_SUBBUF
:
361 lib_ring_buffer_put_next_subbuf(buf
);
363 case RING_BUFFER_COMPAT_GET_SUBBUF_SIZE
:
365 unsigned long data_size
;
367 data_size
= lib_ring_buffer_get_read_data_size(config
, buf
);
368 if (data_size
> UINT_MAX
)
370 return compat_put_ulong(data_size
, arg
);
372 case RING_BUFFER_COMPAT_GET_PADDED_SUBBUF_SIZE
:
376 size
= lib_ring_buffer_get_read_data_size(config
, buf
);
377 size
= PAGE_ALIGN(size
);
380 return compat_put_ulong(size
, arg
);
382 case RING_BUFFER_COMPAT_GET_MAX_SUBBUF_SIZE
:
383 if (chan
->backend
.subbuf_size
> UINT_MAX
)
385 return compat_put_ulong(chan
->backend
.subbuf_size
, arg
);
386 case RING_BUFFER_COMPAT_GET_MMAP_LEN
:
388 unsigned long mmap_buf_len
;
390 if (config
->output
!= RING_BUFFER_MMAP
)
392 mmap_buf_len
= chan
->backend
.buf_size
;
393 if (chan
->backend
.extra_reader_sb
)
394 mmap_buf_len
+= chan
->backend
.subbuf_size
;
395 if (mmap_buf_len
> UINT_MAX
)
397 return compat_put_ulong(mmap_buf_len
, arg
);
399 case RING_BUFFER_COMPAT_GET_MMAP_READ_OFFSET
:
401 unsigned long sb_bindex
, read_offset
;
403 if (config
->output
!= RING_BUFFER_MMAP
)
405 sb_bindex
= subbuffer_id_get_index(config
,
406 buf
->backend
.buf_rsb
.id
);
407 read_offset
= buf
->backend
.array
[sb_bindex
]->mmap_offset
;
408 if (read_offset
> UINT_MAX
)
410 return compat_put_ulong(read_offset
, arg
);
412 case RING_BUFFER_COMPAT_FLUSH
:
413 lib_ring_buffer_switch_remote(buf
);
415 case RING_BUFFER_COMPAT_FLUSH_EMPTY
:
416 lib_ring_buffer_switch_remote_empty(buf
);
418 case RING_BUFFER_COMPAT_CLEAR
:
419 lib_ring_buffer_clear(buf
);
425 EXPORT_SYMBOL_GPL(lib_ring_buffer_compat_ioctl
);
428 long vfs_lib_ring_buffer_compat_ioctl(struct file
*filp
, unsigned int cmd
,
431 struct lib_ring_buffer
*buf
= filp
->private_data
;
433 return lib_ring_buffer_compat_ioctl(filp
, cmd
, arg
, buf
);
437 const struct file_operations lib_ring_buffer_file_operations
= {
438 .owner
= THIS_MODULE
,
439 .open
= vfs_lib_ring_buffer_open
,
440 .release
= vfs_lib_ring_buffer_release
,
441 .poll
= vfs_lib_ring_buffer_poll
,
442 .splice_read
= vfs_lib_ring_buffer_splice_read
,
443 .mmap
= vfs_lib_ring_buffer_mmap
,
444 .unlocked_ioctl
= vfs_lib_ring_buffer_ioctl
,
445 .llseek
= vfs_lib_ring_buffer_no_llseek
,
447 .compat_ioctl
= vfs_lib_ring_buffer_compat_ioctl
,
450 EXPORT_SYMBOL_GPL(lib_ring_buffer_file_operations
);
452 MODULE_LICENSE("GPL and additional rights");
453 MODULE_AUTHOR("Mathieu Desnoyers <mathieu.desnoyers@efficios.com>");
454 MODULE_DESCRIPTION("LTTng ring buffer library");
455 MODULE_VERSION(__stringify(LTTNG_MODULES_MAJOR_VERSION
) "."
456 __stringify(LTTNG_MODULES_MINOR_VERSION
) "."
457 __stringify(LTTNG_MODULES_PATCHLEVEL_VERSION
)
458 LTTNG_MODULES_EXTRAVERSION
);