/* SPDX-License-Identifier: GPL-2.0-only
 *
 * ring_buffer_mmap.c
 *
 * Copyright (C) 2002-2005 - Tom Zanussi <zanussi@us.ibm.com>, IBM Corp
 * Copyright (C) 1999-2005 - Karim Yaghmour <karim@opersys.com>
 * Copyright (C) 2008-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Re-using code from kernel/relay.c, hence the GPL-2.0-only license for this
 * file.
 */

#include <linux/module.h>
#include <linux/mm.h>

#include <ringbuffer/backend.h>
#include <ringbuffer/frontend.h>
#include <ringbuffer/vfs.h>

/*
 * fault() vm_op implementation for ring buffer file mapping.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
static vm_fault_t lib_ring_buffer_fault_compat(struct vm_area_struct *vma, struct vm_fault *vmf)
#else
static int lib_ring_buffer_fault_compat(struct vm_area_struct *vma, struct vm_fault *vmf)
#endif
{
	struct lib_ring_buffer *buf = vma->vm_private_data;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	pgoff_t pgoff = vmf->pgoff;
	unsigned long *pfnp;
	void **virt;
	unsigned long offset, sb_bindex;

	/*
	 * Verify that faults are only done on the range of pages owned by the
	 * reader.
	 */
	offset = pgoff << PAGE_SHIFT;
	sb_bindex = subbuffer_id_get_index(config, buf->backend.buf_rsb.id);
	if (!(offset >= buf->backend.array[sb_bindex]->mmap_offset
	      && offset < buf->backend.array[sb_bindex]->mmap_offset +
			  buf->backend.chan->backend.subbuf_size))
		return VM_FAULT_SIGBUS;
	/*
	 * ring_buffer_read_get_pfn() gets the page frame number for the
	 * current reader's pages.
	 */
	pfnp = lib_ring_buffer_read_get_pfn(&buf->backend, offset, &virt);
	if (!*pfnp)
		return VM_FAULT_SIGBUS;
	get_page(pfn_to_page(*pfnp));
	vmf->page = pfn_to_page(*pfnp);

	return 0;
}

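/*
 * Kernel version compatibility for the fault() callback: kernels >= 4.11
 * removed the explicit vma argument (it is now reachable through vmf->vma),
 * and this code uses the vm_fault_t return type on kernels >= 5.1. The
 * wrappers below adapt each prototype to lib_ring_buffer_fault_compat().
 */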
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
static vm_fault_t lib_ring_buffer_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	return lib_ring_buffer_fault_compat(vma, vmf);
}
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0))
static int lib_ring_buffer_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	return lib_ring_buffer_fault_compat(vma, vmf);
}
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) */
static int lib_ring_buffer_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return lib_ring_buffer_fault_compat(vma, vmf);
}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) */

/*
 * vm_ops for ring buffer file mappings.
 */
static const struct vm_operations_struct lib_ring_buffer_mmap_ops = {
	.fault = lib_ring_buffer_fault,
};
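
/*
 * Note: no pages are inserted into the mapping at mmap() time; each page is
 * populated lazily by the fault handler above, and only within the range of
 * pages currently owned by the reader.
 */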

/**
 * lib_ring_buffer_mmap_buf: - mmap channel buffer to process address space
 * @buf: ring buffer to map
 * @vma: vm_area_struct describing memory to be mapped
 *
 * Returns 0 if ok, negative on error
 *
 * Caller should already have grabbed mmap_sem.
 */
static int lib_ring_buffer_mmap_buf(struct lib_ring_buffer *buf,
		struct vm_area_struct *vma)
{
	unsigned long length = vma->vm_end - vma->vm_start;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	unsigned long mmap_buf_len;

	if (config->output != RING_BUFFER_MMAP)
		return -EINVAL;

	mmap_buf_len = chan->backend.buf_size;
	if (chan->backend.extra_reader_sb)
		mmap_buf_len += chan->backend.subbuf_size;

	if (length != mmap_buf_len)
		return -EINVAL;

	vma->vm_ops = &lib_ring_buffer_mmap_ops;
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_private_data = buf;

	return 0;
}

int lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma,
		struct lib_ring_buffer *buf)
{
	return lib_ring_buffer_mmap_buf(buf, vma);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_mmap);

/**
 * vfs_lib_ring_buffer_mmap - mmap file op
 * @filp: the file
 * @vma: the vma describing what to map
 *
 * Calls upon lib_ring_buffer_mmap_buf() to map the file into user space.
 */
int vfs_lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct lib_ring_buffer *buf = filp->private_data;
	return lib_ring_buffer_mmap(filp, vma, buf);
}
EXPORT_SYMBOL_GPL(vfs_lib_ring_buffer_mmap);
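
/*
 * Illustrative user-space usage (a minimal sketch, not taken from this
 * repository): a consumer holding a stream file descriptor opened on this
 * file could map the buffer roughly as follows. The requested length must
 * equal the channel buffer size, plus one sub-buffer when the channel keeps
 * an extra reader sub-buffer, otherwise lib_ring_buffer_mmap_buf() rejects
 * the mapping with -EINVAL. The identifiers stream_fd, buf_size, subbuf_size
 * and extra_reader_sb are hypothetical values the consumer would have
 * obtained beforehand.
 *
 *	size_t len = buf_size + (extra_reader_sb ? subbuf_size : 0);
 *	void *map = mmap(NULL, len, PROT_READ, MAP_SHARED, stream_fd, 0);
 *	if (map == MAP_FAILED)
 *		perror("mmap");
 */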