Fix: mm: create the new vm_fault_t type (v5.1)
[lttng-modules.git] / lib/ringbuffer/ring_buffer_mmap.c
/*
 * ring_buffer_mmap.c
 *
 * Copyright (C) 2002-2005 - Tom Zanussi <zanussi@us.ibm.com>, IBM Corp
 * Copyright (C) 1999-2005 - Karim Yaghmour <karim@opersys.com>
 * Copyright (C) 2008-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; only version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Re-using code from kernel/relay.c, hence the GPLv2 license for this
 * file.
 */

#include <linux/module.h>
#include <linux/mm.h>

#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>
#include <wrapper/ringbuffer/vfs.h>

/*
 * fault() vm_op implementation for ring buffer file mapping.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
static vm_fault_t lib_ring_buffer_fault_compat(struct vm_area_struct *vma, struct vm_fault *vmf)
#else
static int lib_ring_buffer_fault_compat(struct vm_area_struct *vma, struct vm_fault *vmf)
#endif
{
	struct lib_ring_buffer *buf = vma->vm_private_data;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	pgoff_t pgoff = vmf->pgoff;
	unsigned long *pfnp;
	void **virt;
	unsigned long offset, sb_bindex;

	/*
	 * Verify that faults are only done on the range of pages owned by the
	 * reader.
	 */
	offset = pgoff << PAGE_SHIFT;
	sb_bindex = subbuffer_id_get_index(config, buf->backend.buf_rsb.id);
	if (!(offset >= buf->backend.array[sb_bindex]->mmap_offset
	      && offset < buf->backend.array[sb_bindex]->mmap_offset +
			buf->backend.chan->backend.subbuf_size))
		return VM_FAULT_SIGBUS;
	/*
	 * lib_ring_buffer_read_get_pfn() gets the page frame number for the
	 * current reader's pages.
	 */
	pfnp = lib_ring_buffer_read_get_pfn(&buf->backend, offset, &virt);
	if (!*pfnp)
		return VM_FAULT_SIGBUS;
	get_page(pfn_to_page(*pfnp));
	vmf->page = pfn_to_page(*pfnp);

	return 0;
}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0))
static vm_fault_t lib_ring_buffer_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	return lib_ring_buffer_fault_compat(vma, vmf);
}
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0))
static int lib_ring_buffer_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	return lib_ring_buffer_fault_compat(vma, vmf);
}
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) */
static int lib_ring_buffer_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return lib_ring_buffer_fault_compat(vma, vmf);
}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) */
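
/*
 * The three-way conditional above tracks two upstream API changes: Linux
 * 4.11 removed the vm_area_struct argument from the fault handler (it is
 * now reachable through vmf->vma), and Linux 5.1 made vm_fault_t the
 * return type of fault handlers.
 */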

/*
 * vm_ops for ring buffer file mappings.
 */
static const struct vm_operations_struct lib_ring_buffer_mmap_ops = {
	.fault = lib_ring_buffer_fault,
};

/**
 * lib_ring_buffer_mmap_buf: - mmap channel buffer to process address space
 * @buf: ring buffer to map
 * @vma: vm_area_struct describing memory to be mapped
 *
 * Returns 0 if ok, negative on error
 *
 * Caller should already have grabbed mmap_sem.
 */
static int lib_ring_buffer_mmap_buf(struct lib_ring_buffer *buf,
				    struct vm_area_struct *vma)
{
	unsigned long length = vma->vm_end - vma->vm_start;
	struct channel *chan = buf->backend.chan;
	const struct lib_ring_buffer_config *config = &chan->backend.config;
	unsigned long mmap_buf_len;

	if (config->output != RING_BUFFER_MMAP)
		return -EINVAL;

	mmap_buf_len = chan->backend.buf_size;
	if (chan->backend.extra_reader_sb)
		mmap_buf_len += chan->backend.subbuf_size;

	if (length != mmap_buf_len)
		return -EINVAL;

	vma->vm_ops = &lib_ring_buffer_mmap_ops;
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_private_data = buf;

	return 0;
}

int lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma,
			 struct lib_ring_buffer *buf)
{
	return lib_ring_buffer_mmap_buf(buf, vma);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_mmap);

/**
 * vfs_lib_ring_buffer_mmap - mmap file op
 * @filp: the file
 * @vma: the vma describing what to map
 *
 * Calls upon lib_ring_buffer_mmap_buf() to map the file into user space.
 */
int vfs_lib_ring_buffer_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct lib_ring_buffer *buf = filp->private_data;
	return lib_ring_buffer_mmap(filp, vma, buf);
}
EXPORT_SYMBOL_GPL(vfs_lib_ring_buffer_mmap);
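
For context on the length check in lib_ring_buffer_mmap_buf() above: a reader must map the whole buffer (chan->backend.buf_size, plus one extra sub-buffer when extra_reader_sb is set) in a single mmap() call, otherwise the mapping is rejected with -EINVAL. The sketch below shows what a minimal user-space consumer of this file operation could look like; the stream file path and the way the mapping length is obtained are illustrative assumptions (in practice a consumer queries the kernel for the exact length through the ring buffer's ioctl interface rather than taking it on the command line).

/* User-space sketch (not part of ring_buffer_mmap.c): map a ring buffer
 * stream file whose mmap file operation is vfs_lib_ring_buffer_mmap().
 * The path and length are supplied by the caller for illustration only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <stream-file> <mmap-len>\n", argv[0]);
		return 1;
	}

	int fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Length must equal buf_size (+ one sub-buffer if extra_reader_sb),
	 * otherwise lib_ring_buffer_mmap_buf() returns -EINVAL. */
	size_t len = strtoul(argv[2], NULL, 0);

	void *base = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	if (base == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	/* Page faults on this mapping are served by lib_ring_buffer_fault();
	 * touching pages outside the current reader sub-buffer raises SIGBUS. */
	printf("mapped %zu bytes at %p\n", len, base);

	munmap(base, len);
	close(fd);
	return 0;
}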