/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * wrapper/vmalloc.h
 *
 * Wrapper around vmalloc_sync_all. Uses KALLSYMS to look up its address
 * when available; otherwise the kernel must export this function to GPL
 * modules.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LTTNG_WRAPPER_VMALLOC_H
#define _LTTNG_WRAPPER_VMALLOC_H

#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

#ifdef CONFIG_KALLSYMS

#include <linux/kallsyms.h>
#include <wrapper/kallsyms.h>
#include <lttng/kernel-version.h>

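/*
 * Dispatch overview: with CONFIG_KALLSYMS, the sync function is looked up
 * at run time via kallsyms_lookup_funcptr(), so the wrapper works even when
 * the symbol is not exported to GPL modules. Without CONFIG_KALLSYMS, the
 * exported function is called directly. On kernels >= 5.8 the wrapper is a
 * no-op, since the kernel synchronizes vmalloc mappings on its own.
 */
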
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0))

/*
 * wrapper_vmalloc_sync_mappings was removed in v5.8; the vmalloc mappings
 * are now synchronized when they are created or torn down.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0))

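/*
 * The LTTNG_KERNEL_RANGE(a,b,c, d,e,f) entries above are presumed to match
 * kernels in [a.b.c, d.e.f); they cover stable branches that backported
 * vmalloc_sync_mappings() from 5.6, and the LTTNG_UBUNTU_KERNEL_RANGE
 * entries cover the corresponding Ubuntu kernels.
 */
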
static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_mappings_sym)(void);

	vmalloc_sync_mappings_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_mappings");
	if (vmalloc_sync_mappings_sym) {
		vmalloc_sync_mappings_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_mappings to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_mappings symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}

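/*
 * Callers (e.g. lttng_kvmalloc_node() below) invoke
 * wrapper_vmalloc_sync_mappings() right after a vmalloc-backed allocation,
 * so that the new page-table entries are propagated before the buffers are
 * touched from page-fault or NMI context.
 */
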
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

/*
 * Map vmalloc_sync_mappings to vmalloc_sync_all() on kernels before 5.6.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_all_sym)(void);

	vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
	if (vmalloc_sync_all_sym) {
		vmalloc_sync_all_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_all to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

#else

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0))

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	vmalloc_sync_mappings();
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	vmalloc_sync_all();
}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

#endif

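/*
 * kvmalloc_node() was introduced in kernel 4.12. On newer kernels the
 * wrappers below delegate to it; on older kernels they emulate it with
 * kmalloc_node() plus a vmalloc fallback for larger allocations.
 */
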
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	ret = kvmalloc_node(size, flags, node);
	if (is_vmalloc_addr(ret)) {
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	kvfree(addr);
}

#else

#include <linux/slab.h>

static inline
void print_vmalloc_node_range_warning(void)
{
	printk_once(KERN_WARNING "LTTng: __vmalloc_node_range symbol lookup failed.\n");
	printk_once(KERN_WARNING "Tracer performance will be degraded on NUMA systems.\n");
	printk_once(KERN_WARNING "Please rebuild your kernel with CONFIG_KALLSYMS enabled.\n");
}

/*
 * kallsyms wrapper of __vmalloc_node_range with a fallback to __vmalloc.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, look up __vmalloc_node_range, which is not
	 * exported.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				vm_flags, node, caller);
#endif
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}

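/*
 * Note: the __vmalloc() fallback above uses the three-argument signature
 * (size, gfp_mask, prot) that kernels older than 4.12 provide; it honors
 * the gfp and protection flags but ignores the requested NUMA node, hence
 * the warning.
 */
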
/**
 * lttng_kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible with GFP_KERNEL.
 * @node: NUMA node to allocate from.
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use lttng_kvfree to free the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL are not supported
 */
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page tables)
	 * so the given set of flags has to be compatible.
	 */
	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);

	/*
	 * If the allocation fits in a single page, do not fall back.
	 */
	if (size <= PAGE_SIZE) {
		return kmalloc_node(size, flags, node);
	}

	/*
	 * Make sure that larger requests are not too disruptive - no OOM
	 * killer and no allocation failure warnings as we have a fallback.
	 */
	ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
	if (!ret) {
		ret = __lttng_vmalloc_node_range(size, 1,
				VMALLOC_START, VMALLOC_END,
				flags | __GFP_HIGHMEM, PAGE_KERNEL, 0,
				node, __builtin_return_address(0));
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		kfree(addr);
	}
}
#endif

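/*
 * Illustrative usage sketch (the helper function and struct layout here are
 * hypothetical, added for documentation only): a typical caller pairs
 * lttng_kvzalloc() with lttng_kvfree(). The buffer may be kmalloc- or
 * vmalloc-backed; lttng_kvfree() frees either kind.
 */
static inline
void lttng_wrapper_vmalloc_usage_sketch(void)
{
	struct { unsigned long len; char data[4096]; } *buf;

	buf = lttng_kvzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return;
	buf->len = sizeof(buf->data);
	/* ... hand the buffer to the tracer ... */
	lttng_kvfree(buf);
}
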
#endif /* _LTTNG_WRAPPER_VMALLOC_H */