/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * wrapper/vmalloc.h
 *
 * Wrapper around vmalloc_sync_all(). Uses KALLSYMS to look up its address
 * when available; otherwise the kernel must export this function to GPL
 * modules.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LTTNG_WRAPPER_VMALLOC_H
#define _LTTNG_WRAPPER_VMALLOC_H

#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

#ifdef CONFIG_KALLSYMS

#include <linux/kallsyms.h>
#include <wrapper/kallsyms.h>
#include <lttng/kernel-version.h>

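/*
 * Note: wrapper/kallsyms.h provides kallsyms_lookup_funcptr(), used below
 * to look up kernel symbols that are not exported to modules.
 */
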
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0))

/*
 * vmalloc_sync_mappings() was removed in v5.8; vmalloc mappings are now
 * synchronized by the kernel when they are created or torn down, so the
 * wrapper is a no-op.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0))
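
/*
 * Note: LTTNG_KERNEL_RANGE(a,b,c, x,y,z) matches kernels in [a.b.c, x.y.z).
 * The stable and Ubuntu ranges above are branches that received backports of
 * vmalloc_sync_mappings() before its v5.6 release.
 */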

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_mappings_sym)(void);

	vmalloc_sync_mappings_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_mappings");
	if (vmalloc_sync_mappings_sym) {
		vmalloc_sync_mappings_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_mappings to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_mappings symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}

/*
 * Canary function to check for 'vmalloc_sync_mappings()' at compile time.
 * It is never called at run time; it only makes the build fail here if the
 * kernel prototype changes, instead of failing silently later.
 *
 * From 'include/linux/vmalloc.h':
 *
 *   void vmalloc_sync_mappings(void);
 */
static inline
void __canary__vmalloc_sync_mappings(void)
{
	vmalloc_sync_mappings();
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

/*
 * Map vmalloc_sync_mappings to vmalloc_sync_all() on kernels before 5.6.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_all_sym)(void);

	vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
	if (vmalloc_sync_all_sym) {
		vmalloc_sync_all_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_all to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}

/*
 * Canary function to check for 'vmalloc_sync_all()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 *   void vmalloc_sync_all(void);
 */
static inline
void __canary__vmalloc_sync_all(void)
{
	vmalloc_sync_all();
}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

#else /* CONFIG_KALLSYMS */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0))

/*
 * vmalloc_sync_mappings() was removed in v5.8; vmalloc mappings are now
 * synchronized by the kernel when they are created or torn down, so the
 * wrapper is a no-op.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0))

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	vmalloc_sync_mappings();
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	vmalloc_sync_all();
}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

#endif /* CONFIG_KALLSYMS */

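/*
 * Typical pattern (informal sketch): tracer memory that may live in the
 * vmalloc area is allocated, then wrapper_vmalloc_sync_mappings() is called
 * so that, on x86, the mapping is reachable from every process's page tables
 * before it is touched from page fault handler or NMI context.
 * lttng_kvmalloc_node() below applies this pattern.
 */
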
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	ret = kvmalloc_node(size, flags, node);
	if (is_vmalloc_addr(ret)) {
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	kvfree(addr);
}

#else

#include <linux/slab.h>

static inline
void print_vmalloc_node_range_warning(void)
{
	printk_once(KERN_WARNING "LTTng: __vmalloc_node_range symbol lookup failed.\n");
	printk_once(KERN_WARNING "Tracer performance will be degraded on NUMA systems.\n");
	printk_once(KERN_WARNING "Please rebuild your kernel with CONFIG_KALLSYMS enabled.\n");
}

/*
 * kallsyms wrapper of __vmalloc_node_range with a fallback to __vmalloc.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, get __vmalloc_node_range(), which is not exported.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				vm_flags, node, caller);
#endif
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}

/*
 * Canary function to check for '__vmalloc_node_range()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 *   extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 *           unsigned long start, unsigned long end, gfp_t gfp_mask,
 *           pgprot_t prot, unsigned long vm_flags, int node,
 *           const void *caller);
 */
static inline
void *__canary____lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller)
{
	return __vmalloc_node_range(size, align, start, end, gfp_mask, prot,
			vm_flags, node, caller);
}

/**
 * lttng_kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible with GFP_KERNEL.
 * @node: NUMA node to allocate from.
 *
 * Uses kmalloc to get the memory, but if the allocation fails, falls back
 * to the vmalloc allocator. Use lttng_kvfree to free the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL are not supported.
 */
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page tables),
	 * so the given set of flags has to be compatible.
	 */
	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);

	/*
	 * If the allocation fits in a single page, do not fall back.
	 */
	if (size <= PAGE_SIZE) {
		return kmalloc_node(size, flags, node);
	}

	/*
	 * Make sure that larger requests are not too disruptive - no OOM
	 * killer and no allocation failure warnings, as we have a fallback.
	 */
	ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
	if (!ret) {
		ret = __lttng_vmalloc_node_range(size, 1,
				VMALLOC_START, VMALLOC_END,
				flags | __GFP_HIGHMEM, PAGE_KERNEL, 0,
				node, __builtin_return_address(0));
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		kfree(addr);
	}
}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)) */
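
/*
 * Example usage (illustrative sketch only, not part of the upstream header;
 * the function and variable names below are made up). Guarded out of
 * compilation on purpose.
 */
#if 0
static int lttng_example_alloc_table(unsigned long nr_entries)
{
	u64 *table;

	/* May return kmalloc'd or vmalloc'd memory; mappings are synced. */
	table = lttng_kvzalloc(nr_entries * sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;
	/* ... use the table from the tracing fast path ... */
	lttng_kvfree(table);
	return 0;
}
#endif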

#endif /* _LTTNG_WRAPPER_VMALLOC_H */