fix: __lttng_vmalloc_node_range const caller introduced in v3.6
[lttng-modules.git] / include / wrapper / vmalloc.h
/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * wrapper/vmalloc.h
 *
 * Wrapper around vmalloc_sync_mappings() and vmalloc_sync_all(). Uses
 * KALLSYMS to look up the symbol's address when available; otherwise the
 * kernel must export the function to GPL modules.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LTTNG_WRAPPER_VMALLOC_H
#define _LTTNG_WRAPPER_VMALLOC_H

#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

#ifdef CONFIG_KALLSYMS

#include <linux/kallsyms.h>
#include <wrapper/kallsyms.h>
#include <lttng/kernel-version.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0))

/*
 * vmalloc_sync_mappings() was removed in v5.8; the vmalloc mappings are now
 * synchronized when they are created or torn down.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0))

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_mappings_sym)(void);

	vmalloc_sync_mappings_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_mappings");
	if (vmalloc_sync_mappings_sym) {
		vmalloc_sync_mappings_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_mappings to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_mappings symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}

/*
 * Canary function to check for 'vmalloc_sync_mappings()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 * void vmalloc_sync_mappings(void);
 */
static inline
void __canary__vmalloc_sync_mappings(void)
{
	vmalloc_sync_mappings();
}
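
/*
 * Note: the canary is never called at run time. It pins the prototype quoted
 * above at compile time, so that if a future kernel changes or removes
 * vmalloc_sync_mappings(), this header fails to build instead of calling a
 * mismatched symbol through the kallsyms lookup above. The same pattern is
 * used for the other canary functions in this file.
 */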

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

/*
 * Map vmalloc_sync_mappings to vmalloc_sync_all() on kernels before 5.6.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_all_sym)(void);

	vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
	if (vmalloc_sync_all_sym) {
		vmalloc_sync_all_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_all to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}

/*
 * Canary function to check for 'vmalloc_sync_all()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 * void vmalloc_sync_all(void);
 */
static inline
void __canary__vmalloc_sync_all(void)
{
	vmalloc_sync_all();
}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

#else /* CONFIG_KALLSYMS */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0))

/*
 * vmalloc_sync_mappings() was removed in v5.8; the vmalloc mappings are now
 * synchronized when they are created or torn down.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0))

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	return vmalloc_sync_mappings();
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	return vmalloc_sync_all();
}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

#endif /* CONFIG_KALLSYMS */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	ret = kvmalloc_node(size, flags, node);
	if (is_vmalloc_addr(ret)) {
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	kvfree(addr);
}

#else

#include <linux/slab.h>

static inline
void print_vmalloc_node_range_warning(void)
{
	printk_once(KERN_WARNING "LTTng: __vmalloc_node_range symbol lookup failed.\n");
	printk_once(KERN_WARNING "Tracer performance will be degraded on NUMA systems.\n");
	printk_once(KERN_WARNING "Please rebuild your kernel with CONFIG_KALLSYMS enabled.\n");
}
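
/*
 * The __lttng_vmalloc_node_range() variants below track prototype changes of
 * the non-exported kernel function __vmalloc_node_range(): v4.0 added the
 * 'vm_flags' argument and v3.6 made 'caller' const. When the kallsyms lookup
 * fails, each variant falls back to __vmalloc(), losing only the NUMA node
 * placement (hence the warning above).
 */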

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,0,0))

/*
 * kallsyms wrapper of __vmalloc_node_range with a fallback to __vmalloc.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, get __vmalloc_node_range, which is not exported.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				vm_flags, node, caller);
#endif
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}

/*
 * Canary function to check for '__vmalloc_node_range()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 * extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 *		unsigned long start, unsigned long end, gfp_t gfp_mask,
 *		pgprot_t prot, unsigned long vm_flags, int node,
 *		const void *caller);
 */
static inline
void *__canary____lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller)
{
	return __vmalloc_node_range(size, align, start, end, gfp_mask, prot,
			vm_flags, node, caller);
}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))

/*
 * kallsyms wrapper of __vmalloc_node_range with a fallback to __vmalloc.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, get __vmalloc_node_range, which is not exported.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, int node, const void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				node, caller);
#endif
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}

/*
 * Canary function to check for '__vmalloc_node_range()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 * extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 *		unsigned long start, unsigned long end, gfp_t gfp_mask,
 *		pgprot_t prot, int node, const void *caller);
 */
static inline
void *__canary____lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, int node, const void *caller)
{
	return __vmalloc_node_range(size, align, start, end, gfp_mask, prot,
			node, caller);
}

#else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)) */

/*
 * kallsyms wrapper of __vmalloc_node_range with a fallback to __vmalloc.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, get __vmalloc_node_range, which is not exported.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, int node, void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				node, caller);
#endif
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}

/*
 * Canary function to check for '__vmalloc_node_range()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 * extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 *		unsigned long start, unsigned long end, gfp_t gfp_mask,
 *		pgprot_t prot, int node, void *caller);
 */
static inline
void *__canary____lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, int node, void *caller)
{
	return __vmalloc_node_range(size, align, start, end, gfp_mask, prot,
			node, caller);
}

#endif

/**
 * lttng_kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible with GFP_KERNEL.
 * @node: NUMA node to allocate from (NUMA_NO_NODE for no node preference).
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use lttng_kvfree to free the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL are not supported.
 */
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
	 * so the given set of flags has to be compatible.
	 */
	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);

	/*
	 * If the allocation fits in a single page, do not fall back.
	 */
	if (size <= PAGE_SIZE) {
		return kmalloc_node(size, flags, node);
	}

	/*
	 * Make sure that larger requests are not too disruptive - no OOM
	 * killer and no allocation failure warnings as we have a fallback.
	 */
	ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
	if (!ret) {
		ret = __lttng_vmalloc_node_range(size, 1,
				VMALLOC_START, VMALLOC_END,
				flags | __GFP_HIGHMEM, PAGE_KERNEL, 0,
				node, __builtin_return_address(0));
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		kfree(addr);
	}
}
#endif
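
/*
 * Illustrative usage sketch (an assumption for clarity, not part of the
 * upstream header): callers allocate with lttng_kvmalloc()/lttng_kvzalloc()
 * and must release with lttng_kvfree(), since the buffer may be either
 * kmalloc- or vmalloc-backed. The function and variable names below are
 * hypothetical.
 */
#if 0	/* example only, never compiled */
static int lttng_example_alloc(void)
{
	char *buf;

	/* Large request: may transparently fall back to vmalloc. */
	buf = lttng_kvzalloc(64 * 1024, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* ... use buf ... */
	lttng_kvfree(buf);	/* frees correctly for either backing */
	return 0;
}
#endif	/* example only */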

#endif /* _LTTNG_WRAPPER_VMALLOC_H */