/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * wrapper/vmalloc.h
 *
 * Wrapper around vmalloc_sync_all. Uses KALLSYMS to get its address when
 * available; otherwise the kernel must export this function to GPL modules.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LTTNG_WRAPPER_VMALLOC_H
#define _LTTNG_WRAPPER_VMALLOC_H

#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

#ifdef CONFIG_KALLSYMS

#include <linux/kallsyms.h>
#include <wrapper/kallsyms.h>
#include <lttng-kernel-version.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0))

/*
 * wrapper_vmalloc_sync_mappings was removed in v5.8; the vmalloc mappings
 * are now synchronized when they are created or torn down.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0))

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_mappings_sym)(void);

	vmalloc_sync_mappings_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_mappings");
	if (vmalloc_sync_mappings_sym) {
		vmalloc_sync_mappings_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_mappings to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_mappings symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}

/*
 * Canary function to check for 'vmalloc_sync_mappings()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 *   void vmalloc_sync_mappings(void);
 */
static inline
void __canary__vmalloc_sync_mappings(void)
{
	vmalloc_sync_mappings();
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

/*
 * Map vmalloc_sync_mappings to vmalloc_sync_all() on kernels before 5.6.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_all_sym)(void);

	vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
	if (vmalloc_sync_all_sym) {
		vmalloc_sync_all_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_all to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}

/*
 * Canary function to check for 'vmalloc_sync_all()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 *   void vmalloc_sync_all(void);
 */
static inline
void __canary__vmalloc_sync_all(void)
{
	vmalloc_sync_all();
}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

#else

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0))

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	return vmalloc_sync_mappings();
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	return vmalloc_sync_all();
}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

#endif

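/*
 * Illustrative call-site sketch (hypothetical helper, not part of the LTTng
 * API): after allocating tracer memory with vmalloc(), call
 * wrapper_vmalloc_sync_mappings() so the new page-table mappings are
 * propagated before the tracing fast path (page fault handler, NMI) touches
 * the buffer and risks a recursive fault.
 */
static inline
void *lttng_example_vmalloc_traced(unsigned long size)
{
	void *buf = vmalloc(size);

	if (buf)
		wrapper_vmalloc_sync_mappings();
	return buf;
}
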
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	ret = kvmalloc_node(size, flags, node);
	if (is_vmalloc_addr(ret)) {
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	kvfree(addr);
}

#else

#include <linux/slab.h>

static inline
void print_vmalloc_node_range_warning(void)
{
	printk_once(KERN_WARNING "LTTng: __vmalloc_node_range symbol lookup failed.\n");
	printk_once(KERN_WARNING "Tracer performance will be degraded on NUMA systems.\n");
	printk_once(KERN_WARNING "Please rebuild your kernel with CONFIG_KALLSYMS enabled.\n");
}
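
/*
 * The warning above points at the fix: the NUMA-aware fallback needs kallsyms
 * function lookup. A minimal kernel configuration fragment (illustrative):
 *
 *   CONFIG_KALLSYMS=y
 */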

/*
 * kallsyms wrapper of __vmalloc_node_range with a fallback to __vmalloc.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, get __vmalloc_node_range, which is not exported.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				vm_flags, node, caller);
#endif
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}

/*
 * Canary function to check for '__vmalloc_node_range()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 *   extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 *		unsigned long start, unsigned long end, gfp_t gfp_mask,
 *		pgprot_t prot, unsigned long vm_flags, int node,
 *		const void *caller);
 */
static inline
void *__canary____lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller)
{
	return __vmalloc_node_range(size, align, start, end, gfp_mask, prot,
			vm_flags, node, caller);
}

/**
 * lttng_kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible with GFP_KERNEL.
 * @node: preferred NUMA node for the allocation (NUMA_NO_NODE for no preference).
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use lttng_kvfree to free the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL are not supported
 */
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page tables)
	 * so the given set of flags has to be compatible.
	 */
	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);

	/*
	 * If the allocation fits in a single page, do not fall back.
	 */
	if (size <= PAGE_SIZE) {
		return kmalloc_node(size, flags, node);
	}

	/*
	 * Make sure that larger requests are not too disruptive - no OOM
	 * killer and no allocation failure warnings as we have a fallback.
	 */
	ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
	if (!ret) {
		ret = __lttng_vmalloc_node_range(size, 1,
				VMALLOC_START, VMALLOC_END,
				flags | __GFP_HIGHMEM, PAGE_KERNEL, 0,
				node, __builtin_return_address(0));
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		kfree(addr);
	}
}
#endif
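
/*
 * Illustrative usage sketch (hypothetical helpers, not part of the LTTng
 * API): allocate a zeroed, node-local buffer that may be vmalloc-backed for
 * large sizes, and release it with lttng_kvfree() so the matching allocator
 * (kfree or vfree) is used either way.
 */
static inline
void *lttng_example_alloc_state(unsigned long size, int node)
{
	return lttng_kvzalloc_node(size, GFP_KERNEL, node);
}

static inline
void lttng_example_free_state(void *state)
{
	lttng_kvfree(state);
}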

#endif /* _LTTNG_WRAPPER_VMALLOC_H */