/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * wrapper/vmalloc.h
 *
 * Wrapper around vmalloc_sync_mappings() and vmalloc_sync_all(). Uses
 * KALLSYMS to look up the address when available; otherwise we need a kernel
 * that exports the function to GPL modules.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LTTNG_WRAPPER_VMALLOC_H
#define _LTTNG_WRAPPER_VMALLOC_H

#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

#ifdef CONFIG_KALLSYMS

#include <linux/kallsyms.h>
#include <wrapper/kallsyms.h>
#include <lttng/kernel-version.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0))

/*
 * wrapper_vmalloc_sync_mappings was removed in v5.8; the vmalloc mappings
 * are now synchronized when they are created or torn down.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0))

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_mappings_sym)(void);

	vmalloc_sync_mappings_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_mappings");
	if (vmalloc_sync_mappings_sym) {
		vmalloc_sync_mappings_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_mappings to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_mappings symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}


/*
 * Canary function to check for 'vmalloc_sync_mappings()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 *   void vmalloc_sync_mappings(void);
 */
static inline
void __canary__vmalloc_sync_mappings(void)
{
	vmalloc_sync_mappings();
}
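
/*
 * The canary pattern turns a missing or re-prototyped symbol into a build
 * failure rather than a silent runtime kallsyms lookup failure: the canary
 * calls the symbol directly through its declaration in the kernel headers,
 * so the module stops compiling as soon as that declaration changes. The
 * same pattern is repeated below for vmalloc_sync_all() and
 * __vmalloc_node_range().
 */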

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

/*
 * Map vmalloc_sync_mappings to vmalloc_sync_all() on kernels before 5.6.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_all_sym)(void);

	vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
	if (vmalloc_sync_all_sym) {
		vmalloc_sync_all_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_all to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}

/*
 * Canary function to check for 'vmalloc_sync_all()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 *   void vmalloc_sync_all(void);
 */
static inline
void __canary__vmalloc_sync_all(void)
{
	vmalloc_sync_all();
}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

#else /* CONFIG_KALLSYMS */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0))

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	vmalloc_sync_mappings();
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	vmalloc_sync_all();
}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

#endif /* CONFIG_KALLSYMS */

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	ret = kvmalloc_node(size, flags, node);
	if (is_vmalloc_addr(ret)) {
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	kvfree(addr);
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)) */

#include <linux/slab.h>

static inline
void print_vmalloc_node_range_warning(void)
{
	printk_once(KERN_WARNING "LTTng: __vmalloc_node_range symbol lookup failed.\n");
	printk_once(KERN_WARNING "Tracer performance will be degraded on NUMA systems.\n");
	printk_once(KERN_WARNING "Please rebuild your kernel with CONFIG_KALLSYMS enabled.\n");
}


/*
 * kallsyms wrapper of __vmalloc_node_range with a fallback to __vmalloc.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, look up __vmalloc_node_range, which is not exported.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				vm_flags, node, caller);
#endif
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}


/*
 * Canary function to check for '__vmalloc_node_range()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 *   extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 *			unsigned long start, unsigned long end, gfp_t gfp_mask,
 *			pgprot_t prot, unsigned long vm_flags, int node,
 *			const void *caller);
 */
static inline
void *__canary____lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller)
{
	return __vmalloc_node_range(size, align, start, end, gfp_mask, prot,
			vm_flags, node, caller);
}

/**
 * lttng_kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible with GFP_KERNEL.
 * @node: NUMA node to allocate from, or NUMA_NO_NODE.
 *
 * Uses kmalloc to get the memory, but if the allocation fails falls back
 * to the vmalloc allocator. Use lttng_kvfree to free the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL are not supported.
 */
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page
	 * tables), so the given set of flags has to be compatible.
	 */
	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);

	/*
	 * If the allocation fits in a single page, do not fall back.
	 */
	if (size <= PAGE_SIZE) {
		return kmalloc_node(size, flags, node);
	}

	/*
	 * Make sure that larger requests are not too disruptive - no OOM
	 * killer and no allocation failure warnings, as we have a fallback.
	 */
	ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
	if (!ret) {
		ret = __lttng_vmalloc_node_range(size, 1,
				VMALLOC_START, VMALLOC_END,
				flags | __GFP_HIGHMEM, PAGE_KERNEL, 0,
				node, __builtin_return_address(0));
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		kfree(addr);
	}
}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)) */
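
/*
 * Example (illustrative sketch, not part of the upstream wrapper API): a
 * caller allocates a potentially large, zeroed table with lttng_kvzalloc()
 * on the slow path and releases it with the matching lttng_kvfree(). The
 * lttng_example_* names below are hypothetical and exist only to show the
 * intended pairing of the allocation and free helpers.
 */
static inline
void *lttng_example_alloc_table(unsigned long nr_entries)
{
	/*
	 * May be serviced by kmalloc, or fall back to vmalloc for large
	 * sizes; the wrapper syncs vmalloc mappings where needed.
	 */
	return lttng_kvzalloc(nr_entries * sizeof(unsigned long), GFP_KERNEL);
}

static inline
void lttng_example_free_table(void *table)
{
	/* lttng_kvfree() dispatches to vfree() or kfree() as appropriate. */
	lttng_kvfree(table);
}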

#endif /* _LTTNG_WRAPPER_VMALLOC_H */