X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=wrapper%2Fvmalloc.h;h=4000fc9cca0fc17be1e155b263eba3cdccdbc8ab;hb=263b6c88138c3354d63dba3c70a965de94becd22;hp=e117584f37b6ae66a7ccae65dced8c061444d58e;hpb=01ab511358519483fe46e8887d2b1332e1a628f1;p=lttng-modules.git

diff --git a/wrapper/vmalloc.h b/wrapper/vmalloc.h
index e117584f..4000fc9c 100644
--- a/wrapper/vmalloc.h
+++ b/wrapper/vmalloc.h
@@ -1,7 +1,5 @@
-#ifndef _LTTNG_WRAPPER_VMALLOC_H
-#define _LTTNG_WRAPPER_VMALLOC_H
-
-/*
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
  * wrapper/vmalloc.h
  *
  * wrapper around vmalloc_sync_all. Using KALLSYMS to get its address when
@@ -9,22 +7,11 @@
  * modules.
  *
  * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; only
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
+#ifndef _LTTNG_WRAPPER_VMALLOC_H
+#define _LTTNG_WRAPPER_VMALLOC_H
+
 #include <linux/version.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
@@ -34,8 +21,35 @@
 #include <linux/kallsyms.h>
 #include <wrapper/kallsyms.h>
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0))
+
+static inline
+void wrapper_vmalloc_sync_mappings(void)
+{
+	void (*vmalloc_sync_mappings_sym)(void);
+
+	vmalloc_sync_mappings_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_mappings");
+	if (vmalloc_sync_mappings_sym) {
+		vmalloc_sync_mappings_sym();
+	} else {
+#ifdef CONFIG_X86
+		/*
+		 * Only x86 needs vmalloc_sync_mappings to make sure LTTng does not
+		 * trigger recursive page faults.
+		 */
+		printk_once(KERN_WARNING "LTTng: vmalloc_sync_mappings symbol lookup failed.\n");
+		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
+#endif
+	}
+}
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */
+
+/*
+ * Map vmalloc_sync_mappings to vmalloc_sync_all() on kernels before 5.7.
+ */
 static inline
-void wrapper_vmalloc_sync_all(void)
+void wrapper_vmalloc_sync_mappings(void)
 {
 	void (*vmalloc_sync_all_sym)(void);
 
@@ -53,13 +67,29 @@ void wrapper_vmalloc_sync_all(void)
 #endif
 	}
 }
+
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */
+
 #else
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0))
+
 static inline
-void wrapper_vmalloc_sync_all(void)
+void wrapper_vmalloc_sync_mappings(void)
+{
+	return vmalloc_sync_mappings();
+}
+
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */
+
+static inline
+void wrapper_vmalloc_sync_mappings(void)
 {
 	return vmalloc_sync_all();
 }
+
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0)) */
+
 #endif
 
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
@@ -74,7 +104,7 @@ void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
 		 * Make sure we don't trigger recursive page faults in the
 		 * tracing fast path.
 		 */
-		wrapper_vmalloc_sync_all();
+		wrapper_vmalloc_sync_mappings();
 	}
 	return ret;
 }
@@ -107,32 +137,40 @@ void lttng_kvfree(const void *addr)
 
 #include <linux/slab.h>
 
+static inline
+void print_vmalloc_node_range_warning(void)
+{
+	printk_once(KERN_WARNING "LTTng: __vmalloc_node_range symbol lookup failed.\n");
+	printk_once(KERN_WARNING "Tracer performance will be degraded on NUMA systems.\n");
+	printk_once(KERN_WARNING "Please rebuild your kernel with CONFIG_KALLSYMS enabled.\n");
+}
+
 /*
  * kallsyms wrapper of __vmalloc_node with a fallback to kmalloc_node.
  */
 static inline
-void *__lttng_vmalloc_node_fallback(unsigned long size, unsigned long align,
-		gfp_t gfp_mask, pgprot_t prot, int node, void *caller)
+void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
+		unsigned long start, unsigned long end, gfp_t gfp_mask,
+		pgprot_t prot, unsigned long vm_flags, int node,
+		const void *caller)
 {
-	void *ret;
-
 #ifdef CONFIG_KALLSYMS
 	/*
-	 * If we have KALLSYMS, get * __vmalloc_node which is not exported.
+	 * If we have KALLSYMS, get __vmalloc_node_range which is not exported.
 	 */
-	void *(*lttng__vmalloc_node)(unsigned long size, unsigned long align,
-		gfp_t gfp_mask, pgprot_t prot, int node, void *caller);
-
-	lttng__vmalloc_node = (void *) kallsyms_lookup_funcptr("__vmalloc_node");
-	ret = lttng__vmalloc_node(size, align, gfp_mask, prot, node, caller);
-#else
-	/*
-	 * If we don't have KALLSYMS, fallback to kmalloc_node.
-	 */
-	ret = kmalloc_node(size, flags, node);
+	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
+		unsigned long start, unsigned long end, gfp_t gfp_mask,
+		pgprot_t prot, unsigned long vm_flags, int node,
+		const void *caller);
+
+	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
+	if (lttng__vmalloc_node_range)
+		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
+				vm_flags, node, caller);
 #endif
-
-	return ret;
+	if (node != NUMA_NO_NODE)
+		print_vmalloc_node_range_warning();
+	return __vmalloc(size, gfp_mask, prot);
 }
 
 /**
@@ -170,23 +208,10 @@ void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
 	 */
 	ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
 	if (!ret) {
-		if (node == NUMA_NO_NODE) {
-			/*
-			 * If no node was specified, use __vmalloc which is
-			 * always exported.
-			 */
-			ret = __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);
-		} else {
-			/*
-			 * Otherwise, we need to select a node but __vmalloc_node
-			 * is not exported, use this fallback wrapper which uses
-			 * kallsyms if available or falls back to kmalloc_node.
-			 */
-			ret = __lttng_vmalloc_node_fallback(size, 1,
-				flags | __GFP_HIGHMEM, PAGE_KERNEL, node,
-				__builtin_return_address(0));
-		}
-
+		ret = __lttng_vmalloc_node_range(size, 1,
+				VMALLOC_START, VMALLOC_END,
+				flags | __GFP_HIGHMEM, PAGE_KERNEL, 0,
+				node, __builtin_return_address(0));
 		/*
 		 * Make sure we don't trigger recursive page faults in the
 		 * tracing fast path.
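
For context, a minimal sketch of how a module would consume the wrappers this patch reworks; this is hypothetical code, not part of the patch, and the example_* names are illustrative. Only lttng_kvmalloc_node(), lttng_kvfree() and wrapper_vmalloc_sync_mappings() come from wrapper/vmalloc.h itself:

/*
 * Hypothetical consumer of wrapper/vmalloc.h; the example_* names are
 * illustrative and do not exist in lttng-modules.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <wrapper/vmalloc.h>

static void *example_buf;

static int __init example_init(void)
{
	/*
	 * lttng_kvmalloc_node() tries kmalloc_node() first and falls back
	 * to vmalloc'd memory for large or fragmented allocations. On the
	 * vmalloc path it already calls wrapper_vmalloc_sync_mappings(),
	 * so touching the buffer later from the tracing fast path (page
	 * fault handler, NMI) cannot trigger a recursive page fault.
	 */
	example_buf = lttng_kvmalloc_node(64 * PAGE_SIZE, GFP_KERNEL,
			NUMA_NO_NODE);
	if (!example_buf)
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	/* lttng_kvfree() picks kfree() or vfree() based on the address. */
	lttng_kvfree(example_buf);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The design point the patch preserves is that both the kernel-5.7 rename of vmalloc_sync_all() to vmalloc_sync_mappings() and the kallsyms lookup of the non-exported __vmalloc_node_range() stay hidden behind these wrapper entry points, so callers only see the mechanical wrapper_vmalloc_sync_all() to wrapper_vmalloc_sync_mappings() substitution visible in the hunks above.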