X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=wrapper%2Fvmalloc.h;h=e117584f37b6ae66a7ccae65dced8c061444d58e;hb=ecda9325cd5ad6b69600fd4d88c46095d22f95e1;hp=dad8d4e34498426762a1d195ed0510f47248c26a;hpb=5a2f5e92e422d87a28581ba97e6b1789ff743bc6;p=lttng-modules.git

diff --git a/wrapper/vmalloc.h b/wrapper/vmalloc.h
index dad8d4e3..e117584f 100644
--- a/wrapper/vmalloc.h
+++ b/wrapper/vmalloc.h
@@ -25,6 +25,10 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
+#include <linux/version.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+
 #ifdef CONFIG_KALLSYMS
 
 #include <linux/kallsyms.h>
@@ -44,15 +48,13 @@ void wrapper_vmalloc_sync_all(void)
 		 * Only x86 needs vmalloc_sync_all to make sure LTTng does not
 		 * trigger recursive page faults.
 		 */
-		printk(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
-		printk(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
+		printk_once(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
+		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
 #endif
 	}
 }
 #else
 
-#include <linux/vmalloc.h>
-
 static inline
 void wrapper_vmalloc_sync_all(void)
 {
@@ -60,4 +62,167 @@ void wrapper_vmalloc_sync_all(void)
 }
 #endif
 
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
+static inline
+void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
+{
+	void *ret;
+
+	ret = kvmalloc_node(size, flags, node);
+	if (is_vmalloc_addr(ret)) {
+		/*
+		 * Make sure we don't trigger recursive page faults in the
+		 * tracing fast path.
+		 */
+		wrapper_vmalloc_sync_all();
+	}
+	return ret;
+}
+
+static inline
+void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
+{
+	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
+}
+
+static inline
+void *lttng_kvmalloc(unsigned long size, gfp_t flags)
+{
+	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
+}
+
+static inline
+void *lttng_kvzalloc(unsigned long size, gfp_t flags)
+{
+	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
+}
+
+static inline
+void lttng_kvfree(const void *addr)
+{
+	kvfree(addr);
+}
+
+#else
+
+#include <linux/slab.h>
+
+/*
+ * kallsyms wrapper of __vmalloc_node with a fallback to kmalloc_node.
+ */
+static inline
+void *__lttng_vmalloc_node_fallback(unsigned long size, unsigned long align,
+		gfp_t gfp_mask, pgprot_t prot, int node, void *caller)
+{
+	void *ret;
+
+#ifdef CONFIG_KALLSYMS
+	/*
+	 * If we have KALLSYMS, get __vmalloc_node which is not exported.
+	 */
+	void *(*lttng__vmalloc_node)(unsigned long size, unsigned long align,
+			gfp_t gfp_mask, pgprot_t prot, int node, void *caller);
+
+	lttng__vmalloc_node = (void *) kallsyms_lookup_funcptr("__vmalloc_node");
+	ret = lttng__vmalloc_node(size, align, gfp_mask, prot, node, caller);
+#else
+	/*
+	 * If we don't have KALLSYMS, fall back to kmalloc_node.
+	 */
+	ret = kmalloc_node(size, gfp_mask, node);
+#endif
+
+	return ret;
+}
+
+/**
+ * lttng_kvmalloc_node - attempt to allocate physically contiguous memory, but upon
+ * failure, fall back to non-contiguous (vmalloc) allocation.
+ * @size: size of the request.
+ * @flags: gfp mask for the allocation - must be compatible with GFP_KERNEL.
+ *
+ * Uses kmalloc to get the memory, but if the allocation fails then falls back
+ * to the vmalloc allocator. Use lttng_kvfree to free the memory.
+ *
+ * Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL are not supported.
+ */
+static inline
+void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
+{
+	void *ret;
+
+	/*
+	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page tables)
+	 * so the given set of flags has to be compatible.
+	 */
+	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);
+
+	/*
+	 * If the allocation fits in a single page, do not fall back.
+	 */
+	if (size <= PAGE_SIZE) {
+		return kmalloc_node(size, flags, node);
+	}
+
+	/*
+	 * Make sure that larger requests are not too disruptive - no OOM
+	 * killer and no allocation failure warnings, as we have a fallback.
+	 */
+	ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
+	if (!ret) {
+		if (node == NUMA_NO_NODE) {
+			/*
+			 * If no node was specified, use __vmalloc which is
+			 * always exported.
+			 */
+			ret = __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);
+		} else {
+			/*
+			 * Otherwise, we need to select a node but __vmalloc_node
+			 * is not exported, use this fallback wrapper which uses
+			 * kallsyms if available or falls back to kmalloc_node.
+			 */
+			ret = __lttng_vmalloc_node_fallback(size, 1,
+					flags | __GFP_HIGHMEM, PAGE_KERNEL, node,
+					__builtin_return_address(0));
+		}
+
+		/*
+		 * Make sure we don't trigger recursive page faults in the
+		 * tracing fast path.
+		 */
+		wrapper_vmalloc_sync_all();
+	}
+	return ret;
+}
+
+static inline
+void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
+{
+	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
+}
+
+static inline
+void *lttng_kvmalloc(unsigned long size, gfp_t flags)
+{
+	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
+}
+
+static inline
+void *lttng_kvzalloc(unsigned long size, gfp_t flags)
+{
+	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
+}
+
+static inline
+void lttng_kvfree(const void *addr)
+{
+	if (is_vmalloc_addr(addr)) {
+		vfree(addr);
+	} else {
+		kfree(addr);
+	}
+}
+#endif
+
 #endif /* _LTTNG_WRAPPER_VMALLOC_H */
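As a usage illustration (not part of the patch itself): a minimal sketch of how a caller might use the lttng_kvzalloc()/lttng_kvfree() pair introduced above. Only lttng_kvzalloc(), lttng_kvfree() and the standard kernel symbols come from real code; the example_buf structure and the example_buf_init()/example_buf_destroy() helpers are hypothetical names used purely for illustration.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

#include <wrapper/vmalloc.h>	/* lttng_kvzalloc() / lttng_kvfree() */

/* Hypothetical buffer descriptor, for illustration only. */
struct example_buf {
	size_t len;
	char *data;
};

static int example_buf_init(struct example_buf *buf, size_t len)
{
	/*
	 * Small requests are served by kmalloc; large ones transparently
	 * fall back to vmalloc, with wrapper_vmalloc_sync_all() called so
	 * the tracing fast path does not take recursive page faults.
	 */
	buf->data = lttng_kvzalloc(len, GFP_KERNEL);
	if (!buf->data)
		return -ENOMEM;
	buf->len = len;
	return 0;
}

static void example_buf_destroy(struct example_buf *buf)
{
	/* Correct for both kmalloc'd and vmalloc'd memory. */
	lttng_kvfree(buf->data);
	buf->data = NULL;
	buf->len = 0;
}

Note the design choice in the pre-4.12 path: lttng_kvfree() dispatches on is_vmalloc_addr(), so callers never need to remember which allocator actually satisfied the request; on 4.12+ kernels kvfree() provides the same behaviour natively.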