diff --git a/lib/ringbuffer/ring_buffer_backend.c b/lib/ringbuffer/ring_buffer_backend.c
index 9d199e51..94bdc107 100644
--- a/lib/ringbuffer/ring_buffer_backend.c
+++ b/lib/ringbuffer/ring_buffer_backend.c
@@ -12,11 +12,12 @@
 #include
 #include
 #include
-#include
 #include
 #include
-#include /* for wrapper_vmalloc_sync_all() */
+#include
+#include
+#include /* for wrapper_vmalloc_sync_mappings() */
 #include
 #include
 #include
@@ -43,6 +44,25 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 	unsigned long i;
 
 	num_pages = size >> PAGE_SHIFT;
+
+	/*
+	 * Verify that there are enough free pages available on the system for
+	 * the current allocation request.
+	 * wrapper_check_enough_free_pages uses si_mem_available() if available
+	 * and returns whether there should be enough free pages based on the
+	 * current estimate.
+	 */
+	if (!wrapper_check_enough_free_pages(num_pages))
+		goto not_enough_pages;
+
+	/*
+	 * Set the current user thread as the first target of the OOM killer.
+	 * If the estimate received by si_mem_available() was off, and we do
+	 * end up running out of memory because of this buffer allocation, we
+	 * want to kill the offending app first.
+	 */
+	wrapper_set_current_oom_origin();
+
 	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
 	subbuf_size = chanb->subbuf_size;
 	num_subbuf_alloc = num_subbuf;
@@ -136,7 +156,8 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 	 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
 	 * will not fault.
 	 */
-	wrapper_vmalloc_sync_all();
+	wrapper_vmalloc_sync_mappings();
+	wrapper_clear_current_oom_origin();
 	vfree(pages);
 	return 0;
 
@@ -153,6 +174,8 @@ depopulate:
 array_error:
 	vfree(pages);
 pages_error:
+	wrapper_clear_current_oom_origin();
+not_enough_pages:
 	return -ENOMEM;
 }
 
@@ -236,7 +259,7 @@ void channel_backend_reset(struct channel_backend *chanb)
 	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
 }
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
 
 /*
  * No need to implement a "dead" callback to do a buffer switch here,
@@ -268,7 +291,7 @@ int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
 }
 EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);
 
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 
 #ifdef CONFIG_HOTPLUG_CPU
 
@@ -318,7 +341,7 @@ int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
 
 #endif
 
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 
 /**
  * channel_backend_init - initialize a channel backend
@@ -396,13 +419,13 @@ int channel_backend_init(struct channel_backend *chanb,
 		if (!chanb->buf)
 			goto free_cpumask;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
 		chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
 		ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
 			&chanb->cpuhp_prepare.node);
 		if (ret)
 			goto free_bufs;
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 		{
 			/*
@@ -422,14 +445,14 @@ int channel_backend_init(struct channel_backend *chanb,
 			chanb->cpu_hp_notifier.priority = 5;
 			register_hotcpu_notifier(&chanb->cpu_hp_notifier);
 
-			get_online_cpus();
+			lttng_cpus_read_lock();
 			for_each_online_cpu(i) {
 				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
 							 chanb, i);
 				if (ret)
 					goto free_bufs;	/* cpu hotplug locked */
 			}
-			put_online_cpus();
+			lttng_cpus_read_unlock();
 #else
 			for_each_possible_cpu(i) {
 				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
@@ -439,7 +462,7 @@ int channel_backend_init(struct channel_backend *chanb,
 			}
 #endif
 		}
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 	} else {
 		chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
 		if (!chanb->buf)
 			goto free_cpumask;
@@ -454,18 +477,18 @@
 
 free_bufs:
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
 		/*
 		 * Teardown of lttng_rb_hp_prepare instance
 		 * on "add" error is handled within cpu hotplug,
 		 * no teardown to do from the caller.
 		 */
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 #ifdef CONFIG_HOTPLUG_CPU
-		put_online_cpus();
+		lttng_cpus_read_unlock();
 		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
 #endif
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 		for_each_possible_cpu(i) {
 			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
 
@@ -494,15 +517,15 @@ void channel_backend_unregister_notifiers(struct channel_backend *chanb)
 	const struct lib_ring_buffer_config *config = &chanb->config;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
 		int ret;
 
 		ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
 				&chanb->cpuhp_prepare.node);
 		WARN_ON(ret);
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 	}
 }
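
A note on the allocation-gating hunk above: the diff calls wrapper_check_enough_free_pages(), wrapper_set_current_oom_origin() and wrapper_clear_current_oom_origin() without showing their bodies (they live in the lttng-modules wrapper headers). A minimal sketch of what such wrappers can look like, assuming a kernel that provides si_mem_available() (v4.6+) and the set/clear_current_oom_origin() helpers from linux/oom.h; the real wrappers also have fallbacks for kernels where these helpers do not exist:

#include <linux/mm.h>	/* si_mem_available() */
#include <linux/oom.h>	/* set_current_oom_origin() / clear_current_oom_origin() */

/* Sketch only: the in-tree wrapper may apply a different acceptance policy. */
static inline bool wrapper_check_enough_free_pages(unsigned long num_pages)
{
	/*
	 * si_mem_available() estimates how many pages can be allocated
	 * without triggering reclaim or the OOM killer; refuse buffer
	 * allocations that exceed the current estimate.
	 */
	return (long) num_pages <= si_mem_available();
}

static inline void wrapper_set_current_oom_origin(void)
{
	/* Make the current task the OOM killer's preferred victim. */
	set_current_oom_origin();
}

static inline void wrapper_clear_current_oom_origin(void)
{
	clear_current_oom_origin();
}

This matches the intent stated in the patch comments: reject clearly oversized requests up front, and if the estimate was wrong, steer the OOM killer toward the tracing application rather than an innocent process.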
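
The wrapper_vmalloc_sync_all() to wrapper_vmalloc_sync_mappings() rename tracks the kernel: Linux 5.6 renamed vmalloc_sync_all() to vmalloc_sync_mappings(). An illustrative version-dispatch sketch follows; it assumes the symbol is callable from module context, which mainline does not guarantee (the real lttng-modules wrapper resolves it through kallsyms where needed), and note that 5.8 removed the function altogether:

#include <linux/version.h>
#include <linux/vmalloc.h>

static inline void wrapper_vmalloc_sync_mappings(void)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)
	/* Linux 5.6 renamed vmalloc_sync_all() to vmalloc_sync_mappings(). */
	vmalloc_sync_mappings();
#else
	vmalloc_sync_all();
#endif
}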
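
Most of the remaining churn mechanically replaces LINUX_VERSION_CODE/KERNEL_VERSION with LTTNG_LINUX_VERSION_CODE/LTTNG_KERNEL_VERSION. Layering project-owned macros over the kernel's gives one place to compensate for distribution kernels that backport features without bumping their upstream version number. A plausible minimal definition, assuming the same encoding as the kernel's KERNEL_VERSION(); the real lttng-kernel-version.h is more elaborate:

#include <linux/version.h>

/* Same (major, minor, patch) encoding as the kernel's KERNEL_VERSION(). */
#define LTTNG_KERNEL_VERSION(a, b, c)	(((a) << 16) + ((b) << 8) + (c))

/* Default to the running kernel; a distro-specific build could override this. */
#ifndef LTTNG_LINUX_VERSION_CODE
#define LTTNG_LINUX_VERSION_CODE	LINUX_VERSION_CODE
#endif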
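
Similarly, get_online_cpus()/put_online_cpus() become lttng_cpus_read_lock()/lttng_cpus_read_unlock() because Linux 4.13 renamed the CPU-hotplug read-side lock to cpus_read_lock()/cpus_read_unlock(), and the old names were eventually removed. A sketch of the obvious wrapper, assuming only the rename needs papering over:

#include <linux/version.h>
#include <linux/cpu.h>

static inline void lttng_cpus_read_lock(void)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0)
	cpus_read_lock();	/* 4.13 rename of get_online_cpus() */
#else
	get_online_cpus();
#endif
}

static inline void lttng_cpus_read_unlock(void)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0)
	cpus_read_unlock();
#else
	put_online_cpus();
#endif
}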