X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=lib%2Fringbuffer%2Fring_buffer_backend.c;h=ec331232649cfcef667569779761c50456e810f6;hb=5760f3f4e64af16249a4f208e680c7dcffb4f990;hp=9d199e516aacbaf009538faf394671113e18bf70;hpb=5f14d8ae2cc0734b007c8770c3b13ff00d830040;p=lttng-modules.git

diff --git a/lib/ringbuffer/ring_buffer_backend.c b/lib/ringbuffer/ring_buffer_backend.c
index 9d199e51..ec331232 100644
--- a/lib/ringbuffer/ring_buffer_backend.c
+++ b/lib/ringbuffer/ring_buffer_backend.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR LGPL-2.1)
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
  *
  * ring_buffer_backend.c
  *
@@ -15,11 +15,11 @@
 #include <linux/cpu.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
+#include <linux/oom.h>
 
-#include <wrapper/vmalloc.h>	/* for wrapper_vmalloc_sync_all() */
-#include <wrapper/ringbuffer/config.h>
-#include <wrapper/ringbuffer/backend.h>
-#include <wrapper/ringbuffer/frontend.h>
+#include <ringbuffer/config.h>
+#include <ringbuffer/backend.h>
+#include <ringbuffer/frontend.h>
 
 /**
  * lib_ring_buffer_backend_allocate - allocate a channel buffer
@@ -43,6 +43,25 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 	unsigned long i;
 
 	num_pages = size >> PAGE_SHIFT;
+
+	/*
+	 * Verify that there are enough free pages available on the system
+	 * for the current allocation request.
+	 * si_mem_available() returns an estimate of the number of pages an
+	 * application can allocate without causing swapping; refuse the
+	 * allocation outright if the request exceeds that estimate.
+	 */
+	if (num_pages >= si_mem_available())
+		goto not_enough_pages;
+
+	/*
+	 * Set the current user thread as the first target of the OOM killer.
+	 * If the estimate returned by si_mem_available() was off, and we do
+	 * end up running out of memory because of this buffer allocation, we
+	 * want to kill the offending app first.
+	 */
+	set_current_oom_origin();
+
 	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
 	subbuf_size = chanb->subbuf_size;
 	num_subbuf_alloc = num_subbuf;
@@ -58,14 +77,13 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 	if (unlikely(!pages))
 		goto pages_error;
 
-	bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
-					* num_subbuf_alloc,
-				1 << INTERNODE_CACHE_SHIFT),
+	bufb->array = kvmalloc_node(ALIGN(sizeof(*bufb->array)
+					* num_subbuf_alloc,
+				1 << INTERNODE_CACHE_SHIFT),
 			GFP_KERNEL | __GFP_NOWARN,
 			cpu_to_node(max(bufb->cpu, 0)));
 	if (unlikely(!bufb->array))
 		goto array_error;
-
 	for (i = 0; i < num_pages; i++) {
 		pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
 				GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
@@ -77,7 +95,7 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 	/* Allocate backend pages array elements */
 	for (i = 0; i < num_subbuf_alloc; i++) {
 		bufb->array[i] =
-			lttng_kvzalloc_node(ALIGN(
+			kvzalloc_node(ALIGN(
 				sizeof(struct lib_ring_buffer_backend_pages) +
 				sizeof(struct lib_ring_buffer_backend_page)
 				* num_pages_per_subbuf,
@@ -89,7 +107,7 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 	}
 
 	/* Allocate write-side subbuffer table */
-	bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
+	bufb->buf_wsb = kvzalloc_node(ALIGN(
 				sizeof(struct lib_ring_buffer_backend_subbuffer)
 				* num_subbuf,
 				1 << INTERNODE_CACHE_SHIFT),
@@ -109,7 +127,7 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 	bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
 
 	/* Allocate subbuffer packet counter table */
-	bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
+	bufb->buf_cnt = kvzalloc_node(ALIGN(
 				sizeof(struct lib_ring_buffer_backend_counts)
 				* num_subbuf,
 				1 << INTERNODE_CACHE_SHIFT),
@@ -132,27 +150,25 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 		}
 	}
 
-	/*
-	 * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
-	 * will not fault.
-	 */
-	wrapper_vmalloc_sync_all();
+	clear_current_oom_origin();
 	vfree(pages);
 	return 0;
 
 free_wsb:
-	lttng_kvfree(bufb->buf_wsb);
+	kvfree(bufb->buf_wsb);
 free_array:
 	for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
-		lttng_kvfree(bufb->array[i]);
+		kvfree(bufb->array[i]);
 depopulate:
 	/* Free all allocated pages */
 	for (i = 0; (i < num_pages && pages[i]); i++)
 		__free_page(pages[i]);
-	lttng_kvfree(bufb->array);
+	kvfree(bufb->array);
 array_error:
 	vfree(pages);
 pages_error:
+	clear_current_oom_origin();
+not_enough_pages:
 	return -ENOMEM;
 }
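The hunks above introduce a guard pattern worth noting on its own: check si_mem_available() before committing to a large multi-page allocation, and bracket the allocation with set_current_oom_origin()/clear_current_oom_origin() so that, if the estimate was wrong, the requesting task is the OOM killer's first victim rather than an innocent process. The following standalone sketch (not part of the patch; alloc_demo_pages and its parameters are illustrative names) shows the same pattern in isolation:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/oom.h>
#include <linux/topology.h>

static int alloc_demo_pages(struct page **pages, unsigned long nr)
{
	unsigned long i;

	/*
	 * si_mem_available() is only an estimate of how many pages can be
	 * allocated without pushing the system into swap; treat it as a
	 * soft limit and bail out early instead of dragging the system
	 * into heavy reclaim.
	 */
	if (nr >= si_mem_available())
		return -ENOMEM;

	/* If the estimate was wrong, make this task the first OOM victim. */
	set_current_oom_origin();
	for (i = 0; i < nr; i++) {
		pages[i] = alloc_pages_node(numa_node_id(),
				GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
		if (!pages[i])
			goto free_pages;
	}
	clear_current_oom_origin();
	return 0;

free_pages:
	while (i--)
		__free_page(pages[i]);
	clear_current_oom_origin();
	return -ENOMEM;
}

Note that the OOM-origin flag must be cleared on every exit path, which is why the patch adds a second clear_current_oom_origin() under the pages_error label.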
@@ -178,14 +194,14 @@ void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
 	if (chanb->extra_reader_sb)
 		num_subbuf_alloc++;
 
-	lttng_kvfree(bufb->buf_wsb);
-	lttng_kvfree(bufb->buf_cnt);
+	kvfree(bufb->buf_wsb);
+	kvfree(bufb->buf_cnt);
 	for (i = 0; i < num_subbuf_alloc; i++) {
 		for (j = 0; j < bufb->num_pages_per_subbuf; j++)
 			__free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
-		lttng_kvfree(bufb->array[i]);
+		kvfree(bufb->array[i]);
 	}
-	lttng_kvfree(bufb->array);
+	kvfree(bufb->array);
 	bufb->allocated = 0;
 }
@@ -236,8 +252,6 @@ void channel_backend_reset(struct channel_backend *chanb)
 	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
 }
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
-
 /*
  * No need to implement a "dead" callback to do a buffer switch here,
  * because it will happen when tracing is stopped, or will be done by
@@ -268,58 +282,6 @@ int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
 }
 EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);
 
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-/**
- * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
- * @nb: notifier block
- * @action: hotplug action to take
- * @hcpu: CPU number
- *
- * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
- */
-static
-int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
-				    unsigned long action,
-				    void *hcpu)
-{
-	unsigned int cpu = (unsigned long)hcpu;
-	struct channel_backend *chanb = container_of(nb, struct channel_backend,
-						     cpu_hp_notifier);
-	const struct lib_ring_buffer_config *config = &chanb->config;
-	struct lib_ring_buffer *buf;
-	int ret;
-
-	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
-
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		buf = per_cpu_ptr(chanb->buf, cpu);
-		ret = lib_ring_buffer_create(buf, chanb, cpu);
-		if (ret) {
-			printk(KERN_ERR
-				"ring_buffer_cpu_hp_callback: cpu %d "
-				"buffer creation failed\n", cpu);
-			return NOTIFY_BAD;
-		}
-		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		/* No need to do a buffer switch here, because it will happen
-		 * when tracing is stopped, or will be done by switch timer CPU
-		 * DEAD callback.
-		 */
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-#endif
-
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-
 /**
  * channel_backend_init - initialize a channel backend
  * @chanb: channel backend
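The hunks above and below drop the pre-4.10 notifier-based hotplug path and keep only the cpuhp state-machine variant: lttng_cpuhp_rb_backend_prepare stays as the per-instance "prepare" callback, and each channel backend is attached with cpuhp_state_add_instance() further down. For reference, here is a minimal sketch of the multi-instance hotplug API that replaces the old notifiers; the demo_* names are hypothetical and this is not code from the patch:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

static enum cpuhp_state demo_cpuhp_state;

/*
 * Invoked once per online CPU when an instance is added, and again for
 * every CPU plugged in later. 'node' identifies the registered instance.
 */
static int demo_cpuhp_prepare(unsigned int cpu, struct hlist_node *node)
{
	/* Set up the per-CPU resource reachable from 'node' here. */
	return 0;
}

static int __init demo_cpuhp_init(void)
{
	int ret;

	/* One dynamically allocated "prepare" state for the subsystem. */
	ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN, "demo:prepare",
				      demo_cpuhp_prepare, NULL);
	if (ret < 0)
		return ret;
	demo_cpuhp_state = ret;
	return 0;
}

Each object then registers its own hlist_node with cpuhp_state_add_instance(demo_cpuhp_state, &node) and detaches with cpuhp_state_remove_instance(), which is exactly the shape of the LTTng code that survives this diff.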
@@ -396,50 +358,11 @@ int channel_backend_init(struct channel_backend *chanb,
 		if (!chanb->buf)
 			goto free_cpumask;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
 		chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
 		ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
 				&chanb->cpuhp_prepare.node);
 		if (ret)
 			goto free_bufs;
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-
-		{
-			/*
-			 * In case of non-hotplug cpu, if the ring-buffer is allocated
-			 * in early initcall, it will not be notified of secondary cpus.
-			 * In that off case, we need to allocate for all possible cpus.
-			 */
-#ifdef CONFIG_HOTPLUG_CPU
-			/*
-			 * buf->backend.allocated test takes care of concurrent CPU
-			 * hotplug.
-			 * Priority higher than frontend, so we create the ring buffer
-			 * before we start the timer.
-			 */
-			chanb->cpu_hp_notifier.notifier_call =
-					lib_ring_buffer_cpu_hp_callback;
-			chanb->cpu_hp_notifier.priority = 5;
-			register_hotcpu_notifier(&chanb->cpu_hp_notifier);
-
-			get_online_cpus();
-			for_each_online_cpu(i) {
-				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
-							     chanb, i);
-				if (ret)
-					goto free_bufs; /* cpu hotplug locked */
-			}
-			put_online_cpus();
-#else
-			for_each_possible_cpu(i) {
-				ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
-							     chanb, i);
-				if (ret)
-					goto free_bufs;
-			}
-#endif
-		}
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
 	} else {
 		chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
 		if (!chanb->buf)
 			goto free_cpumask;
@@ -454,18 +377,11 @@ int channel_backend_init(struct channel_backend *chanb,
 
 free_bufs:
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
 		/*
 		 * Teardown of lttng_rb_hp_prepare instance
 		 * on "add" error is handled within cpu hotplug,
 		 * no teardown to do from the caller.
 		 */
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-#ifdef CONFIG_HOTPLUG_CPU
-		put_online_cpus();
-		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
-#endif
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
 		for_each_possible_cpu(i) {
 			struct lib_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
@@ -494,15 +410,11 @@ void channel_backend_unregister_notifiers(struct channel_backend *chanb)
 	const struct lib_ring_buffer_config *config = &chanb->config;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
 		int ret;
 
 		ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
 				&chanb->cpuhp_prepare.node);
 		WARN_ON(ret);
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-		unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
 	}
 }
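One call shape recurs throughout the lttng_kv* -> kv* conversion above: kvzalloc_node(ALIGN(size, 1 << INTERNODE_CACHE_SHIFT), GFP_KERNEL | __GFP_NOWARN, cpu_to_node(max(cpu, 0))). The size is padded to the internode cacheline to avoid false sharing between CPUs, the allocation is placed on the buffer's NUMA node (cpu is -1 for global buffers, hence the max()), and __GFP_NOWARN suppresses the allocation-failure splat since every caller handles NULL itself. A condensed illustration, where demo_alloc_table is a hypothetical helper and not code from the patch:

#include <linux/cache.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/topology.h>

/* Zeroed, cacheline-padded table on the CPU's NUMA node (cpu may be -1). */
static void *demo_alloc_table(size_t entry_size, unsigned int nr_entries,
		int cpu)
{
	return kvzalloc_node(ALIGN(entry_size * nr_entries,
				   1 << INTERNODE_CACHE_SHIFT),
			GFP_KERNEL | __GFP_NOWARN,
			cpu_to_node(max(cpu, 0)));
}

kvzalloc_node() falls back to vmalloc when contiguous pages are scarce, and kvfree() handles both backing stores, which is why the error paths in this diff can free every table with a single kvfree() call.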