#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
-#include <linux/oom.h>
-#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
-#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
+#include <wrapper/cpu.h>
+#include <wrapper/mm.h>
+#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>
num_pages = size >> PAGE_SHIFT;
/*
- * Verify that the number of pages requested for that buffer is smaller
- * than the number of available pages on the system. si_mem_available()
- * returns an _estimate_ of the number of available pages.
+ * Verify that there are enough free pages available on the system for
+ * the current allocation request.
+ * wrapper_check_enough_free_pages uses si_mem_available() if available
+ * and returns whether there should be enough free pages based on the
+ * current estimate.
*/
- if (num_pages > si_mem_available())
+ if (!wrapper_check_enough_free_pages(num_pages))
goto not_enough_pages;
/*
* end up running out of memory because of this buffer allocation, we
* want to kill the offending app first.
*/
- set_current_oom_origin();
+ wrapper_set_current_oom_origin();
num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
subbuf_size = chanb->subbuf_size;
* If kmalloc ever uses vmalloc underneath, make sure the buffer pages
* will not fault.
*/
- wrapper_vmalloc_sync_all();
- clear_current_oom_origin();
+ wrapper_vmalloc_sync_mappings();
+ wrapper_clear_current_oom_origin();
vfree(pages);
return 0;
array_error:
vfree(pages);
pages_error:
- clear_current_oom_origin();
+ wrapper_clear_current_oom_origin();
not_enough_pages:
return -ENOMEM;
}
chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
/*
* No need to implement a "dead" callback to do a buffer switch here,
}
EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
#ifdef CONFIG_HOTPLUG_CPU
#endif
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
/**
* channel_backend_init - initialize a channel backend
if (!chanb->buf)
goto free_cpumask;
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
&chanb->cpuhp_prepare.node);
if (ret)
goto free_bufs;
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
{
/*
chanb->cpu_hp_notifier.priority = 5;
register_hotcpu_notifier(&chanb->cpu_hp_notifier);
- get_online_cpus();
+ lttng_cpus_read_lock();
for_each_online_cpu(i) {
ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
chanb, i);
if (ret)
goto free_bufs; /* cpu hotplug locked */
}
- put_online_cpus();
+ lttng_cpus_read_unlock();
#else
for_each_possible_cpu(i) {
ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
}
#endif
}
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
} else {
chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
if (!chanb->buf)
free_bufs:
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
/*
* Teardown of lttng_rb_hp_prepare instance
* on "add" error is handled within cpu hotplug,
* no teardown to do from the caller.
*/
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
#ifdef CONFIG_HOTPLUG_CPU
- put_online_cpus();
+ lttng_cpus_read_unlock();
unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
#endif
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
for_each_possible_cpu(i) {
struct lib_ring_buffer *buf =
per_cpu_ptr(chanb->buf, i);
const struct lib_ring_buffer_config *config = &chanb->config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
int ret;
ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
&chanb->cpuhp_prepare.node);
WARN_ON(ret);
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
}
}