Fix: handle large number of pages or subbuffers per buffer
author: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Thu, 1 Sep 2016 22:08:15 +0000 (18:08 -0400)
committer: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Thu, 1 Sep 2016 22:16:55 +0000 (18:16 -0400)
Do not trigger kernel console warnings when we try to allocate too many
pages, too large a kmalloc area for a page array (within a sub-buffer),
or a sub-buffer array (within a buffer).

Use vmalloc/vfree for the "pages" local variable used only during
allocation, which is an array of nr_subbuf * nr_pages_per_subbuf
pointers. This ensures we do not limit the overall buffer size due to
kmalloc limitations.

Fixes #1031

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
lib/ringbuffer/ring_buffer_backend.c
lib/ringbuffer/ring_buffer_frontend.c

index ad7611361b56aaad7805d8e579d5f1a2dc700df0..5275bde24031c655b3ad466d15fbd4d58d0eff58 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/mm.h>
+#include <linux/vmalloc.h>
 
 #include "../../wrapper/vmalloc.h"     /* for wrapper_vmalloc_sync_all() */
 #include "../../wrapper/ringbuffer/config.h"
@@ -65,28 +66,29 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
                num_subbuf_alloc++;
        }
 
-       pages = kmalloc_node(ALIGN(sizeof(*pages) * num_pages,
+       pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
                                   1 << INTERNODE_CACHE_SHIFT),
-                       GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
+                       cpu_to_node(max(bufb->cpu, 0)));
        if (unlikely(!pages))
                goto pages_error;
 
-       virt = kmalloc_node(ALIGN(sizeof(*virt) * num_pages,
+       virt = vmalloc_node(ALIGN(sizeof(*virt) * num_pages,
                                  1 << INTERNODE_CACHE_SHIFT),
-                       GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
+                       cpu_to_node(max(bufb->cpu, 0)));
        if (unlikely(!virt))
                goto virt_error;
 
        bufb->array = kmalloc_node(ALIGN(sizeof(*bufb->array)
                                         * num_subbuf_alloc,
                                  1 << INTERNODE_CACHE_SHIFT),
-                       GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
+                       GFP_KERNEL | __GFP_NOWARN,
+                       cpu_to_node(max(bufb->cpu, 0)));
        if (unlikely(!bufb->array))
                goto array_error;
 
        for (i = 0; i < num_pages; i++) {
                pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
-                                           GFP_KERNEL | __GFP_ZERO, 0);
+                               GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
                if (unlikely(!pages[i]))
                        goto depopulate;
                virt[i] = page_address(pages[i]);
@@ -101,7 +103,8 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
                                sizeof(struct lib_ring_buffer_backend_page)
                                * num_pages_per_subbuf,
                                1 << INTERNODE_CACHE_SHIFT),
-                               GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
+                               GFP_KERNEL | __GFP_NOWARN,
+                               cpu_to_node(max(bufb->cpu, 0)));
                if (!bufb->array[i])
                        goto free_array;
        }
@@ -111,7 +114,8 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
                                sizeof(struct lib_ring_buffer_backend_subbuffer)
                                * num_subbuf,
                                1 << INTERNODE_CACHE_SHIFT),
-                               GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
+                               GFP_KERNEL | __GFP_NOWARN,
+                               cpu_to_node(max(bufb->cpu, 0)));
        if (unlikely(!bufb->buf_wsb))
                goto free_array;
 
@@ -144,8 +148,8 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
         * will not fault.
         */
        wrapper_vmalloc_sync_all();
-       kfree(virt);
-       kfree(pages);
+       vfree(virt);
+       vfree(pages);
        return 0;
 
 free_array:
@@ -157,9 +161,9 @@ depopulate:
                __free_page(pages[i]);
        kfree(bufb->array);
 array_error:
-       kfree(virt);
+       vfree(virt);
 virt_error:
-       kfree(pages);
+       vfree(pages);
 pages_error:
        return -ENOMEM;
 }
index 82c2c9f29a7cd7493bef4c288979733842a13b75..82726a17678ecfb097891f3475956a8dd3bda85a 100644 (file)
@@ -205,7 +205,8 @@ int lib_ring_buffer_create(struct lib_ring_buffer *buf,
                kzalloc_node(ALIGN(sizeof(*buf->commit_hot)
                                   * chan->backend.num_subbuf,
                                   1 << INTERNODE_CACHE_SHIFT),
-                       GFP_KERNEL, cpu_to_node(max(cpu, 0)));
+                       GFP_KERNEL | __GFP_NOWARN,
+                       cpu_to_node(max(cpu, 0)));
        if (!buf->commit_hot) {
                ret = -ENOMEM;
                goto free_chanbuf;
@@ -215,7 +216,8 @@ int lib_ring_buffer_create(struct lib_ring_buffer *buf,
                kzalloc_node(ALIGN(sizeof(*buf->commit_cold)
                                   * chan->backend.num_subbuf,
                                   1 << INTERNODE_CACHE_SHIFT),
-                       GFP_KERNEL, cpu_to_node(max(cpu, 0)));
+                       GFP_KERNEL | __GFP_NOWARN,
+                       cpu_to_node(max(cpu, 0)));
        if (!buf->commit_cold) {
                ret = -ENOMEM;
                goto free_commit;
This page took 0.030323 seconds and 4 git commands to generate.