Fix: unregister cpu hotplug notifier on buffer alloc error
diff --git a/lib/ringbuffer/ring_buffer_backend.c b/lib/ringbuffer/ring_buffer_backend.c
index 5466325f10a3b95535cf6270128254d35d656074..c7f2fe9d7b6226b33738d2fbabb379b42999ef93 100644
--- a/lib/ringbuffer/ring_buffer_backend.c
+++ b/lib/ringbuffer/ring_buffer_backend.c
 #include <linux/cpu.h>
 #include <linux/mm.h>
 
-#include "../../wrapper/vmalloc.h"     /* for wrapper_vmalloc_sync_all() */
-#include "../../wrapper/ringbuffer/config.h"
-#include "../../wrapper/ringbuffer/backend.h"
-#include "../../wrapper/ringbuffer/frontend.h"
+#include <wrapper/vmalloc.h>   /* for wrapper_vmalloc_sync_all() */
+#include <wrapper/ringbuffer/config.h>
+#include <wrapper/ringbuffer/backend.h>
+#include <wrapper/ringbuffer/frontend.h>
 
 /**
  * lib_ring_buffer_backend_allocate - allocate a channel buffer
@@ -52,7 +52,6 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
        unsigned long subbuf_size, mmap_offset = 0;
        unsigned long num_subbuf_alloc;
        struct page **pages;
-       void **virt;
        unsigned long i;
 
        num_pages = size >> PAGE_SHIFT;
@@ -71,12 +70,6 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
        if (unlikely(!pages))
                goto pages_error;
 
-       virt = kmalloc_node(ALIGN(sizeof(*virt) * num_pages,
-                                 1 << INTERNODE_CACHE_SHIFT),
-                       GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
-       if (unlikely(!virt))
-               goto virt_error;
-
        bufb->array = kmalloc_node(ALIGN(sizeof(*bufb->array)
                                         * num_subbuf_alloc,
                                  1 << INTERNODE_CACHE_SHIFT),
@@ -89,7 +82,6 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
                                            GFP_KERNEL | __GFP_ZERO, 0);
                if (unlikely(!pages[i]))
                        goto depopulate;
-               virt[i] = page_address(pages[i]);
        }
        bufb->num_pages_per_subbuf = num_pages_per_subbuf;
 
@@ -138,8 +130,8 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
        for (i = 0; i < num_subbuf_alloc; i++) {
                for (j = 0; j < num_pages_per_subbuf; j++) {
                        CHAN_WARN_ON(chanb, page_idx > num_pages);
-                       bufb->array[i]->p[j].virt = virt[page_idx];
-                       bufb->array[i]->p[j].page = pages[page_idx];
+                       bufb->array[i]->p[j].virt = page_address(pages[page_idx]);
+                       bufb->array[i]->p[j].pfn = page_to_pfn(pages[page_idx]);
                        page_idx++;
                }
                if (config->output == RING_BUFFER_MMAP) {
@@ -153,7 +145,6 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
         * will not fault.
         */
        wrapper_vmalloc_sync_all();
-       kfree(virt);
        kfree(pages);
        return 0;
 
@@ -168,8 +159,6 @@ depopulate:
                __free_page(pages[i]);
        kfree(bufb->array);
 array_error:
-       kfree(virt);
-virt_error:
        kfree(pages);
 pages_error:
        return -ENOMEM;
@@ -201,7 +190,7 @@ void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
        kfree(bufb->buf_cnt);
        for (i = 0; i < num_subbuf_alloc; i++) {
                for (j = 0; j < bufb->num_pages_per_subbuf; j++)
-                       __free_page(bufb->array[i]->p[j].page);
+                       __free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
                kfree(bufb->array[i]);
        }
        kfree(bufb->array);
@@ -433,6 +422,7 @@ free_bufs:
                }
 #ifdef CONFIG_HOTPLUG_CPU
                put_online_cpus();
+               unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
 #endif
                free_percpu(chanb->buf);
        } else
@@ -704,8 +694,7 @@ void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bu
                                                        + (offset & ~PAGE_MASK),
                                                        src, pagecpy) != 0;
                if (ret > 0) {
-                       offset += (pagecpy - ret);
-                       len -= (pagecpy - ret);
+                       /* Copy failed. */
                        _lib_ring_buffer_memset(bufb, offset, 0, len, 0);
                        break; /* stop copy */
                }
@@ -961,15 +950,15 @@ int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offse
 EXPORT_SYMBOL_GPL(lib_ring_buffer_read_cstr);
 
 /**
- * lib_ring_buffer_read_get_page - Get a whole page to read from
+ * lib_ring_buffer_read_get_pfn - Get a page frame number to read from
  * @bufb : buffer backend
  * @offset : offset within the buffer
  * @virt : pointer to page address (output)
  *
  * Should be protected by get_subbuf/put_subbuf.
- * Returns the pointer to the page struct pointer.
+ * Returns a pointer to the unsigned long page frame number.
  */
-struct page **lib_ring_buffer_read_get_page(struct lib_ring_buffer_backend *bufb,
+unsigned long *lib_ring_buffer_read_get_pfn(struct lib_ring_buffer_backend *bufb,
                                            size_t offset, void ***virt)
 {
        size_t index;
@@ -986,9 +975,9 @@ struct page **lib_ring_buffer_read_get_page(struct lib_ring_buffer_backend *bufb
        CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
                     && subbuffer_id_is_noref(config, id));
        *virt = &rpages->p[index].virt;
-       return &rpages->p[index].page;
+       return &rpages->p[index].pfn;
 }
-EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_page);
+EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_pfn);
 
 /**
  * lib_ring_buffer_read_offset_address - get address of a buffer location
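
Note on the renamed accessor: with this change the read side receives a page frame number instead of a struct page pointer and converts it back with pfn_to_page() only when it actually needs the page. A minimal, hypothetical reader-side sketch (the helper name is illustrative and not part of this patch), assuming it runs between get_subbuf() and put_subbuf():

	#include <linux/mm.h>			/* pfn_to_page() */
	#include <wrapper/ringbuffer/backend.h>	/* assumed to declare lib_ring_buffer_read_get_pfn() */

	/* Illustrative only: fetch the struct page backing a read offset. */
	static struct page *example_read_page(struct lib_ring_buffer_backend *bufb,
					      size_t offset)
	{
		unsigned long *pfnp;
		void **virt;

		/* Caller must hold the sub-buffer via get_subbuf(). */
		pfnp = lib_ring_buffer_read_get_pfn(bufb, offset, &virt);
		return pfn_to_page(*pfnp);
	}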