Add a packet sequence number
diff --git a/lib/ringbuffer/ring_buffer_backend.c b/lib/ringbuffer/ring_buffer_backend.c
index e8ad4c521cfb66440e51eb4ae9ca0930ea6528d6..5466325f10a3b95535cf6270128254d35d656074 100644
@@ -125,6 +125,15 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
        else
                bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
 
+       /* Allocate subbuffer packet counter table */
+       bufb->buf_cnt = kzalloc_node(ALIGN(
+                               sizeof(struct lib_ring_buffer_backend_counts)
+                               * num_subbuf,
+                               1 << INTERNODE_CACHE_SHIFT),
+                       GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
+       if (unlikely(!bufb->buf_cnt))
+               goto free_wsb;
+
        /* Assign pages to page index */
        for (i = 0; i < num_subbuf_alloc; i++) {
                for (j = 0; j < num_pages_per_subbuf; j++) {
@@ -148,6 +157,8 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
        kfree(pages);
        return 0;
 
+free_wsb:
+       kfree(bufb->buf_wsb);
 free_array:
        for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
                kfree(bufb->array[i]);
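
The counter table allocated above gives each sub-buffer its own record; this is what carries the packet sequence number named in the commit subject. The structure definition is not part of this diff, so the sketch below is an assumption (field name included), not a quote from the tree:

    #include <linux/types.h>	/* u64 */

    /* Assumed layout: one entry per sub-buffer. */
    struct lib_ring_buffer_backend_counts {
            u64 seq_cnt;	/* hypothetical: packet sequence counter */
    };

The ALIGN(..., 1 << INTERNODE_CACHE_SHIFT) rounds the allocation up to an internode cache line so the table does not false-share with a neighbouring allocation, and cpu_to_node(max(bufb->cpu, 0)) maps the global-buffer case (cpu == -1) onto CPU 0's NUMA node.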
@@ -187,6 +198,7 @@ void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
                num_subbuf_alloc++;
 
        kfree(bufb->buf_wsb);
+       kfree(bufb->buf_cnt);
        for (i = 0; i < num_subbuf_alloc; i++) {
                for (j = 0; j < bufb->num_pages_per_subbuf; j++)
                        __free_page(bufb->array[i]->p[j].page);
@@ -253,7 +265,7 @@ void channel_backend_reset(struct channel_backend *chanb)
  *     Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
  */
 static
-int __cpuinit lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
+int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
                                              unsigned long action,
                                              void *hcpu)
 {
@@ -331,6 +343,12 @@ int channel_backend_init(struct channel_backend *chanb,
                return -EINVAL;
        if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
                return -EINVAL;
+       /*
+        * Overwrite mode buffers require at least 2 subbuffers per
+        * buffer.
+        */
+       if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
+               return -EINVAL;
 
        ret = subbuffer_id_check_index(config, num_subbuf);
        if (ret)
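
Read together, the existing power-of-two test and the new overwrite-mode constraint amount to a single validity predicate on num_subbuf. A stand-alone restatement, with a hypothetical helper name and a plain int for the mode, purely for illustration:

    /* Hypothetical restatement of the checks above. */
    static bool num_subbuf_is_valid(int mode, unsigned int num_subbuf)
    {
            /* Must be a non-zero power of two: 1, 2, 4, 8, ... */
            if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
                    return false;
            /*
             * Overwrite mode presumably needs one sub-buffer held by
             * the reader while the writer overwrites the others, so a
             * single sub-buffer cannot serve both sides.
             */
            if (mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
                    return false;
            return true;
    }

The bit trick works because a power of two has exactly one bit set: subtracting 1 flips every bit below it, so the AND of the two is zero only for powers of two.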
@@ -478,7 +496,7 @@ void channel_backend_free(struct channel_backend *chanb)
  * @pagecpy : number of bytes copied so far in the current page
  */
 void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb, size_t offset,
-                           const void *src, size_t len, ssize_t pagecpy)
+                           const void *src, size_t len, size_t pagecpy)
 {
        struct channel_backend *chanb = &bufb->chan->backend;
        const struct lib_ring_buffer_config *config = &chanb->config;
@@ -524,7 +542,7 @@ EXPORT_SYMBOL_GPL(_lib_ring_buffer_write);
  */
 void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
                             size_t offset,
-                            int c, size_t len, ssize_t pagecpy)
+                            int c, size_t len, size_t pagecpy)
 {
        struct channel_backend *chanb = &bufb->chan->backend;
        const struct lib_ring_buffer_config *config = &chanb->config;
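
This hunk and the one before it retype pagecpy from ssize_t to size_t: it counts bytes already copied and can never be negative, and keeping every byte counter unsigned avoids mixed-signedness comparisons such as the loop condition len != pagecpy. A tiny stand-alone program (not from this tree) showing the trap the change avoids:

    #include <stdio.h>
    #include <sys/types.h>

    int main(void)
    {
            size_t len = 8;
            ssize_t pagecpy = -1;

            /*
             * The usual arithmetic conversions turn pagecpy into an
             * unsigned value in the comparison below, so -1 becomes
             * SIZE_MAX and the test is false even though -1 < 8
             * arithmetically.
             */
            if (pagecpy < len)
                    printf("never printed\n");
            else
                    printf("-1 compared as a huge unsigned value\n");
            return 0;
    }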
@@ -557,9 +575,90 @@ void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
 }
 EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
 
+/**
+ * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
+ * @bufb : buffer backend
+ * @offset : offset within the buffer
+ * @src : source address
+ * @len : length to write
+ * @pagecpy : number of bytes copied so far in the current page
+ * @pad : character to use for padding
+ */
+void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
+                       size_t offset, const char *src, size_t len,
+                       size_t pagecpy, int pad)
+{
+       struct channel_backend *chanb = &bufb->chan->backend;
+       const struct lib_ring_buffer_config *config = &chanb->config;
+       size_t sbidx, index;
+       struct lib_ring_buffer_backend_pages *rpages;
+       unsigned long sb_bindex, id;
+       int src_terminated = 0;
+
+       CHAN_WARN_ON(chanb, !len);
+       offset += pagecpy;
+       do {
+               len -= pagecpy;
+               if (!src_terminated)
+                       src += pagecpy;
+               sbidx = offset >> chanb->subbuf_size_order;
+               index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+
+               /*
+                * Underlying layer should never ask for writes across
+                * subbuffers.
+                */
+               CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+
+               pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+               id = bufb->buf_wsb[sbidx].id;
+               sb_bindex = subbuffer_id_get_index(config, id);
+               rpages = bufb->array[sb_bindex];
+               CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+                            && subbuffer_id_is_noref(config, id));
+
+               if (likely(!src_terminated)) {
+                       size_t count, to_copy;
+
+                       to_copy = pagecpy;
+                       if (pagecpy == len)
+                               to_copy--;      /* Final '\0' */
+                       count = lib_ring_buffer_do_strcpy(config,
+                                       rpages->p[index].virt
+                                               + (offset & ~PAGE_MASK),
+                                       src, to_copy);
+                       offset += count;
+                       /* Padding */
+                       if (unlikely(count < to_copy)) {
+                               size_t pad_len = to_copy - count;
+
+                               /* Next pages will have padding */
+                               src_terminated = 1;
+                               lib_ring_buffer_do_memset(rpages->p[index].virt
+                                               + (offset & ~PAGE_MASK),
+                                       pad, pad_len);
+                               offset += pad_len;
+                       }
+               } else {
+                       size_t pad_len;
+
+                       pad_len = pagecpy;
+                       if (pagecpy == len)
+                               pad_len--;      /* Final '\0' */
+                       lib_ring_buffer_do_memset(rpages->p[index].virt
+                                       + (offset & ~PAGE_MASK),
+                               pad, pad_len);
+                       offset += pad_len;
+               }
+       } while (unlikely(len != pagecpy));
+       /* Ending '\0' */
+       lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
+                       '\0', 1);
+}
+EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);
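
The copy loop above leans on lib_ring_buffer_do_strcpy, which lives in the backend headers rather than in this file. The contract the loop assumes is: copy at most to_copy bytes, stop before a '\0', and return how many bytes were actually written, leaving the caller to pad the remainder and place the final terminator. A minimal sketch of that contract (not the tree's actual implementation):

    #include <linux/compiler.h>	/* READ_ONCE */

    static inline
    size_t lib_ring_buffer_do_strcpy(const struct lib_ring_buffer_config *config,
                    char *dest, const char *src, size_t len)
    {
            size_t count;

            for (count = 0; count < len; count++) {
                    char c;

                    /*
                     * Read each source byte only once: the source may be
                     * modified concurrently, and a re-read could observe
                     * a different value than the one just tested.
                     */
                    c = READ_ONCE(src[count]);
                    if (!c)
                            break;
                    dest[count] = c;
            }
            return count;
    }

Note that the caller reserves one byte (to_copy--) when the chunk ends the record, so the string stored in the buffer is always '\0'-terminated, even when truncated.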
 
 /**
- * lib_ring_buffer_copy_from_user - write user data to a ring_buffer buffer.
+ * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
  * @bufb : buffer backend
  * @offset : offset within the buffer
  * @src : source address
@@ -570,10 +669,10 @@ EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
  * directly without having the src pointer checked with access_ok()
  * previously.
  */
-void _lib_ring_buffer_copy_from_user(struct lib_ring_buffer_backend *bufb,
+void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
                                      size_t offset,
                                      const void __user *src, size_t len,
-                                     ssize_t pagecpy)
+                                     size_t pagecpy)
 {
        struct channel_backend *chanb = &bufb->chan->backend;
        const struct lib_ring_buffer_config *config = &chanb->config;
@@ -601,7 +700,7 @@ void _lib_ring_buffer_copy_from_user(struct lib_ring_buffer_backend *bufb,
                rpages = bufb->array[sb_bindex];
                CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
                                && subbuffer_id_is_noref(config, id));
-               ret = lib_ring_buffer_do_copy_from_user(rpages->p[index].virt
+               ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
                                                        + (offset & ~PAGE_MASK),
                                                        src, pagecpy) != 0;
                if (ret > 0) {
@@ -612,7 +711,92 @@ void _lib_ring_buffer_copy_from_user(struct lib_ring_buffer_backend *bufb,
                }
        } while (unlikely(len != pagecpy));
 }
-EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user);
+EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);
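
The rename makes the calling constraints explicit: the _inatomic suffix, mirroring the kernel's __copy_from_user_inatomic(), signals that the copy may run in a context where page faults cannot be serviced by sleeping, e.g. from a tracepoint probe. The per-page helper is presumably a thin wrapper; a sketch under that assumption:

    #include <linux/uaccess.h>

    /*
     * Assumed thin wrapper: __copy_from_user_inatomic() returns the
     * number of bytes that could NOT be copied (0 on success) and is
     * safe to call with page faults disabled.
     */
    static inline
    unsigned long lib_ring_buffer_do_copy_from_user_inatomic(void *dest,
                    const void __user *src, unsigned long len)
    {
            return __copy_from_user_inatomic(dest, src, len);
    }

A non-zero return is folded into ret above, and the (elided) error branch presumably fills the rest of the record so no uninitialized buffer memory is ever exposed to readers.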
+
+/**
+ * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
+ * @bufb : buffer backend
+ * @offset : offset within the buffer
+ * @src : source address
+ * @len : length to write
+ * @pagecpy : number of bytes copied so far in the current page
+ * @pad : character to use for padding
+ *
+ * This function deals with userspace pointers, it should never be called
+ * directly without having the src pointer checked with access_ok()
+ * previously.
+ */
+void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
+               size_t offset, const char __user *src, size_t len,
+               size_t pagecpy, int pad)
+{
+       struct channel_backend *chanb = &bufb->chan->backend;
+       const struct lib_ring_buffer_config *config = &chanb->config;
+       size_t sbidx, index;
+       struct lib_ring_buffer_backend_pages *rpages;
+       unsigned long sb_bindex, id;
+       int src_terminated = 0;
+
+       offset += pagecpy;
+       do {
+               len -= pagecpy;
+               if (!src_terminated)
+                       src += pagecpy;
+               sbidx = offset >> chanb->subbuf_size_order;
+               index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+
+               /*
+                * Underlying layer should never ask for writes across
+                * subbuffers.
+                */
+               CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+
+               pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+               id = bufb->buf_wsb[sbidx].id;
+               sb_bindex = subbuffer_id_get_index(config, id);
+               rpages = bufb->array[sb_bindex];
+               CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+                               && subbuffer_id_is_noref(config, id));
+
+               if (likely(!src_terminated)) {
+                       size_t count, to_copy;
+
+                       to_copy = pagecpy;
+                       if (pagecpy == len)
+                               to_copy--;      /* Final '\0' */
+                       count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
+                                       rpages->p[index].virt
+                                               + (offset & ~PAGE_MASK),
+                                       src, to_copy);
+                       offset += count;
+                       /* Padding */
+                       if (unlikely(count < to_copy)) {
+                               size_t pad_len = to_copy - count;
+
+                               /* Next pages will have padding */
+                               src_terminated = 1;
+                               lib_ring_buffer_do_memset(rpages->p[index].virt
+                                               + (offset & ~PAGE_MASK),
+                                       pad, pad_len);
+                               offset += pad_len;
+                       }
+               } else {
+                       size_t pad_len;
+
+                       pad_len = pagecpy;
+                       if (pagecpy == len)
+                               pad_len--;      /* Final '\0' */
+                       lib_ring_buffer_do_memset(rpages->p[index].virt
+                                       + (offset & ~PAGE_MASK),
+                               pad, pad_len);
+                       offset += pad_len;
+               }
+       } while (unlikely(len != pagecpy));
+       /* Ending '\0' */
+       lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
+                       '\0', 1);
+}
+EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);
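
This is the user-space twin of _lib_ring_buffer_strcpy; only the per-byte copy primitive differs. Under the same assumed contract as the kernel-string helper sketched earlier, it could look like this byte-at-a-time sketch, where a fault is treated like the end of the string so the caller's padding path takes over:

    #include <linux/uaccess.h>

    /*
     * Hypothetical sketch: copy up to @len bytes of a user string,
     * stopping on '\0' or on fault; returns bytes actually copied.
     */
    static inline
    size_t lib_ring_buffer_do_strcpy_from_user_inatomic(
                    const struct lib_ring_buffer_config *config,
                    char *dest, const char __user *src, size_t len)
    {
            size_t count;

            for (count = 0; count < len; count++) {
                    char c;

                    /* Non-zero return: the byte could not be copied. */
                    if (__copy_from_user_inatomic(&c, src + count, 1))
                            break;
                    if (!c)
                            break;
                    dest[count] = c;
            }
            return count;
    }

Because count then lags to_copy, the caller sets src_terminated and pads every remaining byte with pad, exactly as in the kernel-string variant above.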
 
 /**
  * lib_ring_buffer_read - read data from a ring_buffer buffer.
@@ -629,8 +813,7 @@ size_t lib_ring_buffer_read(struct lib_ring_buffer_backend *bufb, size_t offset,
 {
        struct channel_backend *chanb = &bufb->chan->backend;
        const struct lib_ring_buffer_config *config = &chanb->config;
-       size_t index;
-       ssize_t pagecpy, orig_len;
+       size_t index, pagecpy, orig_len;
        struct lib_ring_buffer_backend_pages *rpages;
        unsigned long sb_bindex, id;
 
@@ -724,8 +907,9 @@ EXPORT_SYMBOL_GPL(__lib_ring_buffer_copy_to_user);
  * @dest : destination address
  * @len : destination's length
  *
- * return string's length
+ * Return string's length, or -EINVAL on error.
  * Should be protected by get_subbuf/put_subbuf.
+ * Destination length should be at least 1 to hold '\0'.
  */
 int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
                              void *dest, size_t len)
@@ -741,6 +925,8 @@ int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
        offset &= chanb->buf_size - 1;
        index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
        orig_offset = offset;
+       if (unlikely(!len))
+               return -EINVAL;
        for (;;) {
                id = bufb->buf_rsb.id;
                sb_bindex = subbuffer_id_get_index(config, id);
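
With the new guard, a zero-length destination now fails fast with -EINVAL instead of attempting to place the mandatory trailing '\0' in a zero-byte buffer. A hypothetical reader-side caller (names assumed), after taking the sub-buffer:

    char name[64];
    int ret;

    /* get_subbuf/put_subbuf protection is assumed to be held. */
    ret = lib_ring_buffer_read_cstr(bufb, offset, name, sizeof(name));
    if (ret < 0)
            return ret;	/* -EINVAL on bad arguments */
    pr_debug("read %d-byte string: %s\n", ret, name);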
@@ -811,8 +997,9 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_read_get_page);
  *
  * Return the address where a given offset is located (for read).
  * Should be used to get the current subbuffer header pointer. Given we know
- * it's never on a page boundary, it's safe to write directly to this address,
- * as long as the write is never bigger than a page size.
+ * it's never on a page boundary, it's safe to read/write directly
+ * from/to this address, as long as the read/write is never bigger than a
+ * page size.
  */
 void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
                                          size_t offset)