/*
* ring_buffer_backend.c
*
- * Copyright (C) 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
- * Dual LGPL v2.1/GPL v2 license.
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/stddef.h>
int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
struct channel_backend *chanb, int cpu)
{
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
bufb->chan = container_of(chanb, struct channel, backend);
bufb->cpu = cpu;
void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
unsigned long num_subbuf_alloc;
unsigned int i;
void channel_backend_reset(struct channel_backend *chanb)
{
struct channel *chan = container_of(chanb, struct channel, backend);
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
/*
* Don't reset buf_size, subbuf_size, subbuf_size_order,
* Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
*/
static
-int __cpuinit lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
+int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
unsigned long action,
void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
struct channel_backend *chanb = container_of(nb, struct channel_backend,
cpu_hp_notifier);
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
struct lib_ring_buffer *buf;
int ret;
if (!name)
return -EPERM;
- if (!(subbuf_size && num_subbuf))
- return -EPERM;
-
/* Check that the subbuffer size is at least as large as a page. */
if (subbuf_size < PAGE_SIZE)
return -EINVAL;
/*
- * Make sure the number of subbuffers and subbuffer size are power of 2.
+ * Make sure the number of subbuffers and the subbuffer size are
+ * powers of 2 and nonzero.
+ */
+ if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
+ return -EINVAL;
+ if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
+ return -EINVAL;
+ /*
+ * Overwrite mode buffers require at least 2 subbuffers per
+ * buffer.
*/
- CHAN_WARN_ON(chanb, hweight32(subbuf_size) != 1);
- CHAN_WARN_ON(chanb, hweight32(num_subbuf) != 1);
+ if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
+ return -EINVAL;
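/*
 * A minimal standalone restatement of the power-of-2 test used above:
 * a nonzero value is a power of 2 iff clearing its lowest set bit
 * with x & (x - 1) yields zero. Helper name is hypothetical, for
 * illustration only.
 */
static inline int rb_is_power_of_2(unsigned long x)
{
	return x && !(x & (x - 1));
}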
ret = subbuffer_id_check_index(config, num_subbuf);
if (ret)
(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
chanb->num_subbuf = num_subbuf;
strlcpy(chanb->name, name, NAME_MAX);
- chanb->config = config;
+ memcpy(&chanb->config, config, sizeof(chanb->config));
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
if (!zalloc_cpumask_var(&chanb->cpumask, GFP_KERNEL))
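/*
 * The chanb->config -> &chanb->config replacements throughout this
 * patch follow from the memcpy() above: the channel backend now
 * embeds a private copy of the config by value instead of pointing
 * at caller-owned memory. A simplified sketch of the assumed layout,
 * with a hypothetical subset of fields:
 */
struct channel_backend_sketch {
	unsigned long buf_size;			/* size of a buffer */
	struct lib_ring_buffer_config config;	/* embedded copy, formerly a pointer */
};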
*/
void channel_backend_unregister_notifiers(struct channel_backend *chanb)
{
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
*/
void channel_backend_free(struct channel_backend *chanb)
{
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
unsigned int i;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
const void *src, size_t len, ssize_t pagecpy)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
size_t sbidx, index;
struct lib_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
int c, size_t len, ssize_t pagecpy)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
size_t sbidx, index;
struct lib_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
}
EXPORT_SYMBOL_GPL(_lib_ring_buffer_memset);
+/**
+ * lib_ring_buffer_strcpy - write string data to a ring_buffer buffer.
+ * @bufb : buffer backend
+ * @offset : offset within the buffer
+ * @src : source address
+ * @len : length to write
+ * @pagecpy : number of bytes already copied within the current page
+ * @pad : character to use for padding
+ */
+void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
+ size_t offset, const char *src, size_t len,
+ size_t pagecpy, int pad)
+{
+ struct channel_backend *chanb = &bufb->chan->backend;
+ const struct lib_ring_buffer_config *config = &chanb->config;
+ size_t sbidx, index;
+ struct lib_ring_buffer_backend_pages *rpages;
+ unsigned long sb_bindex, id;
+ int src_terminated = 0;
+
+ CHAN_WARN_ON(chanb, !len);
+ offset += pagecpy;
+ do {
+ len -= pagecpy;
+ if (!src_terminated)
+ src += pagecpy;
+ sbidx = offset >> chanb->subbuf_size_order;
+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+
+ /*
+ * Underlying layer should never ask for writes across
+ * subbuffers.
+ */
+ CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+
+ pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+ id = bufb->buf_wsb[sbidx].id;
+ sb_bindex = subbuffer_id_get_index(config, id);
+ rpages = bufb->array[sb_bindex];
+ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+ && subbuffer_id_is_noref(config, id));
+
+ if (likely(!src_terminated)) {
+ size_t count, to_copy;
+
+ to_copy = pagecpy;
+ if (pagecpy == len)
+ to_copy--; /* Final '\0' */
+ count = lib_ring_buffer_do_strcpy(config,
+ rpages->p[index].virt
+ + (offset & ~PAGE_MASK),
+ src, to_copy);
+ offset += count;
+ /* Padding */
+ if (unlikely(count < to_copy)) {
+ size_t pad_len = to_copy - count;
+
+ /* Next pages will have padding */
+ src_terminated = 1;
+ lib_ring_buffer_do_memset(rpages->p[index].virt
+ + (offset & ~PAGE_MASK),
+ pad, pad_len);
+ offset += pad_len;
+ }
+ } else {
+ size_t pad_len;
+
+ pad_len = pagecpy;
+ if (pagecpy == len)
+ pad_len--; /* Final '\0' */
+ lib_ring_buffer_do_memset(rpages->p[index].virt
+ + (offset & ~PAGE_MASK),
+ pad, pad_len);
+ offset += pad_len;
+ }
+ } while (unlikely(len != pagecpy));
+ /* Ending '\0' */
+ lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
+ '\0', 1);
+}
+EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy);
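/*
 * The fill semantics above, modeled as a self-contained helper
 * (hypothetical name, ignoring the page-by-page chunking the real
 * code performs): copy source bytes until the source ends or
 * len - 1 bytes are written, pad the remainder with @pad, and always
 * terminate with '\0'. Assumes len >= 1, as the CHAN_WARN_ON above
 * enforces.
 */
static void strcpy_pad_model(char *dst, const char *src, size_t len, int pad)
{
	size_t i;

	for (i = 0; i < len - 1 && src[i]; i++)
		dst[i] = src[i];	/* copy until source terminates */
	for (; i < len - 1; i++)
		dst[i] = pad;		/* pad after an early terminator */
	dst[len - 1] = '\0';		/* record always ends with '\0' */
}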
/**
- * lib_ring_buffer_copy_from_user - write user data to a ring_buffer buffer.
+ * lib_ring_buffer_copy_from_user_inatomic - write user data to a ring_buffer buffer.
* @bufb : buffer backend
* @offset : offset within the buffer
* @src : source address
* directly without having the src pointer checked with access_ok()
* previously.
*/
-void _lib_ring_buffer_copy_from_user(struct lib_ring_buffer_backend *bufb,
+void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
size_t offset,
const void __user *src, size_t len,
ssize_t pagecpy)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
size_t sbidx, index;
struct lib_ring_buffer_backend_pages *rpages;
unsigned long sb_bindex, id;
rpages = bufb->array[sb_bindex];
CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, id));
- ret = lib_ring_buffer_do_copy_from_user(rpages->p[index].virt
+ ret = lib_ring_buffer_do_copy_from_user_inatomic(rpages->p[index].virt
+ (offset & ~PAGE_MASK),
src, pagecpy) != 0;
if (ret > 0) {
}
} while (unlikely(len != pagecpy));
}
-EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user);
+EXPORT_SYMBOL_GPL(_lib_ring_buffer_copy_from_user_inatomic);
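/*
 * The _inatomic suffix adopted above signals that the copy can run
 * with preemption disabled, where a page fault must not sleep. A
 * common kernel pattern for such copies, sketched with a hypothetical
 * wrapper name (pagefault_disable() and __copy_from_user_inatomic()
 * are declared in <linux/uaccess.h>):
 */
static inline unsigned long copy_from_user_nofault_sketch(void *dest,
		const void __user *src, unsigned long len)
{
	unsigned long ret;

	pagefault_disable();	/* faults fail fast instead of sleeping */
	ret = __copy_from_user_inatomic(dest, src, len);
	pagefault_enable();
	return ret;		/* number of bytes NOT copied */
}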
+
+/**
+ * lib_ring_buffer_strcpy_from_user_inatomic - write userspace string data to a ring_buffer buffer.
+ * @bufb : buffer backend
+ * @offset : offset within the buffer
+ * @src : source address
+ * @len : length to write
+ * @pagecpy : number of bytes already copied within the current page
+ * @pad : character to use for padding
+ *
+ * This function deals with userspace pointers; it should never be called
+ * directly without having the src pointer checked with access_ok()
+ * previously.
+ */
+void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
+ size_t offset, const char __user *src, size_t len,
+ size_t pagecpy, int pad)
+{
+ struct channel_backend *chanb = &bufb->chan->backend;
+ const struct lib_ring_buffer_config *config = &chanb->config;
+ size_t sbidx, index;
+ struct lib_ring_buffer_backend_pages *rpages;
+ unsigned long sb_bindex, id;
+ int src_terminated = 0;
+
+ offset += pagecpy;
+ do {
+ len -= pagecpy;
+ if (!src_terminated)
+ src += pagecpy;
+ sbidx = offset >> chanb->subbuf_size_order;
+ index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
+
+ /*
+ * Underlying layer should never ask for writes across
+ * subbuffers.
+ */
+ CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+
+ pagecpy = min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
+ id = bufb->buf_wsb[sbidx].id;
+ sb_bindex = subbuffer_id_get_index(config, id);
+ rpages = bufb->array[sb_bindex];
+ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+ && subbuffer_id_is_noref(config, id));
+
+ if (likely(!src_terminated)) {
+ size_t count, to_copy;
+
+ to_copy = pagecpy;
+ if (pagecpy == len)
+ to_copy--; /* Final '\0' */
+ count = lib_ring_buffer_do_strcpy_from_user_inatomic(config,
+ rpages->p[index].virt
+ + (offset & ~PAGE_MASK),
+ src, to_copy);
+ offset += count;
+ /* Padding */
+ if (unlikely(count < to_copy)) {
+ size_t pad_len = to_copy - count;
+
+ /* Next pages will have padding */
+ src_terminated = 1;
+ lib_ring_buffer_do_memset(rpages->p[index].virt
+ + (offset & ~PAGE_MASK),
+ pad, pad_len);
+ offset += pad_len;
+ }
+ } else {
+ size_t pad_len;
+
+ pad_len = pagecpy;
+ if (pagecpy == len)
+ pad_len--; /* Final '\0' */
+ lib_ring_buffer_do_memset(rpages->p[index].virt
+ + (offset & ~PAGE_MASK),
+ pad, pad_len);
+ offset += pad_len;
+ }
+ } while (unlikely(len != pagecpy));
+ /* Ending '\0' */
+ lib_ring_buffer_do_memset(rpages->p[index].virt + (offset & ~PAGE_MASK),
+ '\0', 1);
+}
+EXPORT_SYMBOL_GPL(_lib_ring_buffer_strcpy_from_user_inatomic);
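/*
 * Both string writers above advance page by page: each loop iteration
 * copies at most the bytes remaining in the current page. A standalone
 * restatement of that bound (helper name hypothetical):
 */
static inline size_t rb_bytes_left_in_page(size_t offset, size_t len)
{
	/* (offset & ~PAGE_MASK) is the offset within the page */
	return min_t(size_t, len, PAGE_SIZE - (offset & ~PAGE_MASK));
}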
/**
* lib_ring_buffer_read - read data from a ring_buffer buffer.
void *dest, size_t len)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
size_t index;
ssize_t pagecpy, orig_len;
struct lib_ring_buffer_backend_pages *rpages;
size_t offset, void __user *dest, size_t len)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
size_t index;
ssize_t pagecpy;
struct lib_ring_buffer_backend_pages *rpages;
* @dest : destination address
* @len : destination's length
*
- * return string's length
+ * Return the string's length, or -EINVAL on error.
* Should be protected by get_subbuf/put_subbuf.
+ * The destination length must be at least 1, to hold the ending '\0'.
*/
int lib_ring_buffer_read_cstr(struct lib_ring_buffer_backend *bufb, size_t offset,
void *dest, size_t len)
{
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
size_t index;
ssize_t pagecpy, pagelen, strpagelen, orig_offset;
char *str;
offset &= chanb->buf_size - 1;
index = (offset & (chanb->subbuf_size - 1)) >> PAGE_SHIFT;
orig_offset = offset;
+ if (unlikely(!len))
+ return -EINVAL;
for (;;) {
id = bufb->buf_rsb.id;
sb_bindex = subbuffer_id_get_index(config, id);
size_t index;
struct lib_ring_buffer_backend_pages *rpages;
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;
*
* Return the address where a given offset is located (for read).
* Should be used to get the current subbuffer header pointer. Given we know
- * it's never on a page boundary, it's safe to write directly to this address,
- * as long as the write is never bigger than a page size.
+ * it's never on a page boundary, it's safe to read/write directly
+ * from/to this address, as long as the read/write is never larger
+ * than a page.
*/
void *lib_ring_buffer_read_offset_address(struct lib_ring_buffer_backend *bufb,
size_t offset)
size_t index;
struct lib_ring_buffer_backend_pages *rpages;
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;
size_t sbidx, index;
struct lib_ring_buffer_backend_pages *rpages;
struct channel_backend *chanb = &bufb->chan->backend;
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
unsigned long sb_bindex, id;
offset &= chanb->buf_size - 1;