* Dual LGPL v2.1/GPL v2 license.
*/
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <assert.h>
+#include <stdlib.h>
#include <urcu/compiler.h>
+#include <urcu/ref.h>
-#include "config.h"
+#include "smp.h"
+#include <ust/ringbuffer-config.h>
#include "backend.h"
#include "frontend.h"
-#include "iterator.h"
-#include "nohz.h"
+#include "shm.h"
+
+#ifndef max
+#define max(a, b) ((a) > (b) ? (a) : (b))
+#endif
+
+/*
+ * Use POSIX SHM: shm_open(3) and shm_unlink(3).
+ * close(2) to close the fd returned by shm_open.
+ * shm_unlink releases the shared memory object name.
+ * ftruncate(2) sets the size of the memory object.
+ * mmap/munmap maps the shared memory object to a virtual address in the
+ * calling process (should be done both in libust and in the consumer).
+ * See shm_overview(7) for details.
+ * Pass file descriptor returned by shm_open(3) to ltt-sessiond through
+ * a UNIX socket.
+ *
+ * Since we don't need to access the object using its name, we can
+ * immediately shm_unlink(3) it, and only keep the handle with its file
+ * descriptor.
+ */
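+
+/*
+ * Illustrative sketch (an assumption, not code used by this file) of the
+ * call sequence described above, with a hypothetical object name and size
+ * "len", error handling omitted:
+ *
+ *	int shmfd = shm_open("/ust-shm-example", O_CREAT | O_EXCL | O_RDWR, 0700);
+ *	shm_unlink("/ust-shm-example");	// drop the name, keep only the fd
+ *	ftruncate(shmfd, len);		// set the shared memory object size
+ *	char *base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, shmfd, 0);
+ *	// ... pass shmfd to ltt-sessiond over a UNIX socket, then
+ *	// munmap(base, len) and close(shmfd) at teardown.
+ */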
/*
* Internal structure representing offsets to use at a sub-buffer switch.
switch_old_end:1;
};
-#ifdef CONFIG_NO_HZ
-enum tick_nohz_val {
- TICK_NOHZ_STOP,
- TICK_NOHZ_FLUSH,
- TICK_NOHZ_RESTART,
-};
-
-static ATOMIC_NOTIFIER_HEAD(tick_nohz_notifier);
-#endif /* CONFIG_NO_HZ */
-
-static DEFINE_PER_CPU(spinlock_t, ring_buffer_nohz_lock);
-
-DEFINE_PER_CPU(unsigned int, lib_ring_buffer_nesting);
-EXPORT_PER_CPU_SYMBOL(lib_ring_buffer_nesting);
+__thread unsigned int lib_ring_buffer_nesting;
static
void lib_ring_buffer_print_errors(struct channel *chan,
- struct lib_ring_buffer *buf, int cpu);
+ struct lib_ring_buffer *buf, int cpu,
+ struct shm_handle *handle);
/*
* Must be called under cpu hotplug protection.
*/
-void lib_ring_buffer_free(struct lib_ring_buffer *buf)
+void lib_ring_buffer_free(struct lib_ring_buffer *buf,
+ struct shm_handle *handle)
{
- struct channel *chan = buf->backend.chan;
+ struct channel *chan = shmp(handle, buf->backend.chan);
- lib_ring_buffer_print_errors(chan, buf, buf->backend.cpu);
- kfree(buf->commit_hot);
- kfree(buf->commit_cold);
+ lib_ring_buffer_print_errors(chan, buf, buf->backend.cpu, handle);
+ /* buf->commit_hot will be freed by shm teardown */
+ /* buf->commit_cold will be freed by shm teardown */
lib_ring_buffer_backend_free(&buf->backend);
}
* should not be using the iterator concurrently with reset. The previous
* current iterator record is reset.
*/
-void lib_ring_buffer_reset(struct lib_ring_buffer *buf)
+void lib_ring_buffer_reset(struct lib_ring_buffer *buf,
+ struct shm_handle *handle)
{
- struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ struct channel *chan = shmp(handle, buf->backend.chan);
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned int i;
- /*
- * Reset iterator first. It will put the subbuffer if it currently holds
- * it.
- */
- lib_ring_buffer_iterator_reset(buf);
v_set(config, &buf->offset, 0);
for (i = 0; i < chan->backend.num_subbuf; i++) {
- v_set(config, &buf->commit_hot[i].cc, 0);
- v_set(config, &buf->commit_hot[i].seq, 0);
- v_set(config, &buf->commit_cold[i].cc_sb, 0);
+ v_set(config, &shmp_index(handle, buf->commit_hot, i)->cc, 0);
+ v_set(config, &shmp_index(handle, buf->commit_hot, i)->seq, 0);
+ v_set(config, &shmp_index(handle, buf->commit_cold, i)->cc_sb, 0);
}
- atomic_long_set(&buf->consumed, 0);
- atomic_set(&buf->record_disabled, 0);
+ uatomic_set(&buf->consumed, 0);
+ uatomic_set(&buf->record_disabled, 0);
v_set(config, &buf->last_tsc, 0);
- lib_ring_buffer_backend_reset(&buf->backend);
+ lib_ring_buffer_backend_reset(&buf->backend, handle);
/* Don't reset number of active readers */
v_set(config, &buf->records_lost_full, 0);
v_set(config, &buf->records_lost_wrap, 0);
v_set(config, &buf->records_overrun, 0);
buf->finalized = 0;
}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_reset);
/**
* channel_reset - Reset channel to initial values.
- /*
- * Reset iterators first. Will put the subbuffer if held for reading.
- */
- channel_iterator_reset(chan);
- atomic_set(&chan->record_disabled, 0);
+ uatomic_set(&chan->record_disabled, 0);
/* Don't reset commit_count_mask, still valid */
channel_backend_reset(&chan->backend);
/* Don't reset switch/read timer interval */
/* Don't reset notifiers and notifier enable bits */
/* Don't reset reader reference count */
}
-EXPORT_SYMBOL_GPL(channel_reset);
/*
* Must be called under cpu hotplug protection.
*/
int lib_ring_buffer_create(struct lib_ring_buffer *buf,
- struct channel_backend *chanb, int cpu)
+ struct channel_backend *chanb, int cpu,
+ struct shm_handle *handle,
+ struct shm_object *shmobj)
{
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
struct channel *chan = caa_container_of(chanb, struct channel, backend);
void *priv = chanb->priv;
unsigned int num_subbuf;
if (buf->backend.allocated)
return 0;
- /*
- * Paranoia: per cpu dynamic allocation is not officially documented as
- * zeroing the memory, so let's do it here too, just in case.
- */
- memset(buf, 0, sizeof(*buf));
-
- ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend, cpu);
+ ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend,
+ cpu, handle, shmobj);
if (ret)
return ret;
- buf->commit_hot =
- kzalloc_node(ALIGN(sizeof(*buf->commit_hot)
- * chan->backend.num_subbuf,
- 1 << INTERNODE_CACHE_SHIFT),
- GFP_KERNEL, cpu_to_node(max(cpu, 0)));
- if (!buf->commit_hot) {
+ align_shm(shmobj, __alignof__(struct commit_counters_hot));
+ set_shmp(buf->commit_hot,
+ zalloc_shm(shmobj,
+ sizeof(struct commit_counters_hot) * chan->backend.num_subbuf));
+ if (!shmp(handle, buf->commit_hot)) {
ret = -ENOMEM;
goto free_chanbuf;
}
- buf->commit_cold =
- kzalloc_node(ALIGN(sizeof(*buf->commit_cold)
- * chan->backend.num_subbuf,
- 1 << INTERNODE_CACHE_SHIFT),
- GFP_KERNEL, cpu_to_node(max(cpu, 0)));
- if (!buf->commit_cold) {
+ align_shm(shmobj, __alignof__(struct commit_counters_cold));
+ set_shmp(buf->commit_cold,
+ zalloc_shm(shmobj,
+ sizeof(struct commit_counters_cold) * chan->backend.num_subbuf));
+ if (!shmp(handle, buf->commit_cold)) {
ret = -ENOMEM;
goto free_commit;
}
num_subbuf = chan->backend.num_subbuf;
- init_waitqueue_head(&buf->read_wait);
- raw_spin_lock_init(&buf->raw_tick_nohz_spinlock);
+ //init_waitqueue_head(&buf->read_wait);
/*
* Write the subbuffer header for first subbuffer so we know the total
*/
subbuf_header_size = config->cb.subbuffer_header_size();
v_set(config, &buf->offset, subbuf_header_size);
- subbuffer_id_clear_noref(config, &buf->backend.buf_wsb[0].id);
- tsc = config->cb.ring_buffer_clock_read(buf->backend.chan);
- config->cb.buffer_begin(buf, tsc, 0);
- v_add(config, subbuf_header_size, &buf->commit_hot[0].cc);
+ subbuffer_id_clear_noref(config, &shmp_index(handle, buf->backend.buf_wsb, 0)->id);
+ tsc = config->cb.ring_buffer_clock_read(shmp(handle, buf->backend.chan));
+ config->cb.buffer_begin(buf, tsc, 0, handle);
+ v_add(config, subbuf_header_size, &shmp_index(handle, buf->commit_hot, 0)->cc);
if (config->cb.buffer_create) {
- ret = config->cb.buffer_create(buf, priv, cpu, chanb->name);
+ ret = config->cb.buffer_create(buf, priv, cpu, chanb->name, handle);
if (ret)
goto free_init;
}
-
- /*
- * Ensure the buffer is ready before setting it to allocated and setting
- * the cpumask.
- * Used for cpu hotplug vs cpumask iteration.
- */
- smp_wmb();
buf->backend.allocated = 1;
-
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- CHAN_WARN_ON(chan, cpumask_test_cpu(cpu,
- chan->backend.cpumask));
- cpumask_set_cpu(cpu, chan->backend.cpumask);
- }
-
return 0;
/* Error handling */
free_init:
- kfree(buf->commit_cold);
+ /* commit_cold will be freed by shm teardown */
free_commit:
- kfree(buf->commit_hot);
+ /* commit_hot will be freed by shm teardown */
free_chanbuf:
lib_ring_buffer_backend_free(&buf->backend);
return ret;
}
+#if 0
static void switch_buffer_timer(unsigned long data)
{
struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
- struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ struct channel *chan = shmp(handle, buf->backend.chan);
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
/*
* Only flush buffers periodically if readers are active.
*/
- if (atomic_long_read(&buf->active_readers))
- lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
-
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- mod_timer_pinned(&buf->switch_timer,
- jiffies + chan->switch_timer_interval);
- else
- mod_timer(&buf->switch_timer,
- jiffies + chan->switch_timer_interval);
+ if (uatomic_read(&buf->active_readers) || uatomic_read(&buf->active_shadow_readers))
+ lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE, handle);
+
+ //TODO timers
+ //if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+ // mod_timer_pinned(&buf->switch_timer,
+ // jiffies + chan->switch_timer_interval);
+ //else
+ // mod_timer(&buf->switch_timer,
+ // jiffies + chan->switch_timer_interval);
}
+#endif //0
-/*
- * Called with ring_buffer_nohz_lock held for per-cpu buffers.
- */
-static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf,
+ struct shm_handle *handle)
{
- struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ struct channel *chan = shmp(handle, buf->backend.chan);
+ //const struct lib_ring_buffer_config *config = &chan->backend.config;
if (!chan->switch_timer_interval || buf->switch_timer_enabled)
return;
- init_timer(&buf->switch_timer);
- buf->switch_timer.function = switch_buffer_timer;
- buf->switch_timer.expires = jiffies + chan->switch_timer_interval;
- buf->switch_timer.data = (unsigned long)buf;
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- add_timer_on(&buf->switch_timer, buf->backend.cpu);
- else
- add_timer(&buf->switch_timer);
+ //TODO
+ //init_timer(&buf->switch_timer);
+ //buf->switch_timer.function = switch_buffer_timer;
+ //buf->switch_timer.expires = jiffies + chan->switch_timer_interval;
+ //buf->switch_timer.data = (unsigned long)buf;
+ //if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+ // add_timer_on(&buf->switch_timer, buf->backend.cpu);
+ //else
+ // add_timer(&buf->switch_timer);
buf->switch_timer_enabled = 1;
}
-/*
- * Called with ring_buffer_nohz_lock held for per-cpu buffers.
- */
-static void lib_ring_buffer_stop_switch_timer(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_stop_switch_timer(struct lib_ring_buffer *buf,
+ struct shm_handle *handle)
{
- struct channel *chan = buf->backend.chan;
+ struct channel *chan = shmp(handle, buf->backend.chan);
if (!chan->switch_timer_interval || !buf->switch_timer_enabled)
return;
- del_timer_sync(&buf->switch_timer);
+ //TODO
+ //del_timer_sync(&buf->switch_timer);
buf->switch_timer_enabled = 0;
}
+#if 0
/*
* Polling timer to check the channels for data.
*/
static void read_buffer_timer(unsigned long data)
{
struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
- struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ struct channel *chan = shmp(handle, buf->backend.chan);
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
CHAN_WARN_ON(chan, !buf->backend.allocated);
- if (atomic_long_read(&buf->active_readers)
+ if ((uatomic_read(&buf->active_readers) || uatomic_read(&buf->active_shadow_readers))
&& lib_ring_buffer_poll_deliver(config, buf, chan)) {
- wake_up_interruptible(&buf->read_wait);
- wake_up_interruptible(&chan->read_wait);
+ //TODO
+ //wake_up_interruptible(&buf->read_wait);
+ //wake_up_interruptible(&chan->read_wait);
}
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- mod_timer_pinned(&buf->read_timer,
- jiffies + chan->read_timer_interval);
- else
- mod_timer(&buf->read_timer,
- jiffies + chan->read_timer_interval);
+ //TODO
+ //if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+ // mod_timer_pinned(&buf->read_timer,
+ // jiffies + chan->read_timer_interval);
+ //else
+ // mod_timer(&buf->read_timer,
+ // jiffies + chan->read_timer_interval);
}
+#endif //0
-/*
- * Called with ring_buffer_nohz_lock held for per-cpu buffers.
- */
-static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf,
+ struct shm_handle *handle)
{
- struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ struct channel *chan = shmp(handle, buf->backend.chan);
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
|| !chan->read_timer_interval
|| buf->read_timer_enabled)
return;
- init_timer(&buf->read_timer);
- buf->read_timer.function = read_buffer_timer;
- buf->read_timer.expires = jiffies + chan->read_timer_interval;
- buf->read_timer.data = (unsigned long)buf;
+ //TODO
+ //init_timer(&buf->read_timer);
+ //buf->read_timer.function = read_buffer_timer;
+ //buf->read_timer.expires = jiffies + chan->read_timer_interval;
+ //buf->read_timer.data = (unsigned long)buf;
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- add_timer_on(&buf->read_timer, buf->backend.cpu);
- else
- add_timer(&buf->read_timer);
+ //if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+ // add_timer_on(&buf->read_timer, buf->backend.cpu);
+ //else
+ // add_timer(&buf->read_timer);
buf->read_timer_enabled = 1;
}
-/*
- * Called with ring_buffer_nohz_lock held for per-cpu buffers.
- */
-static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf,
+ struct shm_handle *handle)
{
- struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ struct channel *chan = shmp(handle, buf->backend.chan);
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
|| !chan->read_timer_interval
|| !buf->read_timer_enabled)
return;
- del_timer_sync(&buf->read_timer);
+ //TODO
+ //del_timer_sync(&buf->read_timer);
/*
* do one more check to catch data that has been written in the last
* timer period.
*/
- if (lib_ring_buffer_poll_deliver(config, buf, chan)) {
- wake_up_interruptible(&buf->read_wait);
- wake_up_interruptible(&chan->read_wait);
+ if (lib_ring_buffer_poll_deliver(config, buf, chan, handle)) {
+ //TODO
+ //wake_up_interruptible(&buf->read_wait);
+ //wake_up_interruptible(&chan->read_wait);
}
buf->read_timer_enabled = 0;
}
-#ifdef CONFIG_HOTPLUG_CPU
-/**
- * lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
- * @nb: notifier block
- * @action: hotplug action to take
- * @hcpu: CPU number
- *
- * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
- */
-static
-int __cpuinit lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
- unsigned long action,
- void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
- struct channel *chan = caa_container_of(nb, struct channel,
- cpu_hp_notifier);
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
- const struct lib_ring_buffer_config *config = chan->backend.config;
-
- if (!chan->cpu_hp_enable)
- return NOTIFY_DONE;
-
- CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
-
- switch (action) {
- case CPU_DOWN_FAILED:
- case CPU_DOWN_FAILED_FROZEN:
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- wake_up_interruptible(&chan->hp_wait);
- lib_ring_buffer_start_switch_timer(buf);
- lib_ring_buffer_start_read_timer(buf);
- return NOTIFY_OK;
-
- case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- lib_ring_buffer_stop_switch_timer(buf);
- lib_ring_buffer_stop_read_timer(buf);
- return NOTIFY_OK;
-
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- /*
- * Performing a buffer switch on a remote CPU. Performed by
- * the CPU responsible for doing the hotunplug after the target
- * CPU stopped running completely. Ensures that all data
- * from that remote CPU is flushed.
- */
- lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
- return NOTIFY_OK;
-
- default:
- return NOTIFY_DONE;
- }
-}
-#endif
-
-#if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
-/*
- * For per-cpu buffers, call the reader wakeups before switching the buffer, so
- * that wake-up-tracing generated events are flushed before going idle (in
- * tick_nohz). We test if the spinlock is locked to deal with the race where
- * readers try to sample the ring buffer before we perform the switch. We let
- * the readers retry in that case. If there is data in the buffer, the wake up
- * is going to forbid the CPU running the reader thread from going idle.
- */
-static int notrace ring_buffer_tick_nohz_callback(struct notifier_block *nb,
- unsigned long val,
- void *data)
-{
- struct channel *chan = caa_container_of(nb, struct channel,
- tick_nohz_notifier);
- const struct lib_ring_buffer_config *config = chan->backend.config;
- struct lib_ring_buffer *buf;
- int cpu = smp_processor_id();
-
- if (config->alloc != RING_BUFFER_ALLOC_PER_CPU) {
- /*
- * We don't support keeping the system idle with global buffers
- * and streaming active. In order to do so, we would need to
- * sample a non-nohz-cpumask racelessly with the nohz updates
- * without adding synchronization overhead to nohz. Leave this
- * use-case out for now.
- */
- return 0;
- }
-
- buf = channel_get_ring_buffer(config, chan, cpu);
- switch (val) {
- case TICK_NOHZ_FLUSH:
- raw_spin_lock(&buf->raw_tick_nohz_spinlock);
- if (config->wakeup == RING_BUFFER_WAKEUP_BY_TIMER
- && chan->read_timer_interval
- && atomic_long_read(&buf->active_readers)
- && (lib_ring_buffer_poll_deliver(config, buf, chan)
- || lib_ring_buffer_pending_data(config, buf, chan))) {
- wake_up_interruptible(&buf->read_wait);
- wake_up_interruptible(&chan->read_wait);
- }
- if (chan->switch_timer_interval)
- lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
- raw_spin_unlock(&buf->raw_tick_nohz_spinlock);
- break;
- case TICK_NOHZ_STOP:
- spin_lock(&__get_cpu_var(ring_buffer_nohz_lock));
- lib_ring_buffer_stop_switch_timer(buf);
- lib_ring_buffer_stop_read_timer(buf);
- spin_unlock(&__get_cpu_var(ring_buffer_nohz_lock));
- break;
- case TICK_NOHZ_RESTART:
- spin_lock(&__get_cpu_var(ring_buffer_nohz_lock));
- lib_ring_buffer_start_read_timer(buf);
- lib_ring_buffer_start_switch_timer(buf);
- spin_unlock(&__get_cpu_var(ring_buffer_nohz_lock));
- break;
- }
-
- return 0;
-}
-
-void notrace lib_ring_buffer_tick_nohz_flush(void)
-{
- atomic_notifier_call_chain(&tick_nohz_notifier, TICK_NOHZ_FLUSH,
- NULL);
-}
-
-void notrace lib_ring_buffer_tick_nohz_stop(void)
+static void channel_unregister_notifiers(struct channel *chan,
+ struct shm_handle *handle)
{
- atomic_notifier_call_chain(&tick_nohz_notifier, TICK_NOHZ_STOP,
- NULL);
-}
-
-void notrace lib_ring_buffer_tick_nohz_restart(void)
-{
- atomic_notifier_call_chain(&tick_nohz_notifier, TICK_NOHZ_RESTART,
- NULL);
-}
-#endif /* defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) */
-
-/*
- * Holds CPU hotplug.
- */
-static void channel_unregister_notifiers(struct channel *chan)
-{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
int cpu;
- channel_iterator_unregister_notifiers(chan);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#ifdef CONFIG_NO_HZ
- /*
- * Remove the nohz notifier first, so we are certain we stop
- * the timers.
- */
- atomic_notifier_chain_unregister(&tick_nohz_notifier,
- &chan->tick_nohz_notifier);
- /*
- * ring_buffer_nohz_lock will not be needed below, because
- * we just removed the notifiers, which were the only source of
- * concurrency.
- */
-#endif /* CONFIG_NO_HZ */
-#ifdef CONFIG_HOTPLUG_CPU
- get_online_cpus();
- chan->cpu_hp_enable = 0;
- for_each_online_cpu(cpu) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
- cpu);
- lib_ring_buffer_stop_switch_timer(buf);
- lib_ring_buffer_stop_read_timer(buf);
- }
- put_online_cpus();
- unregister_cpu_notifier(&chan->cpu_hp_notifier);
-#else
for_each_possible_cpu(cpu) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
- cpu);
- lib_ring_buffer_stop_switch_timer(buf);
- lib_ring_buffer_stop_read_timer(buf);
+ struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
+
+ lib_ring_buffer_stop_switch_timer(buf, handle);
+ lib_ring_buffer_stop_read_timer(buf, handle);
}
-#endif
} else {
- struct lib_ring_buffer *buf = chan->backend.buf;
+ struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
- lib_ring_buffer_stop_switch_timer(buf);
- lib_ring_buffer_stop_read_timer(buf);
+ lib_ring_buffer_stop_switch_timer(buf, handle);
+ lib_ring_buffer_stop_read_timer(buf, handle);
}
- channel_backend_unregister_notifiers(&chan->backend);
+ //channel_backend_unregister_notifiers(&chan->backend);
}
-static void channel_free(struct channel *chan)
+static void channel_free(struct channel *chan, struct shm_handle *handle,
+ int shadow)
{
- channel_iterator_free(chan);
- channel_backend_free(&chan->backend);
- kfree(chan);
+ if (!shadow)
+ channel_backend_free(&chan->backend, handle);
+ /* chan is freed by shm teardown */
+ shm_object_table_destroy(handle->table);
+ free(handle);
}
/**
* Holds cpu hotplug.
* Returns NULL on failure.
*/
-struct channel *channel_create(const struct lib_ring_buffer_config *config,
+struct shm_handle *channel_create(const struct lib_ring_buffer_config *config,
const char *name, void *priv, void *buf_addr,
size_t subbuf_size,
size_t num_subbuf, unsigned int switch_timer_interval,
- unsigned int read_timer_interval)
+ unsigned int read_timer_interval,
+ int *shm_fd, int *wait_fd, uint64_t *memory_map_size)
{
int ret, cpu;
+ size_t shmsize;
struct channel *chan;
+ struct shm_handle *handle;
+ struct shm_object *shmobj;
+ struct shm_ref *ref;
if (lib_ring_buffer_check_config(config, switch_timer_interval,
read_timer_interval))
return NULL;
- chan = kzalloc(sizeof(struct channel), GFP_KERNEL);
- if (!chan)
+ handle = zmalloc(sizeof(struct shm_handle));
+ if (!handle)
return NULL;
- ret = channel_backend_init(&chan->backend, name, config, priv,
- subbuf_size, num_subbuf);
- if (ret)
- goto error;
+ /* Allocate table for channel + per-cpu buffers */
+ handle->table = shm_object_table_create(1 + num_possible_cpus());
+ if (!handle->table)
+ goto error_table_alloc;
+
+ /* Calculate the shm allocation layout */
+ shmsize = sizeof(struct channel);
+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+ shmsize += sizeof(struct lib_ring_buffer_shmp) * num_possible_cpus();
+ else
+ shmsize += sizeof(struct lib_ring_buffer_shmp);
+
+ shmobj = shm_object_table_append(handle->table, shmsize);
+ if (!shmobj)
+ goto error_append;
+ /* struct channel is at object 0, offset 0 (hardcoded) */
+ set_shmp(handle->chan, zalloc_shm(shmobj, sizeof(struct channel)));
+ assert(handle->chan._ref.index == 0);
+ assert(handle->chan._ref.offset == 0);
+ chan = shmp(handle, handle->chan);
+ if (!chan)
+ goto error_append;
- ret = channel_iterator_init(chan);
+ ret = channel_backend_init(&chan->backend, name, config, priv,
+ subbuf_size, num_subbuf, handle);
if (ret)
- goto error_free_backend;
+ goto error_backend_init;
chan->commit_count_mask = (~0UL >> chan->backend.num_subbuf_order);
- chan->switch_timer_interval = usecs_to_jiffies(switch_timer_interval);
- chan->read_timer_interval = usecs_to_jiffies(read_timer_interval);
- kref_init(&chan->ref);
- init_waitqueue_head(&chan->read_wait);
- init_waitqueue_head(&chan->hp_wait);
+ //TODO
+ //chan->switch_timer_interval = usecs_to_jiffies(switch_timer_interval);
+ //chan->read_timer_interval = usecs_to_jiffies(read_timer_interval);
+ //TODO
+ //init_waitqueue_head(&chan->read_wait);
+ //init_waitqueue_head(&chan->hp_wait);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
- /* Only benefit from NO_HZ idle with per-cpu buffers for now. */
- chan->tick_nohz_notifier.notifier_call =
- ring_buffer_tick_nohz_callback;
- chan->tick_nohz_notifier.priority = ~0U;
- atomic_notifier_chain_register(&tick_nohz_notifier,
- &chan->tick_nohz_notifier);
-#endif /* defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) */
-
/*
* In case of non-hotplug cpu, if the ring-buffer is allocated
* in early initcall, it will not be notified of secondary cpus.
* In that off case, we need to allocate for all possible cpus.
*/
-#ifdef CONFIG_HOTPLUG_CPU
- chan->cpu_hp_notifier.notifier_call =
- lib_ring_buffer_cpu_hp_callback;
- chan->cpu_hp_notifier.priority = 6;
- register_cpu_notifier(&chan->cpu_hp_notifier);
-
- get_online_cpus();
- for_each_online_cpu(cpu) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
- cpu);
- spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
- lib_ring_buffer_start_switch_timer(buf);
- lib_ring_buffer_start_read_timer(buf);
- spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
- }
- chan->cpu_hp_enable = 1;
- put_online_cpus();
-#else
for_each_possible_cpu(cpu) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
- cpu);
- spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
- lib_ring_buffer_start_switch_timer(buf);
- lib_ring_buffer_start_read_timer(buf);
- spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
+ struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
+ lib_ring_buffer_start_switch_timer(buf, handle);
+ lib_ring_buffer_start_read_timer(buf, handle);
}
-#endif
} else {
- struct lib_ring_buffer *buf = chan->backend.buf;
+ struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
- lib_ring_buffer_start_switch_timer(buf);
- lib_ring_buffer_start_read_timer(buf);
+ lib_ring_buffer_start_switch_timer(buf, handle);
+ lib_ring_buffer_start_read_timer(buf, handle);
}
+ ref = &handle->chan._ref;
+ shm_get_object_data(handle, ref, shm_fd, wait_fd, memory_map_size);
+ return handle;
+
+error_backend_init:
+error_append:
+ shm_object_table_destroy(handle->table);
+error_table_alloc:
+ free(handle);
+ return NULL;
+}
- return chan;
+struct shm_handle *channel_handle_create(int shm_fd, int wait_fd,
+ uint64_t memory_map_size)
+{
+ struct shm_handle *handle;
+ struct shm_object *object;
-error_free_backend:
- channel_backend_free(&chan->backend);
-error:
- kfree(chan);
+ handle = zmalloc(sizeof(struct shm_handle));
+ if (!handle)
+ return NULL;
+
+ /* Allocate table for channel + per-cpu buffers */
+ handle->table = shm_object_table_create(1 + num_possible_cpus());
+ if (!handle->table)
+ goto error_table_alloc;
+ /* Add channel object */
+ object = shm_object_table_append_shadow(handle->table,
+ shm_fd, wait_fd, memory_map_size);
+ if (!object)
+ goto error_table_object;
+ /* struct channel is at object 0, offset 0 (hardcoded) */
+ handle->chan._ref.index = 0;
+ handle->chan._ref.offset = 0;
+ return handle;
+
+error_table_object:
+ shm_object_table_destroy(handle->table);
+error_table_alloc:
+ free(handle);
return NULL;
}
-EXPORT_SYMBOL_GPL(channel_create);
+
+int channel_handle_add_stream(struct shm_handle *handle,
+ int shm_fd, int wait_fd, uint64_t memory_map_size)
+{
+ struct shm_object *object;
+
+ /* Add stream object */
+ object = shm_object_table_append_shadow(handle->table,
+ shm_fd, wait_fd, memory_map_size);
+ if (!object)
+ return -1;
+ return 0;
+}
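+
+/*
+ * Illustrative sketch (an assumption, not code from this tree) of the
+ * consumer-side usage of the two helpers above, for a channel whose shm
+ * and wait file descriptors were received over a UNIX socket (the
+ * chan_ and buf_ prefixed variable names are hypothetical):
+ *
+ *	struct shm_handle *handle;
+ *
+ *	handle = channel_handle_create(chan_shm_fd, chan_wait_fd, chan_mmap_size);
+ *	// then, once per stream received:
+ *	channel_handle_add_stream(handle, buf_shm_fd, buf_wait_fd, buf_mmap_size);
+ */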
static
-void channel_release(struct kref *kref)
+void channel_release(struct channel *chan, struct shm_handle *handle,
+ int shadow)
{
- struct channel *chan = caa_container_of(kref, struct channel, ref);
- channel_free(chan);
+ channel_free(chan, handle, shadow);
}
/**
* @chan: channel to destroy
*
* Holds cpu hotplug.
- * Call "destroy" callback, finalize channels, wait for readers to release their
- * reference, then destroy ring buffer data. Note that when readers have
- * completed data consumption of finalized channels, get_subbuf() will return
- * -ENODATA. They should release their handle at that point.
- * Returns the private data pointer.
+ * Call "destroy" callback, finalize channels, decrement the channel
+ * reference count. Note that when readers have completed data
+ * consumption of finalized channels, get_subbuf() will return -ENODATA.
+ * They should release their handle at that point. Returns the private
+ * data pointer.
*/
-void *channel_destroy(struct channel *chan)
+void *channel_destroy(struct channel *chan, struct shm_handle *handle,
+ int shadow)
{
- int cpu;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
void *priv;
+ int cpu;
- channel_unregister_notifiers(chan);
+ if (shadow) {
+ channel_release(chan, handle, shadow);
+ return NULL;
+ }
+
+ channel_unregister_notifiers(chan, handle);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- /*
- * No need to hold cpu hotplug, because all notifiers have been
- * unregistered.
- */
for_each_channel_cpu(cpu, chan) {
- struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
- cpu);
+ struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
if (config->cb.buffer_finalize)
config->cb.buffer_finalize(buf,
chan->backend.priv,
- cpu);
+ cpu, handle);
if (buf->backend.allocated)
- lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH);
+ lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH,
+ handle);
/*
* Perform flush before writing to finalized.
*/
- smp_wmb();
+ cmm_smp_wmb();
CMM_ACCESS_ONCE(buf->finalized) = 1;
- wake_up_interruptible(&buf->read_wait);
+ //wake_up_interruptible(&buf->read_wait);
}
} else {
- struct lib_ring_buffer *buf = chan->backend.buf;
+ struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
if (config->cb.buffer_finalize)
- config->cb.buffer_finalize(buf, chan->backend.priv, -1);
+ config->cb.buffer_finalize(buf, chan->backend.priv, -1, handle);
if (buf->backend.allocated)
- lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH);
+ lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH,
+ handle);
/*
* Perform flush before writing to finalized.
*/
- smp_wmb();
+ cmm_smp_wmb();
CMM_ACCESS_ONCE(buf->finalized) = 1;
- wake_up_interruptible(&buf->read_wait);
+ //wake_up_interruptible(&buf->read_wait);
}
CMM_ACCESS_ONCE(chan->finalized) = 1;
- wake_up_interruptible(&chan->hp_wait);
- wake_up_interruptible(&chan->read_wait);
- kref_put(&chan->ref, channel_release);
+ //wake_up_interruptible(&chan->hp_wait);
+ //wake_up_interruptible(&chan->read_wait);
+ /*
+ * sessiond/consumer are keeping a reference on the shm file
+ * descriptor directly. No need to refcount.
+ */
priv = chan->backend.priv;
+ channel_release(chan, handle, shadow);
return priv;
}
-EXPORT_SYMBOL_GPL(channel_destroy);
struct lib_ring_buffer *channel_get_ring_buffer(
const struct lib_ring_buffer_config *config,
- struct channel *chan, int cpu)
+ struct channel *chan, int cpu,
+ struct shm_handle *handle,
+ int *shm_fd, int *wait_fd,
+ uint64_t *memory_map_size)
{
- if (config->alloc == RING_BUFFER_ALLOC_GLOBAL)
- return chan->backend.buf;
- else
- return per_cpu_ptr(chan->backend.buf, cpu);
+ struct shm_ref *ref;
+
+ if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
+ ref = &chan->backend.buf[0].shmp._ref;
+ shm_get_object_data(handle, ref, shm_fd, wait_fd,
+ memory_map_size);
+ return shmp(handle, chan->backend.buf[0].shmp);
+ } else {
+ if (cpu >= num_possible_cpus())
+ return NULL;
+ ref = &chan->backend.buf[cpu].shmp._ref;
+ shm_get_object_data(handle, ref, shm_fd, wait_fd,
+ memory_map_size);
+ return shmp(handle, chan->backend.buf[cpu].shmp);
+ }
}
-EXPORT_SYMBOL_GPL(channel_get_ring_buffer);
-int lib_ring_buffer_open_read(struct lib_ring_buffer *buf)
+int lib_ring_buffer_open_read(struct lib_ring_buffer *buf,
+ struct shm_handle *handle,
+ int shadow)
{
- struct channel *chan = buf->backend.chan;
-
- if (!atomic_long_add_unless(&buf->active_readers, 1, 1))
+ if (shadow) {
+ if (uatomic_cmpxchg(&buf->active_shadow_readers, 0, 1) != 0)
+ return -EBUSY;
+ cmm_smp_mb();
+ return 0;
+ }
+ if (uatomic_cmpxchg(&buf->active_readers, 0, 1) != 0)
return -EBUSY;
- kref_get(&chan->ref);
- smp_mb__after_atomic_inc();
+ cmm_smp_mb();
return 0;
}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_open_read);
-void lib_ring_buffer_release_read(struct lib_ring_buffer *buf)
+void lib_ring_buffer_release_read(struct lib_ring_buffer *buf,
+ struct shm_handle *handle,
+ int shadow)
{
- struct channel *chan = buf->backend.chan;
-
- CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
- smp_mb__before_atomic_dec();
- atomic_long_dec(&buf->active_readers);
- kref_put(&chan->ref, channel_release);
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_release_read);
+ struct channel *chan = shmp(handle, buf->backend.chan);
-/*
- * Promote compiler barrier to a smp_mb().
- * For the specific ring buffer case, this IPI call should be removed if the
- * architecture does not reorder writes. This should eventually be provided by
- * a separate architecture-specific infrastructure.
- */
-static void remote_mb(void *info)
-{
- smp_mb();
+ if (shadow) {
+ CHAN_WARN_ON(chan, uatomic_read(&buf->active_shadow_readers) != 1);
+ cmm_smp_mb();
+ uatomic_dec(&buf->active_shadow_readers);
+ return;
+ }
+ CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
+ cmm_smp_mb();
+ uatomic_dec(&buf->active_readers);
}
/**
*
* Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
* data to read at consumed position, or 0 if the get operation succeeds.
- * Busy-loop trying to get data if the tick_nohz sequence lock is held.
*/
int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
- unsigned long *consumed, unsigned long *produced)
+ unsigned long *consumed, unsigned long *produced,
+ struct shm_handle *handle)
{
- struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ struct channel *chan = shmp(handle, buf->backend.chan);
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long consumed_cur, write_offset;
int finalized;
-retry:
finalized = CMM_ACCESS_ONCE(buf->finalized);
/*
* Read finalized before counters.
*/
- smp_rmb();
- consumed_cur = atomic_long_read(&buf->consumed);
+ cmm_smp_rmb();
+ consumed_cur = uatomic_read(&buf->consumed);
/*
* No need to issue a memory barrier between consumed count read and
* write offset read, because consumed count can only change
*/
if (finalized)
return -ENODATA;
- else if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
- goto retry;
else
return -EAGAIN;
}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_snapshot);
/**
* lib_ring_buffer_put_snapshot - move consumed counter forward
* @consumed_new: new consumed count value
*/
void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
- unsigned long consumed_new)
+ unsigned long consumed_new,
+ struct shm_handle *handle)
{
struct lib_ring_buffer_backend *bufb = &buf->backend;
- struct channel *chan = bufb->chan;
+ struct channel *chan = shmp(handle, bufb->chan);
unsigned long consumed;
- CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
+ CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1
+ && uatomic_read(&buf->active_shadow_readers) != 1);
/*
* Only push the consumed value forward.
* If the consumed cmpxchg fails, this is because we have been pushed by
* the writer in flight recorder mode.
*/
- consumed = atomic_long_read(&buf->consumed);
+ consumed = uatomic_read(&buf->consumed);
while ((long) consumed - (long) consumed_new < 0)
- consumed = atomic_long_cmpxchg(&buf->consumed, consumed,
- consumed_new);
+ consumed = uatomic_cmpxchg(&buf->consumed, consumed,
+ consumed_new);
}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_move_consumer);
/**
* lib_ring_buffer_get_subbuf - get exclusive access to subbuffer for reading
*
* Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
* data to read at consumed position, or 0 if the get operation succeeds.
- * Busy-loop trying to get data if the tick_nohz sequence lock is held.
*/
int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
- unsigned long consumed)
+ unsigned long consumed,
+ struct shm_handle *handle)
{
- struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ struct channel *chan = shmp(handle, buf->backend.chan);
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
int ret;
int finalized;
/*
* Read finalized before counters.
*/
- smp_rmb();
- consumed_cur = atomic_long_read(&buf->consumed);
+ cmm_smp_rmb();
+ consumed_cur = uatomic_read(&buf->consumed);
consumed_idx = subbuf_index(consumed, chan);
- commit_count = v_read(config, &buf->commit_cold[consumed_idx].cc_sb);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_cold, consumed_idx)->cc_sb);
/*
* Make sure we read the commit count before reading the buffer
* data and the write offset. Correct consumed offset ordering
* wrt commit count is insured by the use of cmpxchg to update
* the consumed offset.
- * smp_call_function_single can fail if the remote CPU is offline,
- * this is OK because then there is no wmb to execute there.
- * If our thread is executing on the same CPU as the on the buffers
- * belongs to, we don't have to synchronize it at all. If we are
- * migrated, the scheduler will take care of the memory barriers.
- * Normally, smp_call_function_single() should ensure program order when
- * executing the remote function, which implies that it surrounds the
- * function execution with :
- * smp_mb()
- * send IPI
- * csd_lock_wait
- * recv IPI
- * smp_mb()
- * exec. function
- * smp_mb()
- * csd unlock
- * smp_mb()
- *
- * However, smp_call_function_single() does not seem to clearly execute
- * such barriers. It depends on spinlock semantic to provide the barrier
- * before executing the IPI and, when busy-looping, csd_lock_wait only
- * executes smp_mb() when it has to wait for the other CPU.
- *
- * I don't trust this code. Therefore, let's add the smp_mb() sequence
- * required ourself, even if duplicated. It has no performance impact
- * anyway.
- *
- * smp_mb() is needed because smp_rmb() and smp_wmb() only order read vs
- * read and write vs write. They do not ensure core synchronization. We
- * really have to ensure total order between the 3 barriers running on
- * the 2 CPUs.
*/
- if (config->ipi == RING_BUFFER_IPI_BARRIER) {
- if (config->sync == RING_BUFFER_SYNC_PER_CPU
- && config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- if (raw_smp_processor_id() != buf->backend.cpu) {
- /* Total order with IPI handler smp_mb() */
- smp_mb();
- smp_call_function_single(buf->backend.cpu,
- remote_mb, NULL, 1);
- /* Total order with IPI handler smp_mb() */
- smp_mb();
- }
- } else {
- /* Total order with IPI handler smp_mb() */
- smp_mb();
- smp_call_function(remote_mb, NULL, 1);
- /* Total order with IPI handler smp_mb() */
- smp_mb();
- }
- } else {
- /*
- * Local rmb to match the remote wmb to read the commit count
- * before the buffer data and the write offset.
- */
- smp_rmb();
- }
+ /*
+ * Local rmb to match the remote wmb to read the commit count
+ * before the buffer data and the write offset.
+ */
+ cmm_smp_rmb();
write_offset = v_read(config, &buf->offset);
* looking for matches the one contained in the subbuffer id.
*/
ret = update_read_sb_index(config, &buf->backend, &chan->backend,
- consumed_idx, buf_trunc_val(consumed, chan));
+ consumed_idx, buf_trunc_val(consumed, chan),
+ handle);
if (ret)
goto retry;
subbuffer_id_clear_noref(config, &buf->backend.buf_rsb.id);
*/
if (finalized)
return -ENODATA;
- else if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
- goto retry;
else
return -EAGAIN;
}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_get_subbuf);
/**
* lib_ring_buffer_put_subbuf - release exclusive subbuffer access
* @buf: ring buffer
*/
-void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf)
+void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf,
+ struct shm_handle *handle)
{
struct lib_ring_buffer_backend *bufb = &buf->backend;
- struct channel *chan = bufb->chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ struct channel *chan = shmp(handle, bufb->chan);
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long read_sb_bindex, consumed_idx, consumed;
- CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
+ CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1
+ && uatomic_read(&buf->active_shadow_readers) != 1);
if (!buf->get_subbuf) {
/*
*/
read_sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
v_add(config, v_read(config,
- &bufb->array[read_sb_bindex]->records_unread),
+ &shmp(handle, shmp_index(handle, bufb->array, read_sb_bindex)->shmp)->records_unread),
&bufb->records_read);
- v_set(config, &bufb->array[read_sb_bindex]->records_unread, 0);
+ v_set(config, &shmp(handle, shmp_index(handle, bufb->array, read_sb_bindex)->shmp)->records_unread, 0);
CHAN_WARN_ON(chan, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, bufb->buf_rsb.id));
subbuffer_id_set_noref(config, &bufb->buf_rsb.id);
*/
consumed_idx = subbuf_index(consumed, chan);
update_read_sb_index(config, &buf->backend, &chan->backend,
- consumed_idx, buf_trunc_val(consumed, chan));
+ consumed_idx, buf_trunc_val(consumed, chan),
+ handle);
/*
* update_read_sb_index return value ignored. Don't exchange sub-buffer
* if the writer concurrently updated it.
*/
}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_put_subbuf);
/*
* cons_offset is an iterator on all subbuffer offsets between the reader
void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf,
struct channel *chan,
unsigned long cons_offset,
- int cpu)
+ int cpu,
+ struct shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long cons_idx, commit_count, commit_count_sb;
cons_idx = subbuf_index(cons_offset, chan);
- commit_count = v_read(config, &buf->commit_hot[cons_idx].cc);
- commit_count_sb = v_read(config, &buf->commit_cold[cons_idx].cc_sb);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, cons_idx)->cc);
+ commit_count_sb = v_read(config, &shmp_index(handle, buf->commit_cold, cons_idx)->cc_sb);
if (subbuf_offset(commit_count, chan) != 0)
- printk(KERN_WARNING
- "ring buffer %s, cpu %d: "
+ ERRMSG("ring buffer %s, cpu %d: "
"commit count in subbuffer %lu,\n"
"expecting multiples of %lu bytes\n"
" [ %lu bytes committed, %lu bytes reader-visible ]\n",
chan->backend.subbuf_size,
commit_count, commit_count_sb);
- printk(KERN_DEBUG "ring buffer: %s, cpu %d: %lu bytes committed\n",
+ ERRMSG("ring buffer: %s, cpu %d: %lu bytes committed\n",
chan->backend.name, cpu, commit_count);
}
static
void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf,
struct channel *chan,
- void *priv, int cpu)
+ void *priv, int cpu,
+ struct shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long write_offset, cons_offset;
/*
* references are left.
*/
write_offset = v_read(config, &buf->offset);
- cons_offset = atomic_long_read(&buf->consumed);
+ cons_offset = uatomic_read(&buf->consumed);
if (write_offset != cons_offset)
- printk(KERN_WARNING
- "ring buffer %s, cpu %d: "
+ ERRMSG("ring buffer %s, cpu %d: "
"non-consumed data\n"
" [ %lu bytes written, %lu bytes read ]\n",
chan->backend.name, cpu, write_offset, cons_offset);
- for (cons_offset = atomic_long_read(&buf->consumed);
+ for (cons_offset = uatomic_read(&buf->consumed);
(long) (subbuf_trunc((unsigned long) v_read(config, &buf->offset),
chan)
- cons_offset) > 0;
cons_offset = subbuf_align(cons_offset, chan))
lib_ring_buffer_print_subbuffer_errors(buf, chan, cons_offset,
- cpu);
+ cpu, handle);
}
static
void lib_ring_buffer_print_errors(struct channel *chan,
- struct lib_ring_buffer *buf, int cpu)
+ struct lib_ring_buffer *buf, int cpu,
+ struct shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
void *priv = chan->backend.priv;
- printk(KERN_DEBUG "ring buffer %s, cpu %d: %lu records written, "
+ ERRMSG("ring buffer %s, cpu %d: %lu records written, "
"%lu records overrun\n",
chan->backend.name, cpu,
v_read(config, &buf->records_count),
if (v_read(config, &buf->records_lost_full)
|| v_read(config, &buf->records_lost_wrap)
|| v_read(config, &buf->records_lost_big))
- printk(KERN_WARNING
- "ring buffer %s, cpu %d: records were lost. Caused by:\n"
+ ERRMSG("ring buffer %s, cpu %d: records were lost. Caused by:\n"
" [ %lu buffer full, %lu nest buffer wrap-around, "
"%lu event too big ]\n",
chan->backend.name, cpu,
v_read(config, &buf->records_lost_wrap),
v_read(config, &buf->records_lost_big));
- lib_ring_buffer_print_buffer_errors(buf, chan, priv, cpu);
+ lib_ring_buffer_print_buffer_errors(buf, chan, priv, cpu, handle);
}
/*
void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
- u64 tsc)
+ u64 tsc,
+ struct shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long oldidx = subbuf_index(offsets->old, chan);
unsigned long commit_count;
- config->cb.buffer_begin(buf, tsc, oldidx);
+ config->cb.buffer_begin(buf, tsc, oldidx, handle);
/*
* Order all writes to buffer before the commit count update that will
* determine that the subbuffer is full.
*/
- if (config->ipi == RING_BUFFER_IPI_BARRIER) {
- /*
- * Must write slot data before incrementing commit count. This
- * compiler barrier is upgraded into a smp_mb() by the IPI sent
- * by get_subbuf().
- */
- barrier();
- } else
- smp_wmb();
+ cmm_smp_wmb();
v_add(config, config->cb.subbuffer_header_size(),
- &buf->commit_hot[oldidx].cc);
- commit_count = v_read(config, &buf->commit_hot[oldidx].cc);
+ &shmp_index(handle, buf->commit_hot, oldidx)->cc);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
/* Check if the written buffer has to be delivered */
lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
- commit_count, oldidx);
+ commit_count, oldidx, handle);
lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
offsets->old, commit_count,
- config->cb.subbuffer_header_size());
+ config->cb.subbuffer_header_size(),
+ handle);
}
/*
void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
- u64 tsc)
+ u64 tsc,
+ struct shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
unsigned long commit_count, padding_size, data_size;
data_size = subbuf_offset(offsets->old - 1, chan) + 1;
padding_size = chan->backend.subbuf_size - data_size;
- subbuffer_set_data_size(config, &buf->backend, oldidx, data_size);
+ subbuffer_set_data_size(config, &buf->backend, oldidx, data_size,
+ handle);
/*
* Order all writes to buffer before the commit count update that will
* determine that the subbuffer is full.
*/
- if (config->ipi == RING_BUFFER_IPI_BARRIER) {
- /*
- * Must write slot data before incrementing commit count. This
- * compiler barrier is upgraded into a smp_mb() by the IPI sent
- * by get_subbuf().
- */
- barrier();
- } else
- smp_wmb();
- v_add(config, padding_size, &buf->commit_hot[oldidx].cc);
- commit_count = v_read(config, &buf->commit_hot[oldidx].cc);
+ cmm_smp_wmb();
+ v_add(config, padding_size, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
- commit_count, oldidx);
+ commit_count, oldidx, handle);
lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
offsets->old, commit_count,
- padding_size);
+ padding_size, handle);
}
/*
void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
- u64 tsc)
+ u64 tsc,
+ struct shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long beginidx = subbuf_index(offsets->begin, chan);
unsigned long commit_count;
- config->cb.buffer_begin(buf, tsc, beginidx);
+ config->cb.buffer_begin(buf, tsc, beginidx, handle);
/*
* Order all writes to buffer before the commit count update that will
* determine that the subbuffer is full.
*/
- if (config->ipi == RING_BUFFER_IPI_BARRIER) {
- /*
- * Must write slot data before incrementing commit count. This
- * compiler barrier is upgraded into a smp_mb() by the IPI sent
- * by get_subbuf().
- */
- barrier();
- } else
- smp_wmb();
+ cmm_smp_wmb();
v_add(config, config->cb.subbuffer_header_size(),
- &buf->commit_hot[beginidx].cc);
- commit_count = v_read(config, &buf->commit_hot[beginidx].cc);
+ &shmp_index(handle, buf->commit_hot, beginidx)->cc);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, beginidx)->cc);
/* Check if the written buffer has to be delivered */
lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
- commit_count, beginidx);
+ commit_count, beginidx, handle);
lib_ring_buffer_write_commit_counter(config, buf, chan, beginidx,
offsets->begin, commit_count,
- config->cb.subbuffer_header_size());
+ config->cb.subbuffer_header_size(),
+ handle);
}
/*
*/
static
void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
- struct channel *chan,
- struct switch_offsets *offsets,
- u64 tsc)
+ struct channel *chan,
+ struct switch_offsets *offsets,
+ u64 tsc,
+ struct shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long endidx = subbuf_index(offsets->end - 1, chan);
unsigned long commit_count, padding_size, data_size;
data_size = subbuf_offset(offsets->end - 1, chan) + 1;
padding_size = chan->backend.subbuf_size - data_size;
- subbuffer_set_data_size(config, &buf->backend, endidx, data_size);
+ subbuffer_set_data_size(config, &buf->backend, endidx, data_size,
+ handle);
/*
* Order all writes to buffer before the commit count update that will
* determine that the subbuffer is full.
*/
- if (config->ipi == RING_BUFFER_IPI_BARRIER) {
- /*
- * Must write slot data before incrementing commit count. This
- * compiler barrier is upgraded into a smp_mb() by the IPI sent
- * by get_subbuf().
- */
- barrier();
- } else
- smp_wmb();
- v_add(config, padding_size, &buf->commit_hot[endidx].cc);
- commit_count = v_read(config, &buf->commit_hot[endidx].cc);
+ cmm_smp_wmb();
+ v_add(config, padding_size, &shmp_index(handle, buf->commit_hot, endidx)->cc);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);
lib_ring_buffer_check_deliver(config, buf, chan, offsets->end - 1,
- commit_count, endidx);
+ commit_count, endidx, handle);
lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
offsets->end, commit_count,
- padding_size);
+ padding_size, handle);
}
/*
struct switch_offsets *offsets,
u64 *tsc)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long off;
offsets->begin = v_read(config, &buf->offset);
* The next record that reserves space will be responsible for
* populating the following subbuffer header. We choose not to populate
* the next subbuffer header here because we want to be able to use
- * SWITCH_ACTIVE for periodical buffer flush and CPU tick_nohz stop
- * buffer flush, which must guarantee that all the buffer content
- * (records and header timestamps) are visible to the reader. This is
- * required for quiescence guarantees for the fusion merge.
+ * SWITCH_ACTIVE for periodical buffer flush, which must
+ * guarantee that all the buffer content (records and header
+ * timestamps) are visible to the reader. This is required for
+ * quiescence guarantees for the fusion merge.
*/
if (mode == SWITCH_FLUSH || off > 0) {
if (unlikely(off == 0)) {
* operations, this function must be called from the CPU which owns the buffer
* for a ACTIVE flush.
*/
-void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode mode)
+void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode mode,
+ struct shm_handle *handle)
{
- struct channel *chan = buf->backend.chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ struct channel *chan = shmp(handle, buf->backend.chan);
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
struct switch_offsets offsets;
unsigned long oldidx;
u64 tsc;
lib_ring_buffer_reserve_push_reader(buf, chan, offsets.old);
oldidx = subbuf_index(offsets.old, chan);
- lib_ring_buffer_clear_noref(config, &buf->backend, oldidx);
+ lib_ring_buffer_clear_noref(config, &buf->backend, oldidx, handle);
/*
* May need to populate header start on SWITCH_FLUSH.
*/
if (offsets.switch_old_start) {
- lib_ring_buffer_switch_old_start(buf, chan, &offsets, tsc);
+ lib_ring_buffer_switch_old_start(buf, chan, &offsets, tsc, handle);
offsets.old += config->cb.subbuffer_header_size();
}
/*
* Switch old subbuffer.
*/
- lib_ring_buffer_switch_old_end(buf, chan, &offsets, tsc);
+ lib_ring_buffer_switch_old_end(buf, chan, &offsets, tsc, handle);
}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_slow);
/*
* Returns :
struct switch_offsets *offsets,
struct lib_ring_buffer_ctx *ctx)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
+ struct shm_handle *handle = ctx->handle;
unsigned long reserve_commit_diff;
offsets->begin = v_read(config, &buf->offset);
(buf_trunc(offsets->begin, chan)
>> chan->backend.num_subbuf_order)
- ((unsigned long) v_read(config,
- &buf->commit_cold[sb_index].cc_sb)
+ &shmp_index(handle, buf->commit_cold, sb_index)->cc_sb)
& chan->commit_count_mask);
if (likely(reserve_commit_diff == 0)) {
/* Next subbuffer not being written to. */
if (unlikely(config->mode != RING_BUFFER_OVERWRITE &&
subbuf_trunc(offsets->begin, chan)
- subbuf_trunc((unsigned long)
- atomic_long_read(&buf->consumed), chan)
+ uatomic_read(&buf->consumed), chan)
>= chan->backend.buf_size)) {
/*
* We do not overwrite non consumed buffers
int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
{
struct channel *chan = ctx->chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ struct shm_handle *handle = ctx->handle;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
struct lib_ring_buffer *buf;
struct switch_offsets offsets;
int ret;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
+ buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
else
- buf = chan->backend.buf;
+ buf = shmp(handle, chan->backend.buf[0].shmp);
ctx->buf = buf;
offsets.size = 0;
* Clear noref flag for this subbuffer.
*/
lib_ring_buffer_clear_noref(config, &buf->backend,
- subbuf_index(offsets.end - 1, chan));
+ subbuf_index(offsets.end - 1, chan),
+ handle);
/*
* Switch old subbuffer if needed.
*/
if (unlikely(offsets.switch_old_end)) {
lib_ring_buffer_clear_noref(config, &buf->backend,
- subbuf_index(offsets.old - 1, chan));
- lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->tsc);
+ subbuf_index(offsets.old - 1, chan),
+ handle);
+ lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->tsc, handle);
}
/*
* Populate new subbuffer.
*/
if (unlikely(offsets.switch_new_start))
- lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc);
+ lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc, handle);
if (unlikely(offsets.switch_new_end))
- lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc);
+ lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc, handle);
ctx->slot_size = offsets.size;
ctx->pre_offset = offsets.begin;
ctx->buf_offset = offsets.begin + offsets.pre_header_padding;
return 0;
}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_reserve_slow);