struct shm_handle *handle)
{
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned int i;
/*
*/
v_set(config, &buf->offset, 0);
for (i = 0; i < chan->backend.num_subbuf; i++) {
- v_set(config, &shmp(handle, buf->commit_hot)[i].cc, 0);
- v_set(config, &shmp(handle, buf->commit_hot)[i].seq, 0);
- v_set(config, &shmp(handle, buf->commit_cold)[i].cc_sb, 0);
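+ /* The commit counter arrays live in shm: reach them via shmp_index(). */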
+ v_set(config, &shmp_index(handle, buf->commit_hot, i)->cc, 0);
+ v_set(config, &shmp_index(handle, buf->commit_hot, i)->seq, 0);
+ v_set(config, &shmp_index(handle, buf->commit_cold, i)->cc_sb, 0);
}
uatomic_set(&buf->consumed, 0);
uatomic_set(&buf->record_disabled, 0);
struct shm_handle *handle,
struct shm_object *shmobj)
{
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lib_ring_buffer_config *config = &chanb->config;
struct channel *chan = caa_container_of(chanb, struct channel, backend);
void *priv = chanb->priv;
unsigned int num_subbuf;
*/
subbuf_header_size = config->cb.subbuffer_header_size();
v_set(config, &buf->offset, subbuf_header_size);
- subbuffer_id_clear_noref(config, &shmp(handle, buf->backend.buf_wsb)[0].id);
+ subbuffer_id_clear_noref(config, &shmp_index(handle, buf->backend.buf_wsb, 0)->id);
tsc = config->cb.ring_buffer_clock_read(shmp(handle, buf->backend.chan));
config->cb.buffer_begin(buf, tsc, 0, handle);
- v_add(config, subbuf_header_size, &shmp(handle, buf->commit_hot)[0].cc);
+ v_add(config, subbuf_header_size, &shmp_index(handle, buf->commit_hot, 0)->cc);
if (config->cb.buffer_create) {
ret = config->cb.buffer_create(buf, priv, cpu, chanb->name, handle);
{
struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
/*
* Only flush buffers periodically if readers are active.
*/
- if (uatomic_read(&buf->active_readers))
+ if (uatomic_read(&buf->active_readers)
+ || uatomic_read(&buf->active_shadow_readers))
lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE, handle);
//TODO timers
struct shm_handle *handle)
{
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ //const struct lib_ring_buffer_config *config = &chan->backend.config;
if (!chan->switch_timer_interval || buf->switch_timer_enabled)
return;
{
struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
CHAN_WARN_ON(chan, !buf->backend.allocated);
- if (uatomic_read(&buf->active_readers)
+ if ((uatomic_read(&buf->active_readers)
+ || uatomic_read(&buf->active_shadow_readers))
&& lib_ring_buffer_poll_deliver(config, buf, chan)) {
//TODO
//wake_up_interruptible(&buf->read_wait);
struct shm_handle *handle)
{
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
|| !chan->read_timer_interval
struct shm_handle *handle)
{
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
|| !chan->read_timer_interval
static void channel_unregister_notifiers(struct channel *chan,
struct shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
int cpu;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
//channel_backend_unregister_notifiers(&chan->backend);
}
-static void channel_free(struct channel *chan, struct shm_handle *handle)
+static void channel_free(struct channel *chan, struct shm_handle *handle,
+ int shadow)
{
- int ret;
-
- channel_backend_free(&chan->backend, handle);
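+ /*
+ * Shadow handles do not own the buffer backend; they only tear
+ * down their own shm mapping below.
+ */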
+ if (!shadow)
+ channel_backend_free(&chan->backend, handle);
/* chan is freed by shm teardown */
shm_object_table_destroy(handle->table);
free(handle);
const char *name, void *priv, void *buf_addr,
size_t subbuf_size,
size_t num_subbuf, unsigned int switch_timer_interval,
- unsigned int read_timer_interval)
+ unsigned int read_timer_interval,
+ int *shm_fd, int *wait_fd, uint64_t *memory_map_size)
{
int ret, cpu;
size_t shmsize;
struct channel *chan;
struct shm_handle *handle;
struct shm_object *shmobj;
+ struct shm_ref *ref;
if (lib_ring_buffer_check_config(config, switch_timer_interval,
read_timer_interval))
shmsize += sizeof(struct lib_ring_buffer_shmp);
shmobj = shm_object_table_append(handle->table, shmsize);
+ if (!shmobj)
+ goto error_append;
+ /* struct channel is at object 0, offset 0 (hardcoded) */
set_shmp(handle->chan, zalloc_shm(shmobj, sizeof(struct channel)));
+ assert(handle->chan._ref.index == 0);
+ assert(handle->chan._ref.offset == 0);
chan = shmp(handle, handle->chan);
if (!chan)
goto error_append;
lib_ring_buffer_start_switch_timer(buf, handle);
lib_ring_buffer_start_read_timer(buf, handle);
}
-
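+ /* Return the channel shm object's fds and map size to the caller. */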
+ ref = &handle->chan._ref;
+ shm_get_object_data(handle, ref, shm_fd, wait_fd, memory_map_size);
return handle;
error_backend_init:
return NULL;
}
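+
+/*
+ * channel_handle_create - create a "shadow" handle mapping the shm
+ * object of an existing channel from its descriptors (e.g. received
+ * over a UNIX socket from the process owning the channel). Only the
+ * channel object is mapped here; stream objects are attached with
+ * channel_handle_add_stream().
+ *
+ * Sketch of intended use (variable names hypothetical):
+ *
+ *	handle = channel_handle_create(chan_fd, chan_wait_fd, chan_size);
+ *	for each stream received:
+ *		channel_handle_add_stream(handle, shm_fd, wait_fd, size);
+ */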
+struct shm_handle *channel_handle_create(int shm_fd, int wait_fd,
+ uint64_t memory_map_size)
+{
+ struct shm_handle *handle;
+ struct shm_object *object;
+
+ handle = zmalloc(sizeof(struct shm_handle));
+ if (!handle)
+ return NULL;
+
+ /* Allocate table for channel + per-cpu buffers */
+ handle->table = shm_object_table_create(1 + num_possible_cpus());
+ if (!handle->table)
+ goto error_table_alloc;
+ /* Add channel object */
+ object = shm_object_table_append_shadow(handle->table,
+ shm_fd, wait_fd, memory_map_size);
+ if (!object)
+ goto error_table_object;
+ /* struct channel is at object 0, offset 0 (hardcoded) */
+ handle->chan._ref.index = 0;
+ handle->chan._ref.offset = 0;
+ return handle;
+
+error_table_object:
+ shm_object_table_destroy(handle->table);
+error_table_alloc:
+ free(handle);
+ return NULL;
+}
+
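+/*
+ * channel_handle_add_stream - map one stream shm object into a shadow
+ * handle created by channel_handle_create(). Streams must be added in
+ * the same order as in the owner's object table, since shm references
+ * are resolved by object index.
+ */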
+int channel_handle_add_stream(struct shm_handle *handle,
+ int shm_fd, int wait_fd, uint64_t memory_map_size)
+{
+ struct shm_object *object;
+
+ /* Add stream object */
+ object = shm_object_table_append_shadow(handle->table,
+ shm_fd, wait_fd, memory_map_size);
+ if (!object)
+ return -1;
+ return 0;
+}
+
static
-void channel_release(struct channel *chan, struct shm_handle *handle)
+void channel_release(struct channel *chan, struct shm_handle *handle,
+ int shadow)
{
- channel_free(chan, handle);
+ channel_free(chan, handle, shadow);
}
/**
* They should release their handle at that point. Returns the private
* data pointer.
*/
-void *channel_destroy(struct channel *chan, struct shm_handle *handle)
+void *channel_destroy(struct channel *chan, struct shm_handle *handle,
+ int shadow)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
void *priv;
int cpu;
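+ /* A shadow handle only unmaps the channel; no private data to return. */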
+ if (shadow) {
+ channel_release(chan, handle, shadow);
+ return NULL;
+ }
+
channel_unregister_notifiers(chan, handle);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
* sessiond/consumer are keeping a reference on the shm file
* descriptor directly. No need to refcount.
*/
- channel_release(chan, handle);
priv = chan->backend.priv;
+ channel_release(chan, handle, shadow);
return priv;
}
struct lib_ring_buffer *channel_get_ring_buffer(
const struct lib_ring_buffer_config *config,
struct channel *chan, int cpu,
- struct shm_handle *handle)
+ struct shm_handle *handle,
+ int *shm_fd, int *wait_fd,
+ uint64_t *memory_map_size)
{
- if (config->alloc == RING_BUFFER_ALLOC_GLOBAL)
+ struct shm_ref *ref;
+
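+ /*
+ * Also return the shm fd, wakeup fd and map size of the buffer's
+ * shm object, e.g. so the caller can pass them to a consumer.
+ */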
+ if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
+ ref = &chan->backend.buf[0].shmp._ref;
+ shm_get_object_data(handle, ref, shm_fd, wait_fd,
+ memory_map_size);
return shmp(handle, chan->backend.buf[0].shmp);
- else
+ } else {
+ if (cpu >= num_possible_cpus())
+ return NULL;
+ ref = &chan->backend.buf[cpu].shmp._ref;
+ shm_get_object_data(handle, ref, shm_fd, wait_fd,
+ memory_map_size);
return shmp(handle, chan->backend.buf[cpu].shmp);
+ }
}
int lib_ring_buffer_open_read(struct lib_ring_buffer *buf,
- struct shm_handle *handle)
+ struct shm_handle *handle,
+ int shadow)
{
- struct channel *chan = shmp(handle, buf->backend.chan);
-
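+ /*
+ * Shadow readers (handles created with channel_handle_create())
+ * are accounted separately, so the owner and a shadow mapping can
+ * each hold one active reader.
+ */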
+ if (shadow) {
+ if (uatomic_cmpxchg(&buf->active_shadow_readers, 0, 1) != 0)
+ return -EBUSY;
+ cmm_smp_mb();
+ return 0;
+ }
if (uatomic_cmpxchg(&buf->active_readers, 0, 1) != 0)
return -EBUSY;
cmm_smp_mb();
}
void lib_ring_buffer_release_read(struct lib_ring_buffer *buf,
- struct shm_handle *handle)
+ struct shm_handle *handle,
+ int shadow)
{
struct channel *chan = shmp(handle, buf->backend.chan);
+ if (shadow) {
+ CHAN_WARN_ON(chan, uatomic_read(&buf->active_shadow_readers) != 1);
+ cmm_smp_mb();
+ uatomic_dec(&buf->active_shadow_readers);
+ return;
+ }
CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
cmm_smp_mb();
uatomic_dec(&buf->active_readers);
struct shm_handle *handle)
{
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long consumed_cur, write_offset;
int finalized;
struct channel *chan = shmp(handle, bufb->chan);
unsigned long consumed;
- CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
+ CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1
+ && uatomic_read(&buf->active_shadow_readers) != 1);
/*
* Only push the consumed value forward.
struct shm_handle *handle)
{
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
int ret;
int finalized;
cmm_smp_rmb();
consumed_cur = uatomic_read(&buf->consumed);
consumed_idx = subbuf_index(consumed, chan);
- commit_count = v_read(config, &shmp(handle, buf->commit_cold)[consumed_idx].cc_sb);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_cold, consumed_idx)->cc_sb);
/*
* Make sure we read the commit count before reading the buffer
* data and the write offset. Correct consumed offset ordering
{
struct lib_ring_buffer_backend *bufb = &buf->backend;
struct channel *chan = shmp(handle, bufb->chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long read_sb_bindex, consumed_idx, consumed;
- CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
+ CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1
+ && uatomic_read(&buf->active_shadow_readers) != 1);
if (!buf->get_subbuf) {
/*
*/
read_sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
v_add(config, v_read(config,
- &shmp(handle, shmp(handle, bufb->array)[read_sb_bindex].shmp)->records_unread),
+ &shmp(handle, shmp_index(handle, bufb->array, read_sb_bindex)->shmp)->records_unread),
&bufb->records_read);
- v_set(config, &shmp(handle, shmp(handle, bufb->array)[read_sb_bindex].shmp)->records_unread, 0);
+ v_set(config, &shmp(handle, shmp_index(handle, bufb->array, read_sb_bindex)->shmp)->records_unread, 0);
CHAN_WARN_ON(chan, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, bufb->buf_rsb.id));
subbuffer_id_set_noref(config, &bufb->buf_rsb.id);
int cpu,
struct shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long cons_idx, commit_count, commit_count_sb;
cons_idx = subbuf_index(cons_offset, chan);
- commit_count = v_read(config, &shmp(handle, buf->commit_hot)[cons_idx].cc);
- commit_count_sb = v_read(config, &shmp(handle, buf->commit_cold)[cons_idx].cc_sb);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, cons_idx)->cc);
+ commit_count_sb = v_read(config, &shmp_index(handle, buf->commit_cold, cons_idx)->cc_sb);
if (subbuf_offset(commit_count, chan) != 0)
ERRMSG("ring buffer %s, cpu %d: "
void *priv, int cpu,
struct shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long write_offset, cons_offset;
/*
struct lib_ring_buffer *buf, int cpu,
struct shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
void *priv = chan->backend.priv;
ERRMSG("ring buffer %s, cpu %d: %lu records written, "
u64 tsc,
struct shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long oldidx = subbuf_index(offsets->old, chan);
unsigned long commit_count;
*/
cmm_smp_wmb();
v_add(config, config->cb.subbuffer_header_size(),
- &shmp(handle, buf->commit_hot)[oldidx].cc);
- commit_count = v_read(config, &shmp(handle, buf->commit_hot)[oldidx].cc);
+ &shmp_index(handle, buf->commit_hot, oldidx)->cc);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
/* Check if the written buffer has to be delivered */
lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
commit_count, oldidx, handle);
u64 tsc,
struct shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
unsigned long commit_count, padding_size, data_size;
* determine that the subbuffer is full.
*/
cmm_smp_wmb();
- v_add(config, padding_size, &shmp(handle, buf->commit_hot)[oldidx].cc);
- commit_count = v_read(config, &shmp(handle, buf->commit_hot)[oldidx].cc);
+ v_add(config, padding_size, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
commit_count, oldidx, handle);
lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
u64 tsc,
struct shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long beginidx = subbuf_index(offsets->begin, chan);
unsigned long commit_count;
*/
cmm_smp_wmb();
v_add(config, config->cb.subbuffer_header_size(),
- &shmp(handle, buf->commit_hot)[beginidx].cc);
- commit_count = v_read(config, &shmp(handle, buf->commit_hot)[beginidx].cc);
+ &shmp_index(handle, buf->commit_hot, beginidx)->cc);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, beginidx)->cc);
/* Check if the written buffer has to be delivered */
lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
commit_count, beginidx, handle);
u64 tsc,
struct shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long endidx = subbuf_index(offsets->end - 1, chan);
unsigned long commit_count, padding_size, data_size;
* determine that the subbuffer is full.
*/
cmm_smp_wmb();
- v_add(config, padding_size, &shmp(handle, buf->commit_hot)[endidx].cc);
- commit_count = v_read(config, &shmp(handle, buf->commit_hot)[endidx].cc);
+ v_add(config, padding_size, &shmp_index(handle, buf->commit_hot, endidx)->cc);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);
lib_ring_buffer_check_deliver(config, buf, chan, offsets->end - 1,
commit_count, endidx, handle);
lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
struct switch_offsets *offsets,
u64 *tsc)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long off;
offsets->begin = v_read(config, &buf->offset);
struct shm_handle *handle)
{
struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
struct switch_offsets offsets;
unsigned long oldidx;
u64 tsc;
struct switch_offsets *offsets,
struct lib_ring_buffer_ctx *ctx)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
struct shm_handle *handle = ctx->handle;
unsigned long reserve_commit_diff;
(buf_trunc(offsets->begin, chan)
>> chan->backend.num_subbuf_order)
- ((unsigned long) v_read(config,
- &shmp(handle, buf->commit_cold)[sb_index].cc_sb)
+ &shmp_index(handle, buf->commit_cold, sb_index)->cc_sb)
& chan->commit_count_mask);
if (likely(reserve_commit_diff == 0)) {
/* Next subbuffer not being written to. */
{
struct channel *chan = ctx->chan;
struct shm_handle *handle = ctx->handle;
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lib_ring_buffer_config *config = &chan->backend.config;
struct lib_ring_buffer *buf;
struct switch_offsets offsets;
int ret;