* Dual LGPL v2.1/GPL v2 license.
*/
-#include "config.h"
+#include <unistd.h>
+#include <urcu/compiler.h>
+
+#include <ust/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
+#include "shm.h"
/* Ring buffer backend API presented to the frontend */
/* Ring buffer and channel backend create/free */
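+/*
+ * Creation, reset and free now take the shm_handle owning the mapping
+ * (and, for buffer creation, the shm_object to allocate from), since the
+ * backend structures live in shared memory.
+ */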
int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
- struct channel_backend *chan, int cpu);
+ struct channel_backend *chan, int cpu,
+ struct shm_handle *handle,
+ struct shm_object *shmobj);
void channel_backend_unregister_notifiers(struct channel_backend *chanb);
void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb);
int channel_backend_init(struct channel_backend *chanb,
const char *name,
const struct lib_ring_buffer_config *config,
void *priv, size_t subbuf_size,
- size_t num_subbuf);
-void channel_backend_free(struct channel_backend *chanb);
+ size_t num_subbuf, struct shm_handle *handle);
+void channel_backend_free(struct channel_backend *chanb,
+ struct shm_handle *handle);
-void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb);
+void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb,
+ struct shm_handle *handle);
void channel_backend_reset(struct channel_backend *chanb);
int lib_ring_buffer_backend_init(void);
* sampling and subbuffer ID exchange).
*/
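+/* CAA_BITS_PER_LONG is the liburcu replacement for the kernel's BITS_PER_LONG */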
-#define HALF_ULONG_BITS (BITS_PER_LONG >> 1)
+#define HALF_ULONG_BITS (CAA_BITS_PER_LONG >> 1)
#define SB_ID_OFFSET_SHIFT (HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT (1UL << SB_ID_OFFSET_SHIFT)
tmp |= offset << SB_ID_OFFSET_SHIFT;
tmp |= SB_ID_NOREF_MASK;
/* Volatile store, read concurrently by readers. */
- ACCESS_ONCE(*id) = tmp;
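+/* CMM_ACCESS_ONCE is the liburcu equivalent of the kernel's ACCESS_ONCE */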
+ CMM_ACCESS_ONCE(*id) = tmp;
}
}
static inline
void subbuffer_count_record(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_backend *bufb,
- unsigned long idx)
+ unsigned long idx, struct shm_handle *handle)
{
unsigned long sb_bindex;
- sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
- v_inc(config, &bufb->array[sb_bindex]->records_commit);
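+/*
+ * Each bufb->array slot wraps a second shm reference (->shmp) to the
+ * backend pages, hence the nested shmp(shmp_index(...)) lookup.
+ */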
+ sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
+ v_inc(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_commit);
}
/*
*/
static inline
void subbuffer_consume_record(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb)
+ struct lib_ring_buffer_backend *bufb,
+ struct shm_handle *handle)
{
unsigned long sb_bindex;
sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
- CHAN_WARN_ON(bufb->chan,
- !v_read(config, &bufb->array[sb_bindex]->records_unread));
+ CHAN_WARN_ON(shmp(handle, bufb->chan),
+ !v_read(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_unread));
/* Non-atomic decrement protected by exclusive subbuffer access */
- _v_dec(config, &bufb->array[sb_bindex]->records_unread);
+ _v_dec(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_unread);
v_inc(config, &bufb->records_read);
}
unsigned long subbuffer_get_records_count(
const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_backend *bufb,
- unsigned long idx)
+ unsigned long idx,
+ struct shm_handle *handle)
{
unsigned long sb_bindex;
- sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
- return v_read(config, &bufb->array[sb_bindex]->records_commit);
+ sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
+ return v_read(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_commit);
}
/*
unsigned long subbuffer_count_records_overrun(
const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_backend *bufb,
- unsigned long idx)
+ unsigned long idx,
+ struct shm_handle *handle)
{
- struct lib_ring_buffer_backend_pages *pages;
+ struct lib_ring_buffer_backend_pages_shmp *pages;
unsigned long overruns, sb_bindex;
- sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
- pages = bufb->array[sb_bindex];
- overruns = v_read(config, &pages->records_unread);
- v_set(config, &pages->records_unread,
- v_read(config, &pages->records_commit));
- v_set(config, &pages->records_commit, 0);
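+/* "pages" is now the shm wrapper; each field access resolves ->shmp first */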
+ sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
+ pages = shmp_index(handle, bufb->array, sb_bindex);
+ overruns = v_read(config, &shmp(handle, pages->shmp)->records_unread);
+ v_set(config, &shmp(handle, pages->shmp)->records_unread,
+ v_read(config, &shmp(handle, pages->shmp)->records_commit));
+ v_set(config, &shmp(handle, pages->shmp)->records_commit, 0);
return overruns;
}
void subbuffer_set_data_size(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_backend *bufb,
unsigned long idx,
- unsigned long data_size)
+ unsigned long data_size,
+ struct shm_handle *handle)
{
- struct lib_ring_buffer_backend_pages *pages;
+ struct lib_ring_buffer_backend_pages_shmp *pages;
unsigned long sb_bindex;
- sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
- pages = bufb->array[sb_bindex];
- pages->data_size = data_size;
+ sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
+ pages = shmp_index(handle, bufb->array, sb_bindex);
+ shmp(handle, pages->shmp)->data_size = data_size;
}
static inline
unsigned long subbuffer_get_read_data_size(
const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_backend *bufb)
+ struct lib_ring_buffer_backend *bufb,
+ struct shm_handle *handle)
{
- struct lib_ring_buffer_backend_pages *pages;
+ struct lib_ring_buffer_backend_pages_shmp *pages;
unsigned long sb_bindex;
sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
- pages = bufb->array[sb_bindex];
- return pages->data_size;
+ pages = shmp_index(handle, bufb->array, sb_bindex);
+ return shmp(handle, pages->shmp)->data_size;
}
static inline
unsigned long subbuffer_get_data_size(
const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_backend *bufb,
- unsigned long idx)
+ unsigned long idx,
+ struct shm_handle *handle)
{
- struct lib_ring_buffer_backend_pages *pages;
+ struct lib_ring_buffer_backend_pages_shmp *pages;
unsigned long sb_bindex;
- sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
- pages = bufb->array[sb_bindex];
- return pages->data_size;
+ sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
+ pages = shmp_index(handle, bufb->array, sb_bindex);
+ return shmp(handle, pages->shmp)->data_size;
}
/**
static inline
void lib_ring_buffer_clear_noref(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_backend *bufb,
- unsigned long idx)
+ unsigned long idx,
+ struct shm_handle *handle)
{
unsigned long id, new_id;
- * Performing a volatile access to read the sb_pages, because we want to
- * read a coherent version of the pointer and the associated noref flag.
+ * Perform a volatile access to read the subbuffer id, because we want a
+ * coherent view of the index and its associated noref flag.
*/
- id = ACCESS_ONCE(bufb->buf_wsb[idx].id);
+ id = CMM_ACCESS_ONCE(shmp_index(handle, bufb->buf_wsb, idx)->id);
for (;;) {
/* This check is called on the fast path for each record. */
if (likely(!subbuffer_id_is_noref(config, id))) {
}
new_id = id;
subbuffer_id_clear_noref(config, &new_id);
- new_id = cmpxchg(&bufb->buf_wsb[idx].id, id, new_id);
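+/* uatomic_cmpxchg is the liburcu replacement for the kernel's cmpxchg */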
+ new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, idx)->id, id, new_id);
if (likely(new_id == id))
break;
id = new_id;
static inline
void lib_ring_buffer_set_noref_offset(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_backend *bufb,
- unsigned long idx, unsigned long offset)
+ unsigned long idx, unsigned long offset,
+ struct shm_handle *handle)
{
if (config->mode != RING_BUFFER_OVERWRITE)
return;
* subbuffer_set_noref() uses a volatile store to deal with concurrent
* readers of the noref flag.
*/
- CHAN_WARN_ON(bufb->chan,
- subbuffer_id_is_noref(config, bufb->buf_wsb[idx].id));
+ CHAN_WARN_ON(shmp(handle, bufb->chan),
+ subbuffer_id_is_noref(config, shmp_index(handle, bufb->buf_wsb, idx)->id));
/*
* Memory barrier that ensures counter stores are ordered before set
* noref and offset.
*/
- smp_mb();
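+/* cmm_smp_mb is the liburcu replacement for the kernel's smp_mb */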
- subbuffer_id_set_noref_offset(config, &bufb->buf_wsb[idx].id, offset);
+ cmm_smp_mb();
+ subbuffer_id_set_noref_offset(config, &shmp_index(handle, bufb->buf_wsb, idx)->id, offset);
}
/**
struct lib_ring_buffer_backend *bufb,
struct channel_backend *chanb,
unsigned long consumed_idx,
- unsigned long consumed_count)
+ unsigned long consumed_count,
+ struct shm_handle *handle)
{
unsigned long old_id, new_id;
if (config->mode == RING_BUFFER_OVERWRITE) {
/*
* Exchange the target writer subbuffer with our own unused
- * subbuffer. No need to use ACCESS_ONCE() here to read the
- * old_wpage, because the value read will be confirmed by the
- * following cmpxchg().
+ * subbuffer. No need to use CMM_ACCESS_ONCE() here to read the
+ * old_id, because the value read will be confirmed by the
+ * following uatomic_cmpxchg().
*/
- old_id = bufb->buf_wsb[consumed_idx].id;
+ old_id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id;
if (unlikely(!subbuffer_id_is_noref(config, old_id)))
return -EAGAIN;
/*
if (unlikely(!subbuffer_id_compare_offset(config, old_id,
consumed_count)))
return -EAGAIN;
- CHAN_WARN_ON(bufb->chan,
+ CHAN_WARN_ON(shmp(handle, bufb->chan),
!subbuffer_id_is_noref(config, bufb->buf_rsb.id));
subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
consumed_count);
- new_id = cmpxchg(&bufb->buf_wsb[consumed_idx].id, old_id,
+ new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, consumed_idx)->id, old_id,
bufb->buf_rsb.id);
if (unlikely(old_id != new_id))
return -EAGAIN;
bufb->buf_rsb.id = new_id;
} else {
/* No page exchange, use the writer page directly */
- bufb->buf_rsb.id = bufb->buf_wsb[consumed_idx].id;
+ bufb->buf_rsb.id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id;
}
return 0;
}