const char *name,
const struct lttng_ust_lib_ring_buffer_config *config,
size_t subbuf_size,
- size_t num_subbuf, struct lttng_ust_shm_handle *handle);
+ size_t num_subbuf, struct lttng_ust_shm_handle *handle,
+ const int *stream_fds);
void channel_backend_free(struct channel_backend *chanb,
struct lttng_ust_shm_handle *handle);
return 0;
}
+/*
+ * The ring buffer can count events recorded and overwritten per
+ * buffer, but this counting is disabled by default due to its
+ * performance overhead.
+ */
+#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
+ const struct lttng_ust_lib_ring_buffer_ctx *ctx,
struct lttng_ust_lib_ring_buffer_backend *bufb,
unsigned long idx, struct lttng_ust_shm_handle *handle)
{
- unsigned long sb_bindex;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
- sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
- v_inc(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_commit);
+ backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
+ if (caa_unlikely(!backend_pages)) {
+ if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
+ return;
+ }
+ v_inc(config, &backend_pages->records_commit);
+}
+#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
+static inline
+void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
+ const struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
+ unsigned long idx, struct lttng_ust_shm_handle *handle)
+{
}
+#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
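+/*
+ * Usage sketch (hypothetical call site; names other than
+ * subbuffer_count_record() are assumptions): building with
+ * -DLTTNG_RING_BUFFER_COUNT_EVENTS selects the counting variant above;
+ * otherwise the empty stub lets the call compile away entirely:
+ *
+ *	subbuffer_count_record(config, ctx, &buf->backend, endidx, handle);
+ */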
/*
* Reader has exclusive subbuffer access for record consumption. No need to
struct lttng_ust_shm_handle *handle)
{
unsigned long sb_bindex;
+ struct channel *chan;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
- CHAN_WARN_ON(shmp(handle, bufb->chan),
- !v_read(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_unread));
+ chan = shmp(handle, bufb->chan);
+ if (!chan)
+ return;
+ pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
+ if (!pages_shmp)
+ return;
+ backend_pages = shmp(handle, pages_shmp->shmp);
+ if (!backend_pages)
+ return;
+ CHAN_WARN_ON(chan, !v_read(config, &backend_pages->records_unread));
/* Non-atomic decrement protected by exclusive subbuffer access */
- _v_dec(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_unread);
+ _v_dec(config, &backend_pages->records_unread);
v_inc(config, &bufb->records_read);
}
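+/*
+ * Reader-side sketch (hypothetical caller): after copying a record out
+ * of the read-side sub-buffer, the consumer accounts for it with:
+ *
+ *	subbuffer_consume_record(config, &buf->backend, handle);
+ */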
struct lttng_ust_shm_handle *handle)
{
unsigned long sb_bindex;
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
- sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
- return v_read(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_commit);
+ wsb = shmp_index(handle, bufb->buf_wsb, idx);
+ if (!wsb)
+ return 0;
+ sb_bindex = subbuffer_id_get_index(config, wsb->id);
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return 0;
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return 0;
+ return v_read(config, &backend_pages->records_commit);
}
/*
* Must be executed at subbuffer delivery when the writer has _exclusive_
- * subbuffer access. See ring_buffer_check_deliver() for details.
- * ring_buffer_get_records_count() must be called to get the records count
- * before this function, because it resets the records_commit count.
+ * subbuffer access. See lib_ring_buffer_check_deliver() for details.
+ * lib_ring_buffer_get_records_count() must be called to get the records
+ * count before this function, because it resets the records_commit
+ * count.
*/
static inline
unsigned long subbuffer_count_records_overrun(
unsigned long idx,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
unsigned long overruns, sb_bindex;
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
- sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
- pages = shmp_index(handle, bufb->array, sb_bindex);
- overruns = v_read(config, &shmp(handle, pages->shmp)->records_unread);
- v_set(config, &shmp(handle, pages->shmp)->records_unread,
- v_read(config, &shmp(handle, pages->shmp)->records_commit));
- v_set(config, &shmp(handle, pages->shmp)->records_commit, 0);
+ wsb = shmp_index(handle, bufb->buf_wsb, idx);
+ if (!wsb)
+ return 0;
+ sb_bindex = subbuffer_id_get_index(config, wsb->id);
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return 0;
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return 0;
+ overruns = v_read(config, &backend_pages->records_unread);
+ v_set(config, &backend_pages->records_unread,
+ v_read(config, &backend_pages->records_commit));
+ v_set(config, &backend_pages->records_commit, 0);
return overruns;
}
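+/*
+ * Delivery-path sketch (hypothetical caller; variable names are
+ * assumptions): read the records count before counting overruns, since
+ * the second call below resets records_commit:
+ *
+ *	count = subbuffer_get_records_count(config, &buf->backend,
+ *			idx, handle);
+ *	overruns = subbuffer_count_records_overrun(config, &buf->backend,
+ *			idx, handle);
+ */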
unsigned long data_size,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
unsigned long sb_bindex;
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
- sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
- pages = shmp_index(handle, bufb->array, sb_bindex);
- shmp(handle, pages->shmp)->data_size = data_size;
+ wsb = shmp_index(handle, bufb->buf_wsb, idx);
+ if (!wsb)
+ return;
+ sb_bindex = subbuffer_id_get_index(config, wsb->id);
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return;
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return;
+ backend_pages->data_size = data_size;
}
static inline
struct lttng_ust_lib_ring_buffer_backend *bufb,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
unsigned long sb_bindex;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
- pages = shmp_index(handle, bufb->array, sb_bindex);
- return shmp(handle, pages->shmp)->data_size;
+ pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
+ if (!pages_shmp)
+ return 0;
+ backend_pages = shmp(handle, pages_shmp->shmp);
+ if (!backend_pages)
+ return 0;
+ return backend_pages->data_size;
}
static inline
unsigned long idx,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages;
unsigned long sb_bindex;
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+
+ wsb = shmp_index(handle, bufb->buf_wsb, idx);
+ if (!wsb)
+ return 0;
+ sb_bindex = subbuffer_id_get_index(config, wsb->id);
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return 0;
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return 0;
+ return backend_pages->data_size;
+}
- sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
- pages = shmp_index(handle, bufb->array, sb_bindex);
- return shmp(handle, pages->shmp)->data_size;
+static inline
+void subbuffer_inc_packet_count(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
+ unsigned long idx, struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_backend_counts *counts;
+
+ counts = shmp_index(handle, bufb->buf_cnt, idx);
+ if (!counts)
+ return;
+ counts->seq_cnt++;
}
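+/*
+ * Sketch (assumption: invoked once per delivered packet so that
+ * seq_cnt numbers packets in order):
+ *
+ *	subbuffer_inc_packet_count(config, &buf->backend, idx, handle);
+ */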
/**
struct lttng_ust_shm_handle *handle)
{
unsigned long id, new_id;
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
if (config->mode != RING_BUFFER_OVERWRITE)
return;
* Performing a volatile access to read the sb_pages, because we want to
* read a coherent version of the pointer and the associated noref flag.
*/
- id = CMM_ACCESS_ONCE(shmp_index(handle, bufb->buf_wsb, idx)->id);
+ wsb = shmp_index(handle, bufb->buf_wsb, idx);
+ if (!wsb)
+ return;
+ id = CMM_ACCESS_ONCE(wsb->id);
for (;;) {
/* This check is called on the fast path for each record. */
if (caa_likely(!subbuffer_id_is_noref(config, id))) {
}
new_id = id;
subbuffer_id_clear_noref(config, &new_id);
- new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, idx)->id, id, new_id);
+ new_id = uatomic_cmpxchg(&wsb->id, id, new_id);
if (caa_likely(new_id == id))
break;
id = new_id;
unsigned long idx, unsigned long offset,
struct lttng_ust_shm_handle *handle)
{
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+ struct channel *chan;
+
if (config->mode != RING_BUFFER_OVERWRITE)
return;
+ wsb = shmp_index(handle, bufb->buf_wsb, idx);
+ if (!wsb)
+ return;
/*
* Because ring_buffer_set_noref() is only called by a single thread
* (the one which updated the cc_sb value), there are no concurrent
* subbuffer_set_noref() uses a volatile store to deal with concurrent
* readers of the noref flag.
*/
- CHAN_WARN_ON(shmp(handle, bufb->chan),
- subbuffer_id_is_noref(config, shmp_index(handle, bufb->buf_wsb, idx)->id));
+ chan = shmp(handle, bufb->chan);
+ if (!chan)
+ return;
+ CHAN_WARN_ON(chan, subbuffer_id_is_noref(config, wsb->id));
/*
* Memory barrier that ensures counter stores are ordered before set
* noref and offset.
*/
cmm_smp_mb();
- subbuffer_id_set_noref_offset(config, &shmp_index(handle, bufb->buf_wsb, idx)->id, offset);
+ subbuffer_id_set_noref_offset(config, &wsb->id, offset);
}
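+/*
+ * Pairing sketch (hypothetical delivery-side caller): the writer clears
+ * the noref flag via lib_ring_buffer_clear_noref() when it starts
+ * writing to a sub-buffer, then restores it together with the offset
+ * once the sub-buffer is delivered:
+ *
+ *	lib_ring_buffer_set_noref_offset(config, &buf->backend, idx,
+ *			offset, handle);
+ */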
/**
unsigned long consumed_count,
struct lttng_ust_shm_handle *handle)
{
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
unsigned long old_id, new_id;
+ wsb = shmp_index(handle, bufb->buf_wsb, consumed_idx);
+ if (caa_unlikely(!wsb))
+ return -EPERM;
+
if (config->mode == RING_BUFFER_OVERWRITE) {
+ struct channel *chan;
+
/*
* Exchange the target writer subbuffer with our own unused
* subbuffer. No need to use CMM_ACCESS_ONCE() here to read the
* old_wpage, because the value read will be confirmed by the
* following cmpxchg().
*/
- old_id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id;
+ old_id = wsb->id;
if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
return -EAGAIN;
/*
if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
consumed_count)))
return -EAGAIN;
- CHAN_WARN_ON(shmp(handle, bufb->chan),
- !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
+ chan = shmp(handle, bufb->chan);
+ if (caa_unlikely(!chan))
+ return -EPERM;
+ CHAN_WARN_ON(chan, !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
consumed_count);
- new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, consumed_idx)->id, old_id,
- bufb->buf_rsb.id);
+ new_id = uatomic_cmpxchg(&wsb->id, old_id, bufb->buf_rsb.id);
if (caa_unlikely(old_id != new_id))
return -EAGAIN;
bufb->buf_rsb.id = new_id;
} else {
/* No page exchange, use the writer page directly */
- bufb->buf_rsb.id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id;
+ bufb->buf_rsb.id = wsb->id;
}
return 0;
}
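+/*
+ * Caller sketch (hypothetical; argument list abridged with "..."):
+ * -EAGAIN means the writer raced the consumer, so the caller should
+ * re-validate the consumed position and retry; -EPERM reports an
+ * unreachable shared-memory mapping:
+ *
+ *	ret = update_read_sb_index(config, ..., consumed_idx,
+ *			consumed_count, handle);
+ *	if (ret == -EAGAIN)
+ *		goto retry;
+ */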
inline_memcpy(dest, src, __len); \
} while (0)
+/*
+ * Write 'len' bytes of value 'c' to 'dest', one byte at a time.
+ */
+static inline
+void lib_ring_buffer_do_memset(char *dest, int c, unsigned long len)
+{
+ unsigned long i;
+
+ for (i = 0; i < len; i++)
+ dest[i] = c;
+}
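+/*
+ * Example (sketch, assuming 'pad_size' bytes of padding remain at
+ * 'dest'): zero-fill the tail of a sub-buffer before delivery:
+ *
+ *	lib_ring_buffer_do_memset(dest, 0, pad_size);
+ */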
+
/* arch-agnostic implementation */
-static inline int fls(unsigned int x)
+static inline int lttng_ust_fls(unsigned int x)
{
int r = 32;
r -= 2;
}
if (!(x & 0x80000000U)) {
- x <<= 1;
+ /* No need to bit shift on last operation */
r -= 1;
}
return r;
{
int order;
- order = fls(count) - 1;
+ order = lttng_ust_fls(count) - 1;
if (count & (count - 1))
order++;
return order;
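+/*
+ * Worked examples: lttng_ust_fls() returns the 1-based position of the
+ * most significant set bit (e.g. lttng_ust_fls(8) == 4), so
+ * get_count_order() yields the ceiling of log2(count):
+ * get_count_order(8) == 3, get_count_order(9) == 4.
+ */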