*/
#include "frontend.h"
+#include <urcu-bp.h>
+#include <urcu/compiler.h>
/**
* lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
*
- * Disables preemption (acts as a RCU read-side critical section) and keeps a
- * ring buffer nesting count as supplementary safety net to ensure tracer client
- * code will never trigger an endless recursion. Returns the processor ID on
- * success, -EPERM on failure (nesting count too high).
+ * Grabs the RCU read-side lock and keeps a ring buffer nesting count as
+ * a supplementary safety net to ensure tracer client code will never
+ * trigger endless recursion. Returns the processor ID on success,
+ * -EPERM on failure (nesting count too high).
*
* asm volatile and "memory" clobber prevent the compiler from moving
* instructions out of the ring buffer nesting count. This is required to ensure
* section.
*/
static inline
-int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
+int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
{
int cpu, nesting;
- rcu_read_lock_sched_notrace();
- cpu = smp_processor_id();
+ rcu_read_lock();
+ cpu = lttng_ust_get_cpu();
nesting = ++lib_ring_buffer_nesting; /* TLS */
- barrier();
+ cmm_barrier();
- if (unlikely(nesting > 4)) {
+ if (caa_unlikely(nesting > 4)) {
WARN_ON_ONCE(1);
lib_ring_buffer_nesting--; /* TLS */
- rcu_read_unlock_sched_notrace();
+ rcu_read_unlock();
return -EPERM;
} else
return cpu;
* lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
*/
static inline
-void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
+void lib_ring_buffer_put_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
{
- barrier();
+ cmm_barrier();
lib_ring_buffer_nesting--; /* TLS */
- rcu_read_unlock_sched_notrace();
+ rcu_read_unlock();
}
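/*
 * Minimal caller sketch (illustration only, assumptions noted): the expected
 * fast-path pairing of lib_ring_buffer_get_cpu()/lib_ring_buffer_put_cpu()
 * around reserve/commit. How the context is initialized (chan, handle,
 * data_size, ...) is client-specific and not shown; error handling is
 * reduced to dropping the record.
 */
#if 0
static
void example_write_record(const struct lttng_ust_lib_ring_buffer_config *config,
			  struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	int cpu;

	cpu = lib_ring_buffer_get_cpu(config);	/* RCU read lock + nesting count */
	if (caa_unlikely(cpu < 0))
		return;				/* nesting too deep: drop record */
	ctx->cpu = cpu;
	if (lib_ring_buffer_reserve(config, ctx) == 0) {
		/* write the payload at ctx->buf_offset via the backend, then: */
		lib_ring_buffer_commit(config, ctx);
	}
	lib_ring_buffer_put_cpu(config);	/* nesting count-- + RCU unlock */
}
#endif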
/*
* returns 0 if reserve ok, or 1 if the slow path must be taken.
*/
static inline
-int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_ctx *ctx,
+int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
unsigned long *o_begin, unsigned long *o_end,
unsigned long *o_old, size_t *before_hdr_pad)
{
struct channel *chan = ctx->chan;
- struct lib_ring_buffer *buf = ctx->buf;
+ struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
*o_begin = v_read(config, &buf->offset);
*o_old = *o_begin;
* commit counter to increment it and commit seq value to compare it to
* the commit counter.
*/
- prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
+ //prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
+ /*
+ * Because we don't use any timer in the application, we
+ * currently cannot guarantee that we have frequent
+ * events that let us detect 27-bit overflows.
+ * Therefore, for now, we force event headers
+ * to contain 64-bit timestamps.
+ */
+ ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+#if 0
if (last_tsc_overflow(config, buf, ctx->tsc))
ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+#endif //0
- if (unlikely(subbuf_offset(*o_begin, chan) == 0))
+ if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
return 1;
ctx->slot_size = record_header_size(config, chan, *o_begin,
ctx->slot_size +=
lib_ring_buffer_align(*o_begin + ctx->slot_size,
ctx->largest_align) + ctx->data_size;
- if (unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
+ if (caa_unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
> chan->backend.subbuf_size))
return 1;
*/
*o_end = *o_begin + ctx->slot_size;
- if (unlikely((subbuf_offset(*o_end, chan)) == 0))
+ if (caa_unlikely((subbuf_offset(*o_end, chan)) == 0))
/*
* The offset_end will fall at the very beginning of the next
* subbuffer.
*/
static inline
-int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
struct channel *chan = ctx->chan;
- struct lib_ring_buffer *buf;
+ struct lttng_ust_shm_handle *handle = ctx->handle;
+ struct lttng_ust_lib_ring_buffer *buf;
unsigned long o_begin, o_end, o_old;
size_t before_hdr_pad = 0;
- if (atomic_read(&chan->record_disabled))
+ if (uatomic_read(&chan->record_disabled))
return -EAGAIN;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
+ buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
else
- buf = chan->backend.buf;
- if (atomic_read(&buf->record_disabled))
+ buf = shmp(handle, chan->backend.buf[0].shmp);
+ if (uatomic_read(&buf->record_disabled))
return -EAGAIN;
ctx->buf = buf;
/*
* Perform retryable operations.
*/
- if (unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
+ if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
&o_end, &o_old, &before_hdr_pad)))
goto slow_path;
- if (unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
+ if (caa_unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
!= o_old))
goto slow_path;
* Clear noref flag for this subbuffer.
*/
lib_ring_buffer_clear_noref(config, &ctx->buf->backend,
- subbuf_index(o_end - 1, chan));
+ subbuf_index(o_end - 1, chan), handle);
ctx->pre_offset = o_begin;
ctx->buf_offset = o_begin + before_hdr_pad;
* disabled, for RING_BUFFER_SYNC_PER_CPU configuration.
*/
static inline
-void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf, enum switch_mode mode)
+void lib_ring_buffer_switch(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
+ struct lttng_ust_shm_handle *handle)
{
- lib_ring_buffer_switch_slow(buf, mode);
+ lib_ring_buffer_switch_slow(buf, mode, handle);
}
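/*
 * Usage sketch (assumed caller context): forcing delivery of the current
 * sub-buffer to the consumer, e.g. SWITCH_FLUSH at channel teardown or
 * SWITCH_ACTIVE from a periodic switch timer. buf and handle are assumed
 * to have been looked up by the caller.
 *
 *	lib_ring_buffer_switch(config, buf, SWITCH_FLUSH, handle);
 */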
/* See ring_buffer_frontend_api.h for lib_ring_buffer_reserve(). */
* specified sub-buffer, and delivers it if necessary.
*/
static inline
-void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
- const struct lib_ring_buffer_ctx *ctx)
+void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *config,
+ const struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
struct channel *chan = ctx->chan;
- struct lib_ring_buffer *buf = ctx->buf;
+ struct lttng_ust_shm_handle *handle = ctx->handle;
+ struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
unsigned long offset_end = ctx->buf_offset;
unsigned long endidx = subbuf_index(offset_end - 1, chan);
unsigned long commit_count;
/*
* Must count record before incrementing the commit count.
*/
- subbuffer_count_record(config, &buf->backend, endidx);
+ subbuffer_count_record(config, &buf->backend, endidx, handle);
/*
* Order all writes to buffer before the commit count update that will
* determine that the subbuffer is full.
*/
- if (config->ipi == RING_BUFFER_IPI_BARRIER) {
- /*
- * Must write slot data before incrementing commit count. This
- * compiler barrier is upgraded into a smp_mb() by the IPI sent
- * by get_subbuf().
- */
- barrier();
- } else
- smp_wmb();
+ cmm_smp_wmb();
- v_add(config, ctx->slot_size, &buf->commit_hot[endidx].cc);
+ v_add(config, ctx->slot_size, &shmp_index(handle, buf->commit_hot, endidx)->cc);
/*
* commit count read can race with concurrent OOO commit count updates.
* count reaches back the reserve offset for a specific sub-buffer,
* which is completely independent of the order.
*/
- commit_count = v_read(config, &buf->commit_hot[endidx].cc);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);
lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
- commit_count, endidx);
+ commit_count, endidx, handle);
/*
* Update used size at each commit. It's needed only for extracting
* ring_buffer buffers from vmcore, after crash.
*/
lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
ctx->buf_offset, commit_count,
- ctx->slot_size);
+ ctx->slot_size, handle);
}
/**
* Returns 0 upon success, -EPERM if the record cannot be discarded.
*/
static inline
-int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *config,
- const struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
+ const struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
- struct lib_ring_buffer *buf = ctx->buf;
+ struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
unsigned long end_offset = ctx->pre_offset + ctx->slot_size;
/*
*/
save_last_tsc(config, buf, 0ULL);
- if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
+ if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
!= end_offset))
return -EPERM;
else
}
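/*
 * Usage sketch (hypothetical client; serialize_payload() is not a real
 * helper): a client that fails to fill a successfully reserved slot can
 * try to give the space back instead of committing garbage. The discard
 * only succeeds if no later reserve moved the write offset; on -EPERM the
 * slot is assumed to be committed anyway (e.g. as padding) so the commit
 * counter still catches up with the reserve offset.
 *
 *	if (serialize_payload(ctx) < 0) {
 *		if (lib_ring_buffer_try_discard_reserve(config, ctx) < 0)
 *			lib_ring_buffer_commit(config, ctx);
 *		return;
 *	}
 *	lib_ring_buffer_commit(config, ctx);
 */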
static inline
-void channel_record_disable(const struct lib_ring_buffer_config *config,
+void channel_record_disable(const struct lttng_ust_lib_ring_buffer_config *config,
struct channel *chan)
{
- atomic_inc(&chan->record_disabled);
+ uatomic_inc(&chan->record_disabled);
}
static inline
-void channel_record_enable(const struct lib_ring_buffer_config *config,
+void channel_record_enable(const struct lttng_ust_lib_ring_buffer_config *config,
struct channel *chan)
{
- atomic_dec(&chan->record_disabled);
+ uatomic_dec(&chan->record_disabled);
}
static inline
-void lib_ring_buffer_record_disable(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+void lib_ring_buffer_record_disable(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
{
- atomic_inc(&buf->record_disabled);
+ uatomic_inc(&buf->record_disabled);
}
static inline
-void lib_ring_buffer_record_enable(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf)
+void lib_ring_buffer_record_enable(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
{
- atomic_dec(&buf->record_disabled);
+ uatomic_dec(&buf->record_disabled);
}
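/*
 * Sketch: the disable/enable pairs above act as nesting-friendly
 * reference counts (uatomic increment/decrement of record_disabled):
 * reserve bails out with -EAGAIN while any disable is outstanding.
 * Quiescing a whole channel, or a single buffer:
 *
 *	channel_record_disable(config, chan);
 *	...
 *	channel_record_enable(config, chan);
 *
 *	lib_ring_buffer_record_disable(config, buf);
 *	...
 *	lib_ring_buffer_record_enable(config, buf);
 */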
#endif /* _LINUX_RING_BUFFER_FRONTEND_API_H */