X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=libringbuffer%2Ffrontend_api.h;h=8a32006139afed25204c1594408ecaae54b3f46d;hb=b4051ad8c170901d5297e1b3005b24e63cb0ab1e;hp=a2a9af39aea777998c5d96cdde5f632f500c8cf7;hpb=8c90a710949a214d87084ff67fc85f284bc93ef2;p=lttng-ust.git

diff --git a/libringbuffer/frontend_api.h b/libringbuffer/frontend_api.h
index a2a9af39..8a320061 100644
--- a/libringbuffer/frontend_api.h
+++ b/libringbuffer/frontend_api.h
@@ -30,17 +30,20 @@
  * See frontend.h for channel allocation and read-side API.
  */
 
-#include "frontend.h"
+#include <stddef.h>
+
 #include <urcu/compiler.h>
 #include <urcu/tls-compat.h>
+
+#include "frontend.h"
 
 /**
  * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
  *
- * Grabs RCU read-side lock and keeps a ring buffer nesting count as
- * supplementary safety net to ensure tracer client code will never
- * trigger an endless recursion. Returns the processor ID on success,
- * -EPERM on failure (nesting count too high).
+ * Keeps a ring buffer nesting count as supplementary safety net to
+ * ensure tracer client code will never trigger an endless recursion.
+ * Returns the processor ID on success, -EPERM on failure (nesting count
+ * too high).
  *
  * asm volatile and "memory" clobber prevent the compiler from moving
  * instructions out of the ring buffer nesting count. This is required to ensure
@@ -53,7 +56,6 @@ int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *confi
 {
 	int cpu, nesting;
 
-	rcu_read_lock();
 	cpu = lttng_ust_get_cpu();
 	nesting = ++URCU_TLS(lib_ring_buffer_nesting);
 	cmm_barrier();
@@ -61,7 +63,6 @@ int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *confi
 	if (caa_unlikely(nesting > 4)) {
 		WARN_ON_ONCE(1);
 		URCU_TLS(lib_ring_buffer_nesting)--;
-		rcu_read_unlock();
 		return -EPERM;
 	} else
 		return cpu;
@@ -75,7 +76,6 @@ void lib_ring_buffer_put_cpu(const struct lttng_ust_lib_ring_buffer_config *conf
 {
 	cmm_barrier();
 	URCU_TLS(lib_ring_buffer_nesting)--;	/* TLS */
-	rcu_read_unlock();
 }
 
 /*
@@ -87,6 +87,7 @@ void lib_ring_buffer_put_cpu(const struct lttng_ust_lib_ring_buffer_config *conf
 static inline
 int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
 				struct lttng_ust_lib_ring_buffer_ctx *ctx,
+				void *client_ctx,
 				unsigned long *o_begin, unsigned long *o_end,
 				unsigned long *o_old, size_t *before_hdr_pad)
 {
@@ -113,7 +114,7 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *c
 		return 1;
 
 	ctx->slot_size = record_header_size(config, chan, *o_begin,
-					    before_hdr_pad, ctx);
+					    before_hdr_pad, ctx, client_ctx);
 	ctx->slot_size +=
 		lib_ring_buffer_align(*o_begin + ctx->slot_size,
 				      ctx->largest_align) + ctx->data_size;
@@ -155,7 +156,8 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *c
  */
 static inline
 int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
-			    struct lttng_ust_lib_ring_buffer_ctx *ctx)
+			    struct lttng_ust_lib_ring_buffer_ctx *ctx,
+			    void *client_ctx)
 {
 	struct channel *chan = ctx->chan;
 	struct lttng_ust_shm_handle *handle = ctx->handle;
@@ -163,21 +165,23 @@ int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *confi
 	unsigned long o_begin, o_end, o_old;
 	size_t before_hdr_pad = 0;
 
-	if (uatomic_read(&chan->record_disabled))
+	if (caa_unlikely(uatomic_read(&chan->record_disabled)))
 		return -EAGAIN;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
 		buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
 	else
 		buf = shmp(handle, chan->backend.buf[0].shmp);
-	if (uatomic_read(&buf->record_disabled))
+	if (caa_unlikely(!buf))
+		return -EIO;
+	if (caa_unlikely(uatomic_read(&buf->record_disabled)))
 		return -EAGAIN;
 	ctx->buf = buf;
 
 	/*
 	 * Perform retryable operations.
 	 */
-	if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
+	if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, client_ctx, &o_begin,
 						     &o_end, &o_old, &before_hdr_pad)))
 		goto slow_path;
 
@@ -208,7 +212,7 @@ int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *confi
 	ctx->buf_offset = o_begin + before_hdr_pad;
 	return 0;
 slow_path:
-	return lib_ring_buffer_reserve_slow(ctx);
+	return lib_ring_buffer_reserve_slow(ctx, client_ctx);
 }
 
 /**
@@ -253,11 +257,16 @@ void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *confi
 	unsigned long offset_end = ctx->buf_offset;
 	unsigned long endidx = subbuf_index(offset_end - 1, chan);
 	unsigned long commit_count;
+	struct commit_counters_hot *cc_hot = shmp_index(handle,
+						buf->commit_hot, endidx);
+
+	if (caa_unlikely(!cc_hot))
+		return;
 
 	/*
 	 * Must count record before incrementing the commit count.
 	 */
-	subbuffer_count_record(config, &buf->backend, endidx, handle);
+	subbuffer_count_record(config, ctx, &buf->backend, endidx, handle);
 
 	/*
 	 * Order all writes to buffer before the commit count update that will
@@ -265,7 +274,7 @@
 	 */
 	cmm_smp_wmb();
 
-	v_add(config, ctx->slot_size, &shmp_index(handle, buf->commit_hot, endidx)->cc);
+	v_add(config, ctx->slot_size, &cc_hot->cc);
 
 	/*
 	 * commit count read can race with concurrent OOO commit count updates.
@@ -285,17 +294,16 @@
 	 * count reaches back the reserve offset for a specific sub-buffer,
 	 * which is completely independent of the order.
 	 */
-	commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);
+	commit_count = v_read(config, &cc_hot->cc);
 
 	lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
-				      commit_count, endidx, handle);
+				      commit_count, endidx, handle, ctx->tsc);
 	/*
 	 * Update used size at each commit. It's needed only for extracting
 	 * ring_buffer buffers from vmcore, after crash.
 	 */
-	lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
-					     ctx->buf_offset, commit_count,
-					     ctx->slot_size, handle);
+	lib_ring_buffer_write_commit_counter(config, buf, chan,
+					     offset_end, commit_count, handle, cc_hot);
 }
 
 /**
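
Net effect of the diff: an opaque client_ctx pointer is threaded from lib_ring_buffer_reserve() down to record_header_size() and lib_ring_buffer_reserve_slow(); the RCU read-side lock is dropped from the lib_ring_buffer_get_cpu()/lib_ring_buffer_put_cpu() pair, leaving the TLS nesting count as the sole recursion guard; and shmp()/shmp_index() translations are null-checked before use (reserve returns -EIO, commit silently bails out). A minimal caller-side sketch of the updated fast path follows. It is an illustration, not part of the diff: trace_record() is a hypothetical helper, and the lib_ring_buffer_ctx_init() and lib_ring_buffer_write() calls, including their argument order, are assumed from the lttng-ust 2.x ring buffer headers.

/*
 * Hypothetical sketch (not from this diff): reserve a slot, write a
 * payload, and commit it, forwarding the opaque client_ctx that the
 * client's record_header_size() callback now receives.
 * Assumes "frontend_api.h" and "backend.h" are included.
 */
static int trace_record(const struct lttng_ust_lib_ring_buffer_config *client_config,
			struct channel *chan,
			struct lttng_ust_shm_handle *handle,
			void *client_ctx,
			const void *payload, size_t len)
{
	struct lttng_ust_lib_ring_buffer_ctx ctx;
	int ret, cpu;

	cpu = lib_ring_buffer_get_cpu(client_config);	/* bumps the TLS nesting count */
	if (cpu < 0)
		return cpu;	/* -EPERM: nesting count too high */

	/* Assumed 2.x signature: (ctx, chan, priv, data_size, largest_align, cpu, handle). */
	lib_ring_buffer_ctx_init(&ctx, chan, NULL, len, 1, cpu, handle);

	ret = lib_ring_buffer_reserve(client_config, &ctx, client_ctx);
	if (ret)
		goto put;	/* -EAGAIN (disabled), -EIO (shmp failure), or slow-path error */

	lib_ring_buffer_write(client_config, &ctx, payload, len);
	lib_ring_buffer_commit(client_config, &ctx);
put:
	lib_ring_buffer_put_cpu(client_config);	/* drops the nesting count */
	return ret;
}

Two details worth noting from the diff itself: buffer lookup failures now surface to such a caller as -EIO instead of a null dereference inside reserve, and caching the commit_counters_hot pointer in lib_ring_buffer_commit() translates shmp_index() once per commit rather than twice.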