diff --git a/libringbuffer/frontend_api.h b/libringbuffer/frontend_api.h
index 6a06bba2..14015973 100644
--- a/libringbuffer/frontend_api.h
+++ b/libringbuffer/frontend_api.h
@@ -37,10 +37,10 @@
 /**
  * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
  *
- * Grabs RCU read-side lock and keeps a ring buffer nesting count as
- * supplementary safety net to ensure tracer client code will never
- * trigger an endless recursion. Returns the processor ID on success,
- * -EPERM on failure (nesting count too high).
+ * Keeps a ring buffer nesting count as supplementary safety net to
+ * ensure tracer client code will never trigger an endless recursion.
+ * Returns the processor ID on success, -EPERM on failure (nesting count
+ * too high).
  *
  * asm volatile and "memory" clobber prevent the compiler from moving
  * instructions out of the ring buffer nesting count. This is required to ensure
@@ -53,15 +53,13 @@ int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *confi
 {
 	int cpu, nesting;
 
-	rcu_read_lock();
 	cpu = lttng_ust_get_cpu();
-	nesting = ++lib_ring_buffer_nesting;	/* TLS */
+	nesting = ++URCU_TLS(lib_ring_buffer_nesting);
 	cmm_barrier();
 
 	if (caa_unlikely(nesting > 4)) {
 		WARN_ON_ONCE(1);
-		lib_ring_buffer_nesting--;	/* TLS */
-		rcu_read_unlock();
+		URCU_TLS(lib_ring_buffer_nesting)--;
 		return -EPERM;
 	} else
 		return cpu;
@@ -74,8 +72,7 @@ static inline
 void lib_ring_buffer_put_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
 {
 	cmm_barrier();
-	lib_ring_buffer_nesting--;	/* TLS */
-	rcu_read_unlock();
+	URCU_TLS(lib_ring_buffer_nesting)--;	/* TLS */
 }
 
 /*
@@ -163,14 +160,16 @@ int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *confi
 	unsigned long o_begin, o_end, o_old;
 	size_t before_hdr_pad = 0;
 
-	if (uatomic_read(&chan->record_disabled))
+	if (caa_unlikely(uatomic_read(&chan->record_disabled)))
 		return -EAGAIN;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
 		buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
 	else
 		buf = shmp(handle, chan->backend.buf[0].shmp);
-	if (uatomic_read(&buf->record_disabled))
+	if (caa_unlikely(!buf))
+		return -EIO;
+	if (caa_unlikely(uatomic_read(&buf->record_disabled)))
 		return -EAGAIN;
 	ctx->buf = buf;
 
@@ -253,11 +252,16 @@ void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *confi
 	unsigned long offset_end = ctx->buf_offset;
 	unsigned long endidx = subbuf_index(offset_end - 1, chan);
 	unsigned long commit_count;
+	struct commit_counters_hot *cc_hot = shmp_index(handle,
+						buf->commit_hot, endidx);
+
+	if (caa_unlikely(!cc_hot))
+		return;
 
 	/*
 	 * Must count record before incrementing the commit count.
 	 */
-	subbuffer_count_record(config, &buf->backend, endidx, handle);
+	subbuffer_count_record(config, ctx, &buf->backend, endidx, handle);
 
 	/*
 	 * Order all writes to buffer before the commit count update that will
@@ -265,7 +269,7 @@
 	 */
 	cmm_smp_wmb();
 
-	v_add(config, ctx->slot_size, &shmp_index(handle, buf->commit_hot, endidx)->cc);
+	v_add(config, ctx->slot_size, &cc_hot->cc);
 
 	/*
 	 * commit count read can race with concurrent OOO commit count updates.
@@ -285,17 +289,16 @@ void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *confi
 	 * count reaches back the reserve offset for a specific sub-buffer,
 	 * which is completely independent of the order.
 	 */
-	commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);
+	commit_count = v_read(config, &cc_hot->cc);
 
 	lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
-				      commit_count, endidx, handle);
+				      commit_count, endidx, handle, ctx->tsc);
 
 	/*
 	 * Update used size at each commit. It's needed only for extracting
 	 * ring_buffer buffers from vmcore, after crash.
 	 */
-	lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
-			ctx->buf_offset, commit_count,
-			ctx->slot_size, handle);
+	lib_ring_buffer_write_commit_counter(config, buf, chan,
+			offset_end, commit_count, handle, cc_hot);
 }
 
 /**
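
For context on how these hunks fit together: the fast path is a bracketed
get_cpu/reserve/commit/put_cpu sequence, and the error codes touched above
(-EPERM from the nesting check, -EAGAIN for disabled recording, and the new
-EIO when the shared-memory buffer pointer cannot be mapped) are what tracer
client code has to handle. A minimal sketch of that call sequence follows;
the reserve/commit parameter lists are truncated in the hunk headers, so
passing ctx alone (and the write_event_payload() helper) is an assumption
made for illustration, not the exact probe code shipped with lttng-ust:

	int cpu, ret;

	/* Takes the per-thread nesting count; fails with -EPERM when
	 * nesting exceeds 4, breaking endless recursion. */
	cpu = lib_ring_buffer_get_cpu(config);
	if (cpu < 0)
		return cpu;
	ctx->cpu = cpu;

	/* May return -EAGAIN (recording disabled) or, after this change,
	 * -EIO when shmp() yields a NULL buffer. */
	ret = lib_ring_buffer_reserve(config, ctx);
	if (ret) {
		lib_ring_buffer_put_cpu(config);	/* always balance the nesting count */
		return ret;
	}

	write_event_payload(ctx);	/* hypothetical helper: fill the reserved slot */

	/* Counts the record, orders payload writes (cmm_smp_wmb), bumps
	 * cc_hot->cc, and checks sub-buffer delivery. */
	lib_ring_buffer_commit(config, ctx);
	lib_ring_buffer_put_cpu(config);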