X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=libringbuffer%2Ffrontend_api.h;h=5ddb8ac460951f0bae59a3f098cf6bca510a3799;hb=f52a5702dbeb8125ca373ba363b29cbff3042cc1;hp=56dbef2aa4326c8a4067ecc96ada66b887358afd;hpb=80249235cb6560c6bb915f1cd486aa5017f87456;p=lttng-ust.git

diff --git a/libringbuffer/frontend_api.h b/libringbuffer/frontend_api.h
index 56dbef2a..5ddb8ac4 100644
--- a/libringbuffer/frontend_api.h
+++ b/libringbuffer/frontend_api.h
@@ -37,10 +37,10 @@
 /**
  * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
  *
- * Grabs RCU read-side lock and keeps a ring buffer nesting count as
- * supplementary safety net to ensure tracer client code will never
- * trigger an endless recursion. Returns the processor ID on success,
- * -EPERM on failure (nesting count too high).
+ * Keeps a ring buffer nesting count as supplementary safety net to
+ * ensure tracer client code will never trigger an endless recursion.
+ * Returns the processor ID on success, -EPERM on failure (nesting count
+ * too high).
  *
  * asm volatile and "memory" clobber prevent the compiler from moving
  * instructions out of the ring buffer nesting count. This is required to ensure
@@ -53,7 +53,6 @@ int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *confi
 {
 	int cpu, nesting;
 
-	rcu_read_lock();
 	cpu = lttng_ust_get_cpu();
 	nesting = ++URCU_TLS(lib_ring_buffer_nesting);
 	cmm_barrier();
@@ -61,7 +60,6 @@ int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *confi
 	if (caa_unlikely(nesting > 4)) {
 		WARN_ON_ONCE(1);
 		URCU_TLS(lib_ring_buffer_nesting)--;
-		rcu_read_unlock();
 		return -EPERM;
 	} else
 		return cpu;
@@ -75,7 +73,6 @@ void lib_ring_buffer_put_cpu(const struct lttng_ust_lib_ring_buffer_config *conf
 {
 	cmm_barrier();
 	URCU_TLS(lib_ring_buffer_nesting)--;	/* TLS */
-	rcu_read_unlock();
 }
 
 /*
@@ -163,14 +160,14 @@ int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *confi
 	unsigned long o_begin, o_end, o_old;
 	size_t before_hdr_pad = 0;
 
-	if (uatomic_read(&chan->record_disabled))
+	if (caa_unlikely(uatomic_read(&chan->record_disabled)))
 		return -EAGAIN;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
 		buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
 	else
 		buf = shmp(handle, chan->backend.buf[0].shmp);
-	if (uatomic_read(&buf->record_disabled))
+	if (caa_unlikely(uatomic_read(&buf->record_disabled)))
 		return -EAGAIN;
 
 	ctx->buf = buf;
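
For context, the two helpers touched above are meant to bracket every
reserve/commit cycle: after this change they only manage the per-thread
nesting count (plus compiler barriers) instead of also holding an RCU
read-side lock across the event. Below is a minimal caller sketch under
stated assumptions, not code from this header: trace_one_event() and
init_ctx_for_cpu() are hypothetical stand-ins for tracer client code,
and lib_ring_buffer_commit() is assumed to be the commit counterpart of
lib_ring_buffer_reserve() in the same header.

/*
 * Illustrative sketch only, not part of frontend_api.h: the expected
 * bracketing of one reserve/commit cycle with the nesting-count
 * helpers. trace_one_event() and init_ctx_for_cpu() are hypothetical.
 */
static int trace_one_event(const struct lttng_ust_lib_ring_buffer_config *config,
			   struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	int ret, cpu;

	cpu = lib_ring_buffer_get_cpu(config);	/* bumps the TLS nesting count */
	if (cpu < 0)
		return cpu;			/* -EPERM: nested more than 4 deep */
	init_ctx_for_cpu(ctx, cpu);		/* hypothetical ctx setup */

	ret = lib_ring_buffer_reserve(config, ctx);
	if (ret)
		goto put;			/* e.g. -EAGAIN: recording disabled */
	/* ... write the event payload into the reserved slot ... */
	lib_ring_buffer_commit(config, ctx);
put:
	lib_ring_buffer_put_cpu(config);	/* always drop the nesting count */
	return ret;
}

Note the symmetry: every successful lib_ring_buffer_get_cpu() must be
paired with a lib_ring_buffer_put_cpu(), even when the reserve fails,
so the TLS nesting count always returns to its previous value.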