Performance: Mark channel and buffer event disabled check unlikely
diff --git a/libringbuffer/frontend_api.h b/libringbuffer/frontend_api.h
index b5406b0ef9d3678d44f706b23d66117b5659fd9e..5ddb8ac460951f0bae59a3f098cf6bca510a3799 100644
 /**
  * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
  *
- * Grabs RCU read-side lock and keeps a ring buffer nesting count as
- * supplementary safety net to ensure tracer client code will never
- * trigger an endless recursion. Returns the processor ID on success,
- * -EPERM on failure (nesting count too high).
+ * Keeps a ring buffer nesting count as a supplementary safety net to
+ * ensure tracer client code will never trigger an endless recursion.
+ * Returns the processor ID on success, -EPERM on failure (nesting count
+ * too high).
  *
  * asm volatile and "memory" clobber prevent the compiler from moving
  * instructions out of the ring buffer nesting count. This is required to ensure
  * that probe side-effects which can cause recursion (e.g. unforeseen traps,
  * divisions by 0, ...) are triggered within the incremented nesting count
  * section.
  */
@@ -53,7 +53,6 @@ int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *confi
 {
        int cpu, nesting;
 
-       rcu_read_lock();
        cpu = lttng_ust_get_cpu();
        nesting = ++URCU_TLS(lib_ring_buffer_nesting);
        cmm_barrier();
@@ -61,7 +60,6 @@ int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *confi
        if (caa_unlikely(nesting > 4)) {
                WARN_ON_ONCE(1);
                URCU_TLS(lib_ring_buffer_nesting)--;
-               rcu_read_unlock();
                return -EPERM;
        } else
                return cpu;
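To make the nesting-count safety net concrete, here is a minimal self-contained sketch of the same pattern. The TLS counter, the 4-level bound, and the compiler barriers mirror the hunk above; the names tracer_enter()/tracer_exit() are hypothetical stand-ins, not the library API.

#include <errno.h>
#include <urcu/compiler.h>	/* cmm_barrier(), caa_unlikely() from liburcu */

/* Per-thread counter, analogous to URCU_TLS(lib_ring_buffer_nesting). */
static __thread int nesting;

static inline int tracer_enter(void)
{
	int n = ++nesting;

	/*
	 * As in lib_ring_buffer_get_cpu(), the compiler barrier keeps probe
	 * side-effects inside the incremented-count section, so a recursive
	 * trap cannot run against a stale count.
	 */
	cmm_barrier();
	if (caa_unlikely(n > 4)) {	/* same bound as the check above */
		nesting--;
		return -EPERM;
	}
	return 0;
}

static inline void tracer_exit(void)
{
	cmm_barrier();
	nesting--;
}

A probe would call tracer_enter() before reserving and tracer_exit() after committing; a fifth-level reentry fails with -EPERM instead of recursing without bound.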
@@ -75,7 +73,6 @@ void lib_ring_buffer_put_cpu(const struct lttng_ust_lib_ring_buffer_config *conf
 {
        cmm_barrier();
        URCU_TLS(lib_ring_buffer_nesting)--;            /* TLS */
-       rcu_read_unlock();
 }
 
 /*
@@ -126,6 +123,14 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *c
         * boundary. It's safe to write.
         */
        *o_end = *o_begin + ctx->slot_size;
+
+       if (caa_unlikely((subbuf_offset(*o_end, chan)) == 0))
+               /*
+                * The offset_end will fall at the very beginning of the next
+                * subbuffer.
+                */
+               return 1;
+
        return 0;
 }
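For what the new boundary test computes: a small sketch, assuming power-of-two subbuffer sizes so that subbuf_offset() reduces to a mask; SUBBUF_SIZE and the function name are illustrative only.

#define SUBBUF_SIZE 4096UL	/* assumed power-of-two subbuffer size */

/* Offset within the current subbuffer; 0 means "exactly on a boundary". */
static inline unsigned long subbuf_offset_sketch(unsigned long offset)
{
	return offset & (SUBBUF_SIZE - 1);
}

/*
 * E.g. with *o_begin == 3996 and ctx->slot_size == 100, *o_end == 4096
 * and subbuf_offset_sketch(4096) == 0: the record ends flush with the
 * subbuffer, so try_reserve returns 1 and the caller falls back to the
 * slow path, which handles the subbuffer switch.
 */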
 
@@ -155,14 +160,14 @@ int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *confi
        unsigned long o_begin, o_end, o_old;
        size_t before_hdr_pad = 0;
 
-       if (uatomic_read(&chan->record_disabled))
+       if (caa_unlikely(uatomic_read(&chan->record_disabled)))
                return -EAGAIN;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
                buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
        else
                buf = shmp(handle, chan->backend.buf[0].shmp);
-       if (uatomic_read(&buf->record_disabled))
+       if (caa_unlikely(uatomic_read(&buf->record_disabled)))
                return -EAGAIN;
        ctx->buf = buf;
 
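The only change in this hunk is the branch hint: caa_unlikely() is liburcu's wrapper around GCC's __builtin_expect(), which keeps the tracing-enabled path as the straight-line fall-through. A minimal sketch (the function name is hypothetical):

#include <errno.h>
#include <urcu/compiler.h>	/* caa_unlikely(x): __builtin_expect(!!(x), 0) */

static inline int record_disabled_check_sketch(int record_disabled)
{
	/* Disabled tracing is the rare case; hint the compiler accordingly. */
	if (caa_unlikely(record_disabled))
		return -EAGAIN;
	return 0;
}

Since reserve runs on every traced event, moving the disabled checks off the predicted path is a cheap win on the fast path.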
@@ -280,14 +285,13 @@ void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *confi
        commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);
 
        lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
-                                     commit_count, endidx, handle);
+                                     commit_count, endidx, handle, ctx->tsc);
        /*
         * Update used size at each commit. It's needed only for extracting
         * ring_buffer buffers from vmcore, after crash.
         */
        lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
-                                            ctx->buf_offset, commit_count,
-                                            ctx->slot_size, handle);
+                       offset_end, commit_count, handle);
 }
 
 /**
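To illustrate what the commit path above accounts for, a minimal sketch of per-subbuffer commit counting under assumed power-of-two sizes; commit_sketch(), SUBBUF_SIZE and NUM_SUBBUF are hypothetical stand-ins, not the library's actual layout.

#define SUBBUF_SIZE 4096UL
#define NUM_SUBBUF  4UL

static unsigned long commit_hot_cc[NUM_SUBBUF];	/* one hot counter per subbuffer */

static void commit_sketch(unsigned long offset_end, unsigned long slot_size)
{
	/*
	 * offset_end - 1 stays inside the subbuffer the record was written
	 * to, even when the record ends exactly on a boundary; this is why
	 * check_deliver above is called with offset_end - 1.
	 */
	unsigned long endidx = ((offset_end - 1) / SUBBUF_SIZE) % NUM_SUBBUF;

	commit_hot_cc[endidx] += slot_size;
	/*
	 * Once the counter covers a full subbuffer it can be delivered to
	 * the consumer; that is the condition lib_ring_buffer_check_deliver()
	 * tests, now with ctx->tsc passed in so the delivery path has the
	 * record timestamp.
	 */
}

The signature change to lib_ring_buffer_write_commit_counter() follows the same idea: the caller already knows offset_end, so passing it directly avoids recomputing it in the callee from buf_offset and slot_size.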