fix: mm/vmscan: simplify trace_reclaim_flags and trace_shrink_flags (v5.2)
[lttng-modules.git] / lib/ringbuffer/frontend_api.h
index 31ccbfad020c5a1afa1b8570f1e6be0dd28438fa..a6f2c6f4f16742a4b35dafaeafcc25eaa13e1a61 100644
@@ -29,7 +29,8 @@
  * See linux/ringbuffer/frontend.h for channel allocation and read-side API.
  */
 
-#include "../../wrapper/ringbuffer/frontend.h"
+#include <wrapper/ringbuffer/frontend.h>
+#include <wrapper/percpu-defs.h>
 #include <linux/errno.h>
 #include <linux/prefetch.h>
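
Note: this hunk replaces a fragile relative include with a system-style include rooted at the lttng-modules tree, and pulls in a per-CPU accessor wrapper. The wrapper is needed because __get_cpu_var() was removed from recent kernels in favor of this_cpu_ptr(). A minimal sketch of what wrapper/percpu-defs.h could look like (hypothetical; the shipped header and version cutoff may differ):

#include <linux/version.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
#include <linux/percpu-defs.h>
/* Newer kernels: this_cpu_ptr() is the canonical accessor. */
#define lttng_this_cpu_ptr(ptr)	this_cpu_ptr(ptr)
#else
#include <linux/percpu.h>
/* Older kernels: emulate this_cpu_ptr() on top of __get_cpu_var(). */
#define lttng_this_cpu_ptr(ptr)	(&__get_cpu_var(*(ptr)))
#endif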
 
@@ -73,7 +74,7 @@ static inline
 void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
 {
        barrier();
-       __get_cpu_var(lib_ring_buffer_nesting)--;
+       (*lttng_this_cpu_ptr(&lib_ring_buffer_nesting))--;
        rcu_read_unlock_sched_notrace();
 }
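
Note: the per-CPU nesting counter guards against recursive tracing on the same CPU. For symmetry, the acquire side presumably increments the same variable through the wrapper; a sketch of what the matching lib_ring_buffer_get_cpu() might look like under that assumption (the nesting bound and error handling are illustrative, not verbatim):

static inline
int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
{
	int cpu, nesting;

	rcu_read_lock_sched_notrace();
	cpu = smp_processor_id();
	nesting = ++(*lttng_this_cpu_ptr(&lib_ring_buffer_nesting));
	barrier();

	/* Refuse to nest deeper than a small fixed bound (illustrative). */
	if (unlikely(nesting > 4)) {
		WARN_ON_ONCE(1);
		(*lttng_this_cpu_ptr(&lib_ring_buffer_nesting))--;
		rcu_read_unlock_sched_notrace();
		return -EPERM;
	}
	return cpu;
}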
 
@@ -161,14 +162,14 @@ int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
        unsigned long o_begin, o_end, o_old;
        size_t before_hdr_pad = 0;
 
-       if (atomic_read(&chan->record_disabled))
+       if (unlikely(atomic_read(&chan->record_disabled)))
                return -EAGAIN;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
                buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
        else
                buf = chan->backend.buf;
-       if (atomic_read(&buf->record_disabled))
+       if (unlikely(atomic_read(&buf->record_disabled)))
                return -EAGAIN;
        ctx->buf = buf;
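
Note: wrapping the record_disabled checks in unlikely() tells the compiler that tracing is normally enabled, keeping the common reserve path free of taken branches. In the kernel this hint is just a thin wrapper around a GCC builtin:

/* From <linux/compiler.h>: the condition is expected to be false. */
#define unlikely(x)	__builtin_expect(!!(x), 0)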
 
@@ -249,6 +250,7 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
        unsigned long offset_end = ctx->buf_offset;
        unsigned long endidx = subbuf_index(offset_end - 1, chan);
        unsigned long commit_count;
+       struct commit_counters_hot *cc_hot = &buf->commit_hot[endidx];
 
        /*
         * Must count record before incrementing the commit count.
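
Note: the new cc_hot local caches the address of the hot commit counters for the sub-buffer being committed, so the endidx array indexing is computed once instead of at every access below. A plausible shape for that structure (assumed; the real definition lives in the ring buffer frontend types):

struct commit_counters_hot {
	union v_atomic cc;	/* commit counter for this sub-buffer */
	union v_atomic seq;	/* consecutive commits, for crash extraction */
};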
@@ -269,7 +271,7 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
        } else
                smp_wmb();
 
-       v_add(config, ctx->slot_size, &buf->commit_hot[endidx].cc);
+       v_add(config, ctx->slot_size, &cc_hot->cc);
 
        /*
         * commit count read can race with concurrent OOO commit count updates.
@@ -289,7 +291,7 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
         *   count reaches back the reserve offset for a specific sub-buffer,
         *   which is completely independent of the order.
         */
-       commit_count = v_read(config, &buf->commit_hot[endidx].cc);
+       commit_count = v_read(config, &cc_hot->cc);
 
        lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
                                      commit_count, endidx, ctx->tsc);
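
Note: v_add() and v_read() operate on the library's union v_atomic counters, which overlay local_t, atomic_long_t and a plain long so the access flavor can match the configured synchronization mode. A simplified, assumed sketch of the idea:

union v_atomic {
	local_t l;
	atomic_long_t a;
	long v;
};

static inline
void v_add(const struct lib_ring_buffer_config *config, long v,
	   union v_atomic *v_a)
{
	if (config->sync == RING_BUFFER_SYNC_PER_CPU)
		local_add(v, &v_a->l);		/* cheap, CPU-local update */
	else
		atomic_long_add(v, &v_a->a);	/* cross-CPU safe update */
}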
@@ -297,9 +299,8 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
         * Update used size at each commit. It's needed only for extracting
         * ring_buffer buffers from vmcore, after crash.
         */
-       lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
-                                            ctx->buf_offset, commit_count,
-                                        ctx->slot_size);
+       lib_ring_buffer_write_commit_counter(config, buf, chan,
+                       offset_end, commit_count, cc_hot);
 }
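
Note: the helper now receives the end offset of the committed record and the cached cc_hot pointer instead of buf_offset/endidx/slot_size, letting it reuse the already-hot cache line. Under that reading, its body might look roughly like this (assumed sketch, not the verbatim implementation):

static inline
void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *config,
					  struct lib_ring_buffer *buf,
					  struct channel *chan,
					  unsigned long buf_offset,
					  unsigned long commit_count,
					  struct commit_counters_hot *cc_hot)
{
	unsigned long commit_seq_old;

	if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
		return;

	/*
	 * Only record the committed position once the commit count has
	 * caught up with the reserve position within this sub-buffer.
	 */
	if (unlikely(subbuf_offset(buf_offset - commit_count, chan)))
		return;

	commit_seq_old = v_read(config, &cc_hot->seq);
	if (likely((long) (commit_seq_old - commit_count) < 0))
		v_set(config, &cc_hot->seq, commit_count);
}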
 
 /**