Fix: wrapper random documentation
diff --git a/lib/ringbuffer/frontend_api.h b/lib/ringbuffer/frontend_api.h
index 8a58ace456ad29ab3235484ec52a01e15bc6e6d3..3c46a1774814d305c6cc21449b7857b9e7bbd599 100644
--- a/lib/ringbuffer/frontend_api.h
+++ b/lib/ringbuffer/frontend_api.h
@@ -1,24 +1,22 @@
-#ifndef _LINUX_RING_BUFFER_FRONTEND_API_H
-#define _LINUX_RING_BUFFER_FRONTEND_API_H
-
-/*
- * linux/ringbuffer/frontend_api.h
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
  *
- * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * lib/ringbuffer/frontend_api.h
  *
  * Ring Buffer Library Synchronization Header (buffer write API).
  *
- * Author:
- *     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  *
  * See ring_buffer_frontend.c for more information on wait-free algorithms.
  * See linux/ringbuffer/frontend.h for channel allocation and read-side API.
- *
- * Dual LGPL v2.1/GPL v2 license.
  */
 
-#include "../../wrapper/ringbuffer/frontend.h"
+#ifndef _LIB_RING_BUFFER_FRONTEND_API_H
+#define _LIB_RING_BUFFER_FRONTEND_API_H
+
+#include <wrapper/ringbuffer/frontend.h>
+#include <wrapper/percpu-defs.h>
 #include <linux/errno.h>
+#include <linux/prefetch.h>
 
 /**
  * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
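
Note on the hunk above: besides the SPDX-tag relicensing and the updated copyright line, the relative quote-include "../../wrapper/ringbuffer/frontend.h" becomes a bracket include resolved from the tree root, and <wrapper/percpu-defs.h> and <linux/prefetch.h> are added for helpers used further down (lttng_this_cpu_ptr() and prefetch()). Bracket includes of in-tree headers presuppose that the module's top directory is on kbuild's header search path; a typical way to arrange that, assumed here rather than shown in this diff:

    # Hypothetical kbuild fragment: put the module's top dir on the include path.
    ccflags-y += -I$(src)
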
@@ -44,7 +42,7 @@ int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
        nesting = ++per_cpu(lib_ring_buffer_nesting, cpu);
        barrier();
 
-       if (unlikely(nesting > 4)) {
+       if (unlikely(nesting > RING_BUFFER_MAX_NESTING)) {
                WARN_ON_ONCE(1);
                per_cpu(lib_ring_buffer_nesting, cpu)--;
                rcu_read_unlock_sched_notrace();
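
The hunk above replaces the magic nesting bound 4 with RING_BUFFER_MAX_NESTING. The guard caps how many reservations may nest on one CPU, presumably one per context class that can preempt another (kernel thread, softirq, irq, NMI). A plausible definition, assumed since the constant is defined outside this diff:

    /*
     * Assumed definition: one nesting level per execution context that can
     * interrupt another one (kernel thread, softirq, irq, NMI).
     */
    #define RING_BUFFER_MAX_NESTING 4
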
@@ -60,7 +58,7 @@ static inline
 void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
 {
        barrier();
-       __get_cpu_var(lib_ring_buffer_nesting)--;
+       (*lttng_this_cpu_ptr(&lib_ring_buffer_nesting))--;
        rcu_read_unlock_sched_notrace();
 }
 
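
__get_cpu_var() was removed from mainline kernels, so the nesting decrement now goes through lttng_this_cpu_ptr() from the newly included <wrapper/percpu-defs.h>. A sketch of how such a compatibility shim is commonly structured; only the wrapper's name comes from this diff, the cutoff version and the fallback are assumptions:

    /* Hypothetical sketch of wrapper/percpu-defs.h. */
    #include <linux/version.h>
    #include <linux/percpu-defs.h>

    #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0))
    #define lttng_this_cpu_ptr(ptr)    this_cpu_ptr(ptr)
    #else
    #define lttng_this_cpu_ptr(ptr)    (&__get_cpu_var(*(ptr)))
    #endif
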
@@ -73,6 +71,7 @@ void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
 static inline
 int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
                                struct lib_ring_buffer_ctx *ctx,
+                               void *client_ctx,
                                unsigned long *o_begin, unsigned long *o_end,
                                unsigned long *o_old, size_t *before_hdr_pad)
 {
@@ -93,14 +92,13 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
        prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
 
        if (last_tsc_overflow(config, buf, ctx->tsc))
-               ctx->rflags = RING_BUFFER_RFLAG_FULL_TSC;
+               ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
 
        if (unlikely(subbuf_offset(*o_begin, chan) == 0))
                return 1;
 
        ctx->slot_size = record_header_size(config, chan, *o_begin,
-                                           ctx->data_size, before_hdr_pad,
-                                           ctx->rflags, ctx);
+                                           before_hdr_pad, ctx, client_ctx);
        ctx->slot_size +=
                lib_ring_buffer_align(*o_begin + ctx->slot_size,
                                      ctx->largest_align) + ctx->data_size;
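
Two distinct changes sit in the hunk above. The switch from = to |= on rflags is a behaviour fix: plain assignment clobbered any flag a caller had already set on the context, while OR-ing merely adds RING_BUFFER_RFLAG_FULL_TSC. A minimal illustration (the EXAMPLE flag is made up):

    ctx->rflags = RING_BUFFER_RFLAG_EXAMPLE;    /* set earlier by the client */
    ctx->rflags = RING_BUFFER_RFLAG_FULL_TSC;   /* old code: EXAMPLE is lost */
    ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;  /* new code: both bits survive */

The second change reworks the record_header_size() callback: data_size and rflags are dropped as separate arguments (both remain reachable through ctx), and the opaque client_ctx pointer, threaded in from the reserve path, is passed instead.
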
@@ -142,28 +140,29 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
 
 static inline
 int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
-                           struct lib_ring_buffer_ctx *ctx)
+                           struct lib_ring_buffer_ctx *ctx,
+                           void *client_ctx)
 {
        struct channel *chan = ctx->chan;
        struct lib_ring_buffer *buf;
        unsigned long o_begin, o_end, o_old;
        size_t before_hdr_pad = 0;
 
-       if (atomic_read(&chan->record_disabled))
+       if (unlikely(atomic_read(&chan->record_disabled)))
                return -EAGAIN;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
                buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
        else
                buf = chan->backend.buf;
-       if (atomic_read(&buf->record_disabled))
+       if (unlikely(atomic_read(&buf->record_disabled)))
                return -EAGAIN;
        ctx->buf = buf;
 
        /*
         * Perform retryable operations.
         */
-       if (unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
+       if (unlikely(lib_ring_buffer_try_reserve(config, ctx, client_ctx, &o_begin,
                                                 &o_end, &o_old, &before_hdr_pad)))
                goto slow_path;
 
@@ -194,7 +193,7 @@ int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
        ctx->buf_offset = o_begin + before_hdr_pad;
        return 0;
 slow_path:
-       return lib_ring_buffer_reserve_slow(ctx);
+       return lib_ring_buffer_reserve_slow(ctx, client_ctx);
 }
 
 /**
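
With these hunks, lib_ring_buffer_reserve() threads an opaque client_ctx through the try path and the slow path down to the client's record_header_size() callback, and the record-disabled checks gain unlikely() annotations on what is the tracing hot path. A rough caller-side sketch under the new signature; everything not visible in this diff (lib_ring_buffer_ctx_init(), lib_ring_buffer_write(), lttng_alignof()) is recalled from the surrounding tree and should be treated as an assumption:

    /* Hypothetical client write path pairing get_cpu/put_cpu with reserve/commit. */
    static int write_sample(const struct lib_ring_buffer_config *config,
                            struct channel *chan, unsigned int value)
    {
            struct lib_ring_buffer_ctx ctx;
            void *client_ctx = NULL;        /* client-private state, may stay NULL */
            int cpu, ret;

            cpu = lib_ring_buffer_get_cpu(config);  /* disables preemption */
            if (cpu < 0)
                    return -EPERM;                  /* nesting limit reached */
            lib_ring_buffer_ctx_init(&ctx, chan, NULL, sizeof(value),
                                     lttng_alignof(value), cpu);
            ret = lib_ring_buffer_reserve(config, &ctx, client_ctx);
            if (ret)
                    goto put;                       /* e.g. -EAGAIN: record disabled */
            lib_ring_buffer_write(config, &ctx, &value, sizeof(value));
            lib_ring_buffer_commit(config, &ctx);
    put:
            lib_ring_buffer_put_cpu(config);
            return ret;
    }
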
@@ -237,6 +236,7 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
        unsigned long offset_end = ctx->buf_offset;
        unsigned long endidx = subbuf_index(offset_end - 1, chan);
        unsigned long commit_count;
+       struct commit_counters_hot *cc_hot = &buf->commit_hot[endidx];
 
        /*
         * Must count record before incrementing the commit count.
@@ -257,7 +257,7 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
        } else
                smp_wmb();
 
-       v_add(config, ctx->slot_size, &buf->commit_hot[endidx].cc);
+       v_add(config, ctx->slot_size, &cc_hot->cc);
 
        /*
         * commit count read can race with concurrent OOO commit count updates.
@@ -277,17 +277,16 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
         *   count reaches back the reserve offset for a specific sub-buffer,
         *   which is completely independent of the order.
         */
-       commit_count = v_read(config, &buf->commit_hot[endidx].cc);
+       commit_count = v_read(config, &cc_hot->cc);
 
        lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
-                                     commit_count, endidx);
+                                     commit_count, endidx, ctx->tsc);
        /*
         * Update used size at each commit. It's needed only for extracting
         * ring_buffer buffers from vmcore, after crash.
         */
-       lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
-                                            ctx->buf_offset, commit_count,
-                                        ctx->slot_size);
+       lib_ring_buffer_write_commit_counter(config, buf, chan,
+                       offset_end, commit_count, cc_hot);
 }
 
 /**
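
The commit-side hunks hoist the &buf->commit_hot[endidx] lookup into the local cc_hot pointer, so the hot cache line is addressed once and reused by v_add(), v_read() and the reworked lib_ring_buffer_write_commit_counter(), which now takes the pointer (and offset_end) instead of recomputing the slot from endidx; lib_ring_buffer_check_deliver() additionally receives ctx->tsc. The hoist itself is plain common-subexpression elimination by hand:

    /* Before: the array index is recomputed at every access. */
    v_add(config, ctx->slot_size, &buf->commit_hot[endidx].cc);
    commit_count = v_read(config, &buf->commit_hot[endidx].cc);

    /* After: one lookup, one pointer, fewer address computations. */
    struct commit_counters_hot *cc_hot = &buf->commit_hot[endidx];
    v_add(config, ctx->slot_size, &cc_hot->cc);
    commit_count = v_read(config, &cc_hot->cc);
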
@@ -356,4 +355,4 @@ void lib_ring_buffer_record_enable(const struct lib_ring_buffer_config *config,
        atomic_dec(&buf->record_disabled);
 }
 
-#endif /* _LINUX_RING_BUFFER_FRONTEND_API_H */
+#endif /* _LIB_RING_BUFFER_FRONTEND_API_H */