-#ifndef _LTTNG_RING_BUFFER_FRONTEND_API_H
-#define _LTTNG_RING_BUFFER_FRONTEND_API_H
-
/*
- * libringbuffer/frontend_api.h
+ * SPDX-License-Identifier: LGPL-2.1-only
*
* Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; only
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Ring Buffer Library Synchronization Header (buffer write API).
- *
- * Author:
- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
* See ring_buffer_frontend.c for more information on wait-free
* algorithms.
* See frontend.h for channel allocation and read-side API.
*/
-#include "frontend.h"
-#include <urcu-bp.h>
+#ifndef _LTTNG_RING_BUFFER_FRONTEND_API_H
+#define _LTTNG_RING_BUFFER_FRONTEND_API_H
+
+#include <stddef.h>
+
#include <urcu/compiler.h>
+#include "frontend.h"
+
/**
- * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
+ * lib_ring_buffer_nesting_inc - Ring buffer recursive use protection.
*
- * Grabs RCU read-side lock and keeps a ring buffer nesting count as
- * supplementary safety net to ensure tracer client code will never
- * trigger an endless recursion. Returns the processor ID on success,
- * -EPERM on failure (nesting count too high).
+ * The ring buffer nesting count is a safety net to ensure tracer
+ * client code will never trigger an endless recursion.
+ * Returns 0 on success, -EPERM on failure (nesting count too high).
*
* asm volatile and "memory" clobber prevent the compiler from moving
* instructions out of the ring buffer nesting count. This is required to ensure
 * that probe side-effects which can cause recursion are triggered within the
 * incremented nesting count section.
*/
static inline
-int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
+int lib_ring_buffer_nesting_inc(const struct lttng_ust_lib_ring_buffer_config *config)
{
- int cpu, nesting;
+ int nesting;
- rcu_read_lock();
- cpu = lttng_ust_get_cpu();
nesting = ++URCU_TLS(lib_ring_buffer_nesting);
cmm_barrier();
-
if (caa_unlikely(nesting > 4)) {
WARN_ON_ONCE(1);
URCU_TLS(lib_ring_buffer_nesting)--;
- rcu_read_unlock();
return -EPERM;
- } else
- return cpu;
+ }
+ return 0;
}
-/**
- * lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
- */
static inline
-void lib_ring_buffer_put_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
+void lib_ring_buffer_nesting_dec(const struct lttng_ust_lib_ring_buffer_config *config)
{
cmm_barrier();
URCU_TLS(lib_ring_buffer_nesting)--; /* TLS */
- rcu_read_unlock();
}
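
For illustration, a minimal sketch of how tracer client code might bracket a
tracing operation with the nesting guard; everything other than the
lib_ring_buffer_nesting_inc()/_dec() calls is hypothetical:

	if (lib_ring_buffer_nesting_inc(config) < 0)
		return;		/* Nesting too deep: drop the event. */
	/* ... reserve, write payload, commit (see below) ... */
	lib_ring_buffer_nesting_dec(config);

The cmm_barrier() calls in both helpers are compiler barriers that keep the
guarded instructions inside the counted region, even though the counter itself
is a plain TLS variable.
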
/*
 * lib_ring_buffer_try_reserve is called by lib_ring_buffer_reserve(). It is
 * not part of the API per se. Returns 0 if the reserve succeeded, or 1 if
 * the slow path must be taken.
 */
static inline
int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void *client_ctx,
unsigned long *o_begin, unsigned long *o_end,
unsigned long *o_old, size_t *before_hdr_pad)
{
- struct channel *chan = ctx->chan;
+ struct lttng_ust_lib_ring_buffer_channel *chan = ctx->chan;
struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
*o_begin = v_read(config, &buf->offset);
*o_old = *o_begin;
	if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
		return 1;

ctx->slot_size = record_header_size(config, chan, *o_begin,
- before_hdr_pad, ctx);
+ before_hdr_pad, ctx, client_ctx);
ctx->slot_size +=
- lib_ring_buffer_align(*o_begin + ctx->slot_size,
+ lttng_ust_lib_ring_buffer_align(*o_begin + ctx->slot_size,
ctx->largest_align) + ctx->data_size;
if (caa_unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
> chan->backend.subbuf_size))
		return 1;

	/*
	 * Record fits in the current buffer and we are not on a
	 * boundary. It's safe to write.
*/
*o_end = *o_begin + ctx->slot_size;
+
+	if (caa_unlikely(subbuf_offset(*o_end, chan) == 0))
+ /*
+ * The offset_end will fall at the very beginning of the next
+ * subbuffer.
+ */
+ return 1;
+
return 0;
}
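
The fast path sizes a slot as header + alignment padding + payload. As a
standalone illustration of the padding arithmetic (this helper is
hypothetical; it mirrors the usual power-of-two idiom that
lttng_ust_lib_ring_buffer_align() is expected to implement):

	#include <stddef.h>

	/*
	 * Padding bytes needed to bring offset up to alignment
	 * (alignment must be a power of two).
	 */
	static inline size_t example_align_padding(size_t offset, size_t alignment)
	{
		return (-offset) & (alignment - 1);
	}

For example, a record header ending at offset 13 with largest_align == 8
yields 3 bytes of padding, so the payload starts at offset 16.
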
static inline
int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx)
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
{
- struct channel *chan = ctx->chan;
- struct lttng_ust_shm_handle *handle = ctx->handle;
+ struct lttng_ust_lib_ring_buffer_channel *chan = ctx->chan;
+ struct lttng_ust_shm_handle *handle = ctx->chan->handle;
struct lttng_ust_lib_ring_buffer *buf;
unsigned long o_begin, o_end, o_old;
size_t before_hdr_pad = 0;
- if (uatomic_read(&chan->record_disabled))
+ if (caa_unlikely(uatomic_read(&chan->record_disabled)))
return -EAGAIN;
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
- else
+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+ ctx->reserve_cpu = lttng_ust_get_cpu();
+ buf = shmp(handle, chan->backend.buf[ctx->reserve_cpu].shmp);
+ } else {
buf = shmp(handle, chan->backend.buf[0].shmp);
- if (uatomic_read(&buf->record_disabled))
+ }
+ if (caa_unlikely(!buf))
+ return -EIO;
+ if (caa_unlikely(uatomic_read(&buf->record_disabled)))
return -EAGAIN;
ctx->buf = buf;
/*
* Perform retryable operations.
*/
- if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
+ if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, client_ctx, &o_begin,
&o_end, &o_old, &before_hdr_pad)))
goto slow_path;
ctx->buf_offset = o_begin + before_hdr_pad;
return 0;
slow_path:
- return lib_ring_buffer_reserve_slow(ctx);
+ return lib_ring_buffer_reserve_slow(ctx, client_ctx);
}
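
The fast path distinguishes several failure modes, and the policy for handling
them belongs to the caller. A hedged sketch of typical handling (dropping the
record is an assumption, not a requirement of this header):

	int ret;

	ret = lib_ring_buffer_reserve(config, ctx, client_ctx);
	if (caa_unlikely(ret)) {
		/*
		 * -EAGAIN: records disabled on the channel or buffer.
		 * -EIO:    shared-memory buffer mapping unavailable.
		 * Other negative values come from the slow path, e.g. a
		 * full buffer in discard mode.
		 */
		return;		/* Drop the record. */
	}
	/* ctx->buf_offset now points at the reserved, aligned slot. */
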
/**
 * lib_ring_buffer_commit - Commit a record.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context (input arguments only).
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 */
static inline
void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *config,
const struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
- struct channel *chan = ctx->chan;
- struct lttng_ust_shm_handle *handle = ctx->handle;
+ struct lttng_ust_lib_ring_buffer_channel *chan = ctx->chan;
+ struct lttng_ust_shm_handle *handle = ctx->chan->handle;
struct lttng_ust_lib_ring_buffer *buf = ctx->buf;
unsigned long offset_end = ctx->buf_offset;
unsigned long endidx = subbuf_index(offset_end - 1, chan);
unsigned long commit_count;
+ struct commit_counters_hot *cc_hot = shmp_index(handle,
+ buf->commit_hot, endidx);
+
+ if (caa_unlikely(!cc_hot))
+ return;
/*
* Must count record before incrementing the commit count.
*/
- subbuffer_count_record(config, &buf->backend, endidx, handle);
+ subbuffer_count_record(config, ctx, &buf->backend, endidx, handle);
/*
* Order all writes to buffer before the commit count update that will
 * determine that the subbuffer is full.
 */
cmm_smp_wmb();
- v_add(config, ctx->slot_size, &shmp_index(handle, buf->commit_hot, endidx)->cc);
+ v_add(config, ctx->slot_size, &cc_hot->cc);
/*
* commit count read can race with concurrent OOO commit count updates.
 * The race is benign for delivery: lib_ring_buffer_check_deliver() only
 * fires when the commit count reaches back the reserve offset for a specific
 * sub-buffer, which is completely independent of the order.
* which is completely independent of the order.
*/
- commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);
+ commit_count = v_read(config, &cc_hot->cc);
lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
- commit_count, endidx, handle);
+ commit_count, endidx, handle, ctx->tsc);
/*
* Update used size at each commit. It's needed only for extracting
* ring_buffer buffers from vmcore, after crash.
*/
- lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
- ctx->buf_offset, commit_count,
- ctx->slot_size, handle);
+ lib_ring_buffer_write_commit_counter(config, buf, chan,
+ offset_end, commit_count, handle, cc_hot);
}
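
Putting the pieces together, an end-to-end sketch of the write side. It
assumes lib_ring_buffer_write() from backend.h for the payload copy; the
wrapper function itself is hypothetical:

	static void example_trace_payload(const struct lttng_ust_lib_ring_buffer_config *config,
					  struct lttng_ust_lib_ring_buffer_ctx *ctx,
					  void *client_ctx,
					  const void *payload, size_t len)
	{
		if (lib_ring_buffer_nesting_inc(config) < 0)
			return;
		if (lib_ring_buffer_reserve(config, ctx, client_ctx) == 0) {
			lib_ring_buffer_write(config, ctx, payload, len);
			lib_ring_buffer_commit(config, ctx);
		}
		lib_ring_buffer_nesting_dec(config);
	}
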
/**
 * channel_record_disable - Disable channel records.
 * @config: ring buffer instance configuration.
 * @chan: channel.
 *
 * Records of the channel buffers are suspended until
 * channel_record_enable() is called.
 */
static inline
void channel_record_disable(const struct lttng_ust_lib_ring_buffer_config *config,
- struct channel *chan)
+ struct lttng_ust_lib_ring_buffer_channel *chan)
{
uatomic_inc(&chan->record_disabled);
}
static inline
void channel_record_enable(const struct lttng_ust_lib_ring_buffer_config *config,
- struct channel *chan)
+ struct lttng_ust_lib_ring_buffer_channel *chan)
{
uatomic_dec(&chan->record_disabled);
}
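
The disable/enable counters nest, so each disable must be paired with exactly
one enable. A short usage sketch (the quiescence step is an assumption about a
typical caller, not part of this API):

	channel_record_disable(config, chan);
	/*
	 * Concurrent writers may still be in flight; wait for them to
	 * quiesce (mechanism is caller-defined) before touching buffers.
	 */
	/* ... maintenance on the channel ... */
	channel_record_enable(config, chan);
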