-#ifndef _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
-#define _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
-
/*
- * libringbuffer/frontend_internal.h
- *
- * Ring Buffer Library Synchronization Header (internal helpers).
+ * SPDX-License-Identifier: (LGPL-2.1-only or GPL-2.0-only)
*
* Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; only
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- *
- * Author:
- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Ring Buffer Library Synchronization Header (internal helpers).
*
* See ring_buffer_frontend.c for more information on wait-free algorithms.
- *
- * Dual LGPL v2.1/GPL v2 license.
*/
+#ifndef _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
+#define _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
+
#include <urcu/compiler.h>
#include <urcu/tls-compat.h>
#include <signal.h>
+#include <stdint.h>
#include <pthread.h>
-#include <lttng/ringbuffer-config.h>
+#include <lttng/ringbuffer-context.h>
+#include "ringbuffer-config.h"
#include "backend_types.h"
+#include "backend_internal.h"
#include "frontend_types.h"
#include "shm.h"
/* buf_trunc mask selects only the buffer number. */
static inline
-unsigned long buf_trunc(unsigned long offset, struct channel *chan)
+unsigned long buf_trunc(unsigned long offset,
+ struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return offset & ~(chan->backend.buf_size - 1);
}

/* Select the buffer number value (counter). */
static inline
-unsigned long buf_trunc_val(unsigned long offset, struct channel *chan)
+unsigned long buf_trunc_val(unsigned long offset,
+ struct lttng_ust_lib_ring_buffer_channel *chan)
{
return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
}
/* buf_offset mask selects only the offset within the current buffer. */
static inline
-unsigned long buf_offset(unsigned long offset, struct channel *chan)
+unsigned long buf_offset(unsigned long offset,
+ struct lttng_ust_lib_ring_buffer_channel *chan)
{
return offset & (chan->backend.buf_size - 1);
}
/* subbuf_offset mask selects the offset within the current subbuffer. */
static inline
-unsigned long subbuf_offset(unsigned long offset, struct channel *chan)
+unsigned long subbuf_offset(unsigned long offset,
+ struct lttng_ust_lib_ring_buffer_channel *chan)
{
return offset & (chan->backend.subbuf_size - 1);
}
/* subbuf_trunc mask selects the subbuffer number. */
static inline
-unsigned long subbuf_trunc(unsigned long offset, struct channel *chan)
+unsigned long subbuf_trunc(unsigned long offset,
+ struct lttng_ust_lib_ring_buffer_channel *chan)
{
return offset & ~(chan->backend.subbuf_size - 1);
}
/* subbuf_align aligns the offset to the next subbuffer. */
static inline
-unsigned long subbuf_align(unsigned long offset, struct channel *chan)
+unsigned long subbuf_align(unsigned long offset,
+ struct lttng_ust_lib_ring_buffer_channel *chan)
{
return (offset + chan->backend.subbuf_size)
		& ~(chan->backend.subbuf_size - 1);
}

/* subbuf_index returns the index of the current subbuffer within the buffer. */
static inline
-unsigned long subbuf_index(unsigned long offset, struct channel *chan)
+unsigned long subbuf_index(unsigned long offset,
+ struct lttng_ust_lib_ring_buffer_channel *chan)
{
return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
}
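+
+/*
+ * Worked example for the helpers above (hypothetical geometry, not
+ * part of this patch): with subbuf_size = 0x1000 (order 12),
+ * num_subbuf = 2, buf_size = 0x2000 (order 13), a free-running write
+ * offset of 0x5404 decomposes as:
+ *
+ *   buf_trunc()     -> 0x4000  (start of the current buffer lap)
+ *   buf_trunc_val() -> 2       (number of completed buffer laps)
+ *   buf_offset()    -> 0x1404  (offset within the buffer)
+ *   subbuf_trunc()  -> 0x5000  (start of the current sub-buffer)
+ *   subbuf_offset() -> 0x404   (offset within that sub-buffer)
+ *   subbuf_align()  -> 0x6000  (start of the next sub-buffer)
+ *   subbuf_index()  -> 1       (sub-buffer slot within the buffer)
+ */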
#endif
extern
-int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx);
+int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
+ __attribute__((visibility("hidden")));
extern
void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf,
enum switch_mode mode,
- struct lttng_ust_shm_handle *handle);
+ struct lttng_ust_shm_handle *handle)
+ __attribute__((visibility("hidden")));
void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_config *config,
struct lttng_ust_lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
unsigned long offset,
unsigned long commit_count,
unsigned long idx,
struct lttng_ust_shm_handle *handle,
- uint64_t tsc);
+ uint64_t tsc)
+ __attribute__((visibility("hidden")));
/* Buffer write helpers */
static inline
void lib_ring_buffer_reserve_push_reader(struct lttng_ust_lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
unsigned long offset)
{
	unsigned long consumed_old, consumed_new;

	do {
		consumed_old = uatomic_read(&buf->consumed);
		/*
		 * In overwrite mode, push the reader's consumed position
		 * ahead when the write position reaches it, so the writer
		 * never stalls on the reader. Concurrent writers can race
		 * here; the one at the farthest write position wins.
		 */
		if (caa_unlikely(subbuf_trunc(offset, chan)
			      - subbuf_trunc(consumed_old, chan)
			     >= chan->backend.buf_size))
			consumed_new = subbuf_align(consumed_old, chan);
		else
			return;
	} while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
			      consumed_new) != consumed_old));
}
+/*
+ * Move the consumed position to the beginning of the sub-buffer that
+ * contains the write offset. Should only be used on ring buffers that
+ * are not actively being written into, because clear_reader does not
+ * take the commit counters into account when moving the consumed
+ * position: concurrent trace producers or consumers could observe the
+ * consumed position past the write offset, which breaks the ring
+ * buffer algorithm's guarantees.
+ */
+static inline
+void lib_ring_buffer_clear_reader(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ const struct lttng_ust_lib_ring_buffer_config *config;
+ unsigned long offset, consumed_old, consumed_new;
+
+ chan = shmp(handle, buf->backend.chan);
+ if (!chan)
+ return;
+ config = &chan->backend.config;
+
+ do {
+ offset = v_read(config, &buf->offset);
+ consumed_old = uatomic_read(&buf->consumed);
+ CHAN_WARN_ON(chan, (long) (subbuf_trunc(offset, chan)
+ - subbuf_trunc(consumed_old, chan))
+ < 0);
+ consumed_new = subbuf_trunc(offset, chan);
+ } while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
+ consumed_new) != consumed_old));
+}
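+
+/*
+ * Informal usage sketch (an assumption, not shown in this header): a
+ * buffer "clear" operation, run while no writer is active, could
+ * discard all unconsumed data in one step:
+ *
+ *	lib_ring_buffer_clear_reader(buf, handle);
+ *
+ * Contrast with the consumer path, which only advances the consumed
+ * position over fully committed sub-buffers.
+ */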
+
static inline
int lib_ring_buffer_pending_data(const struct lttng_ust_lib_ring_buffer_config *config,
struct lttng_ust_lib_ring_buffer *buf,
- struct channel *chan)
+ struct lttng_ust_lib_ring_buffer_channel *chan)
{
return !!subbuf_offset(v_read(config, &buf->offset), chan);
}
static inline
int lib_ring_buffer_reserve_committed(const struct lttng_ust_lib_ring_buffer_config *config,
struct lttng_ust_lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
struct lttng_ust_shm_handle *handle)
{
unsigned long offset, idx, commit_count;
+ struct commit_counters_hot *cc_hot;
CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);
do {
offset = v_read(config, &buf->offset);
idx = subbuf_index(offset, chan);
- commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, idx)->cc);
+ cc_hot = shmp_index(handle, buf->commit_hot, idx);
+ if (caa_unlikely(!cc_hot))
+ return 0;
+ commit_count = v_read(config, &cc_hot->cc);
} while (offset != v_read(config, &buf->offset));
	return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		     - (commit_count & chan->commit_count_mask) == 0);
}

static inline
void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
struct lttng_ust_lib_ring_buffer *buf,
- struct channel *chan,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
unsigned long offset,
unsigned long commit_count,
unsigned long idx,
static inline
void lib_ring_buffer_write_commit_counter(const struct lttng_ust_lib_ring_buffer_config *config,
struct lttng_ust_lib_ring_buffer *buf,
- struct channel *chan,
- unsigned long idx,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
unsigned long buf_offset,
unsigned long commit_count,
- struct lttng_ust_shm_handle *handle)
+ struct lttng_ust_shm_handle *handle,
+ struct commit_counters_hot *cc_hot)
{
unsigned long commit_seq_old;
if (caa_unlikely(subbuf_offset(buf_offset - commit_count, chan)))
return;
- commit_seq_old = v_read(config, &shmp_index(handle, buf->commit_hot, idx)->seq);
- while ((long) (commit_seq_old - commit_count) < 0)
- commit_seq_old = v_cmpxchg(config, &shmp_index(handle, buf->commit_hot, idx)->seq,
- commit_seq_old, commit_count);
+ commit_seq_old = v_read(config, &cc_hot->seq);
+ if (caa_likely((long) (commit_seq_old - commit_count) < 0))
+ v_set(config, &cc_hot->seq, commit_count);
}
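+
+/*
+ * Example of the guard above (hypothetical numbers): with
+ * subbuf_size = 0x1000, a record ending at buf_offset = 0x5404 may
+ * only publish seq once the sub-buffer's commit counter satisfies
+ * commit_count % subbuf_size == 0x404 (e.g. 0x2404 after two completed
+ * rounds), i.e. once every record up to buf_offset in the current
+ * sub-buffer has been committed; all other committers return early.
+ */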
extern int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
struct channel_backend *chanb, int cpu,
struct lttng_ust_shm_handle *handle,
- struct shm_object *shmobj);
+ struct shm_object *shmobj)
+ __attribute__((visibility("hidden")));
+
extern void lib_ring_buffer_free(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_shm_handle *handle);
+ struct lttng_ust_shm_handle *handle)
+ __attribute__((visibility("hidden")));
/* Keep track of trap nesting inside ring buffer code */
-extern DECLARE_URCU_TLS(unsigned int, lib_ring_buffer_nesting);
+extern DECLARE_URCU_TLS(unsigned int, lib_ring_buffer_nesting)
+ __attribute__((visibility("hidden")));
#endif /* _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H */