/*
- * lttng-ring-buffer-client.h
- *
- * LTTng lib ring buffer client template.
+ * SPDX-License-Identifier: LGPL-2.1-only
*
* Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; only
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ * LTTng lib ring buffer client template.
*/
+#include <stddef.h>
#include <stdint.h>
-#include <lttng/ust-events.h>
-#include "lttng/bitfield.h"
+
+#include <ust-events-internal.h>
+#include "ust-bitfield.h"
+#include "ust-compat.h"
#include "clock.h"
+#include "context-internal.h"
#include "lttng-tracer.h"
#include "../libringbuffer/frontend_types.h"
size_t event_context_len;
};
-static inline uint64_t lib_ring_buffer_clock_read(struct channel *chan)
+static inline uint64_t lib_ring_buffer_clock_read(struct lttng_ust_lib_ring_buffer_channel *chan)
{
return trace_clock_read64();
}
static inline
-size_t ctx_get_aligned_size(size_t offset, struct lttng_ctx *ctx,
+size_t ctx_get_aligned_size(size_t offset, struct lttng_ust_ctx *ctx,
size_t ctx_len)
{
size_t orig_offset = offset;
}
static inline
-void ctx_get_struct_size(struct lttng_ctx *ctx, size_t *ctx_len,
+void ctx_get_struct_size(struct lttng_ust_ctx *ctx, size_t *ctx_len,
enum app_ctx_mode mode)
{
int i;
}
for (i = 0; i < ctx->nr_fields; i++) {
if (mode == APP_CTX_ENABLED) {
- offset += ctx->fields[i].get_size(&ctx->fields[i], offset);
+ offset += ctx->fields[i]->get_size(ctx->fields[i], offset);
} else {
- if (lttng_context_is_app(ctx->fields[i].event_field.name)) {
+ if (lttng_context_is_app(ctx->fields[i]->event_field->name)) {
/*
 * Before UST 2.8, we cannot use the
 * application context, because we
 * have no way to access it safely
 * concurrently with application context
 * register/unregister.
 */
- offset += lttng_ust_dummy_get_size(&ctx->fields[i], offset);
+ offset += lttng_ust_dummy_get_size(ctx->fields[i], offset);
} else {
- offset += ctx->fields[i].get_size(&ctx->fields[i], offset);
+ offset += ctx->fields[i]->get_size(ctx->fields[i], offset);
}
}
}
static inline
void ctx_record(struct lttng_ust_lib_ring_buffer_ctx *bufctx,
struct lttng_channel *chan,
- struct lttng_ctx *ctx,
+ struct lttng_ust_ctx *ctx,
enum app_ctx_mode mode)
{
int i;
lib_ring_buffer_align_ctx(bufctx, ctx->largest_align);
for (i = 0; i < ctx->nr_fields; i++) {
if (mode == APP_CTX_ENABLED) {
- ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
+ ctx->fields[i]->record(ctx->fields[i], bufctx, chan);
} else {
- if (lttng_context_is_app(ctx->fields[i].event_field.name)) {
+ if (lttng_context_is_app(ctx->fields[i]->event_field->name)) {
/*
 * Before UST 2.8, we cannot use the
 * application context, because we
 * have no way to access it safely
 * concurrently with application context
 * register/unregister.
 */
- lttng_ust_dummy_record(&ctx->fields[i], bufctx, chan);
+ lttng_ust_dummy_record(ctx->fields[i], bufctx, chan);
} else {
- ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
+ ctx->fields[i]->record(ctx->fields[i], bufctx, chan);
}
}
}
*/
static __inline__
size_t record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
- struct channel *chan, size_t offset,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ size_t offset,
size_t *pre_header_padding,
struct lttng_ust_lib_ring_buffer_ctx *ctx,
struct lttng_client_ctx *client_ctx)
{
struct lttng_channel *lttng_chan = channel_get_private(chan);
- struct lttng_event *event = ctx->priv;
- struct lttng_stack_ctx *lttng_ctx = ctx->priv2;
+ struct lttng_ust_stack_ctx *lttng_ctx = ctx->priv;
size_t orig_offset = offset;
size_t padding;
padding = 0;
WARN_ON_ONCE(1);
}
- if (lttng_ctx) {
- /* 2.8+ probe ABI. */
- offset += ctx_get_aligned_size(offset, lttng_ctx->chan_ctx,
- client_ctx->packet_context_len);
- offset += ctx_get_aligned_size(offset, lttng_ctx->event_ctx,
- client_ctx->event_context_len);
- } else {
- /* Pre 2.8 probe ABI. */
- offset += ctx_get_aligned_size(offset, lttng_chan->ctx,
- client_ctx->packet_context_len);
- offset += ctx_get_aligned_size(offset, event->ctx,
- client_ctx->event_context_len);
- }
+ offset += ctx_get_aligned_size(offset, lttng_ctx->chan_ctx,
+ client_ctx->packet_context_len);
+ offset += ctx_get_aligned_size(offset, lttng_ctx->event_ctx,
+ client_ctx->event_context_len);
*pre_header_padding = padding;
return offset - orig_offset;
}
uint32_t event_id)
{
struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
- struct lttng_event *event = ctx->priv;
- struct lttng_stack_ctx *lttng_ctx = ctx->priv2;
+ struct lttng_ust_stack_ctx *lttng_ctx = ctx->priv;
if (caa_unlikely(ctx->rflags))
goto slow_path;
WARN_ON_ONCE(1);
}
- if (lttng_ctx) {
- /* 2.8+ probe ABI. */
- ctx_record(ctx, lttng_chan, lttng_ctx->chan_ctx, APP_CTX_ENABLED);
- ctx_record(ctx, lttng_chan, lttng_ctx->event_ctx, APP_CTX_ENABLED);
- } else {
- /* Pre 2.8 probe ABI. */
- ctx_record(ctx, lttng_chan, lttng_chan->ctx, APP_CTX_DISABLED);
- ctx_record(ctx, lttng_chan, event->ctx, APP_CTX_DISABLED);
- }
+ ctx_record(ctx, lttng_chan, lttng_ctx->chan_ctx, APP_CTX_ENABLED);
+ ctx_record(ctx, lttng_chan, lttng_ctx->event_ctx, APP_CTX_ENABLED);
lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
return;
uint32_t event_id)
{
struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
- struct lttng_event *event = ctx->priv;
- struct lttng_stack_ctx *lttng_ctx = ctx->priv2;
+ struct lttng_ust_stack_ctx *lttng_ctx = ctx->priv;
switch (lttng_chan->header_type) {
case 1: /* compact */
default:
WARN_ON_ONCE(1);
}
- if (lttng_ctx) {
- /* 2.8+ probe ABI. */
- ctx_record(ctx, lttng_chan, lttng_ctx->chan_ctx, APP_CTX_ENABLED);
- ctx_record(ctx, lttng_chan, lttng_ctx->event_ctx, APP_CTX_ENABLED);
- } else {
- /* Pre 2.8 probe ABI. */
- ctx_record(ctx, lttng_chan, lttng_chan->ctx, APP_CTX_DISABLED);
- ctx_record(ctx, lttng_chan, event->ctx, APP_CTX_DISABLED);
- }
+ ctx_record(ctx, lttng_chan, lttng_ctx->chan_ctx, APP_CTX_ENABLED);
+ ctx_record(ctx, lttng_chan, lttng_ctx->event_ctx, APP_CTX_ENABLED);
lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}
static const struct lttng_ust_lib_ring_buffer_config client_config;
-static uint64_t client_ring_buffer_clock_read(struct channel *chan)
+static uint64_t client_ring_buffer_clock_read(struct lttng_ust_lib_ring_buffer_channel *chan)
{
return lib_ring_buffer_clock_read(chan);
}
static
size_t client_record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
- struct channel *chan, size_t offset,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ size_t offset,
size_t *pre_header_padding,
struct lttng_ust_lib_ring_buffer_ctx *ctx,
void *client_ctx)
unsigned int subbuf_idx,
struct lttng_ust_shm_handle *handle)
{
- struct channel *chan = shmp(handle, buf->backend.chan);
+ struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
struct packet_header *header =
(struct packet_header *)
lib_ring_buffer_offset_address(&buf->backend,
unsigned int subbuf_idx, unsigned long data_size,
struct lttng_ust_shm_handle *handle)
{
- struct channel *chan = shmp(handle, buf->backend.chan);
+ struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
struct packet_header *header =
(struct packet_header *)
lib_ring_buffer_offset_address(&buf->backend,
header->ctx.content_size =
(uint64_t) data_size * CHAR_BIT; /* in bits */
header->ctx.packet_size =
- (uint64_t) PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
+ (uint64_t) LTTNG_UST_PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
struct lttng_ust_shm_handle *handle,
uint64_t *stream_id)
{
- struct channel *chan = shmp(handle, buf->backend.chan);
+ struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle,
+ buf->backend.chan);
struct lttng_channel *lttng_chan = channel_get_private(chan);
*stream_id = lttng_chan->id;
struct lttng_ust_shm_handle *handle,
uint64_t *ts)
{
- struct channel *chan;
+ struct lttng_ust_lib_ring_buffer_channel *chan;
chan = shmp(handle, handle->chan);
*ts = client_ring_buffer_clock_read(chan);
struct packet_header *header;
header = client_packet_header(buf, handle);
+ if (!header)
+ return -1;
*seq = header->ctx.packet_seq_num;
return 0;
}
.cb_ptr = &client_cb.parent,
};
-const struct lttng_ust_client_lib_ring_buffer_client_cb *LTTNG_CLIENT_CALLBACKS = &client_cb;
-
static
struct lttng_channel *_channel_create(const char *name,
void *buf_addr,
uint32_t event_id)
{
struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
- struct lttng_event *event = ctx->priv;
- struct lttng_stack_ctx *lttng_ctx = ctx->priv2;
+ struct lttng_ust_stack_ctx *lttng_ctx = ctx->priv;
struct lttng_client_ctx client_ctx;
int ret, cpu;
/* Compute internal size of context structures. */
-
- if (lttng_ctx) {
- /* 2.8+ probe ABI. */
- ctx_get_struct_size(lttng_ctx->chan_ctx, &client_ctx.packet_context_len,
- APP_CTX_ENABLED);
- ctx_get_struct_size(lttng_ctx->event_ctx, &client_ctx.event_context_len,
- APP_CTX_ENABLED);
- } else {
- /* Pre 2.8 probe ABI. */
- ctx_get_struct_size(lttng_chan->ctx, &client_ctx.packet_context_len,
- APP_CTX_DISABLED);
- ctx_get_struct_size(event->ctx, &client_ctx.event_context_len,
- APP_CTX_DISABLED);
- }
+ ctx_get_struct_size(lttng_ctx->chan_ctx, &client_ctx.packet_context_len,
+ APP_CTX_ENABLED);
+ ctx_get_struct_size(lttng_ctx->event_ctx, &client_ctx.event_context_len,
+ APP_CTX_ENABLED);
cpu = lib_ring_buffer_get_cpu(&client_config);
if (cpu < 0)
ret = lib_ring_buffer_reserve(&client_config, ctx, &client_ctx);
if (caa_unlikely(ret))
goto put;
- if (caa_likely(ctx->ctx_len
- >= sizeof(struct lttng_ust_lib_ring_buffer_ctx))) {
- if (lib_ring_buffer_backend_get_pages(&client_config, ctx,
- &ctx->backend_pages)) {
- ret = -EPERM;
- goto put;
- }
+ if (lib_ring_buffer_backend_get_pages(&client_config, ctx,
+ &ctx->backend_pages)) {
+ ret = -EPERM;
+ goto put;
}
lttng_write_event_header(&client_config, ctx, event_id);
return 0;
#if 0
static
-wait_queue_head_t *lttng_get_reader_wait_queue(struct channel *chan)
+wait_queue_head_t *lttng_get_reader_wait_queue(struct lttng_ust_lib_ring_buffer_channel *chan)
{
return &chan->read_wait;
}
static
-wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
+wait_queue_head_t *lttng_get_hp_wait_queue(struct lttng_ust_lib_ring_buffer_channel *chan)
{
return &chan->hp_wait;
}
#endif //0
static
-int lttng_is_finalized(struct channel *chan)
+int lttng_is_finalized(struct lttng_ust_lib_ring_buffer_channel *chan)
{
return lib_ring_buffer_channel_is_finalized(chan);
}
static
-int lttng_is_disabled(struct channel *chan)
+int lttng_is_disabled(struct lttng_ust_lib_ring_buffer_channel *chan)
{
return lib_ring_buffer_channel_is_disabled(chan);
}
static
-int lttng_flush_buffer(struct channel *chan, struct lttng_ust_shm_handle *handle)
+int lttng_flush_buffer(struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct lttng_ust_shm_handle *handle)
{
struct lttng_ust_lib_ring_buffer *buf;
int cpu;
static struct lttng_transport lttng_relay_transport = {
.name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap",
.ops = {
+ .struct_size = sizeof(struct lttng_ust_channel_ops),
.channel_create = _channel_create,
.channel_destroy = lttng_channel_destroy,
- .u.has_strcpy = 1,
.event_reserve = lttng_event_reserve,
.event_commit = lttng_event_commit,
.event_write = lttng_event_write,
.packet_avail_size = NULL, /* Would be racy anyway */
- //.get_reader_wait_queue = lttng_get_reader_wait_queue,
- //.get_hp_wait_queue = lttng_get_hp_wait_queue,
.is_finalized = lttng_is_finalized,
.is_disabled = lttng_is_disabled,
.flush_buffer = lttng_flush_buffer,