#include <linux/module.h>
#include <linux/types.h>
-#include "lib/bitfield.h"
-#include "wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
-#include "wrapper/trace-clock.h"
-#include "lttng-events.h"
-#include "lttng-tracer.h"
-#include "wrapper/ringbuffer/frontend_types.h"
+#include <lib/bitfield.h>
+#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
+#include <wrapper/trace-clock.h>
+#include <lttng-events.h>
+#include <lttng-tracer.h>
+#include <wrapper/ringbuffer/frontend_types.h>
#define LTTNG_COMPACT_EVENT_BITS 5
#define LTTNG_COMPACT_TSC_BITS 27
+static struct lttng_transport lttng_relay_transport;
+
/*
* Keep the natural field alignment for _each field_ within this structure if
* you ever add/remove a field from this header. Packed attribute is not used
*/
uint8_t uuid[16];
uint32_t stream_id;
+ uint64_t stream_instance_id;
struct {
/* Stream packet context */
uint64_t timestamp_end; /* Cycle count at subbuffer end */
uint64_t content_size; /* Size of data in subbuffer */
uint64_t packet_size; /* Subbuffer size (include padding) */
+ uint64_t packet_seq_num; /* Packet sequence number */
unsigned long events_discarded; /*
* Events lost in this subbuffer since
* the beginning of the trace.
if (likely(!ctx))
return 0;
+ offset += lib_ring_buffer_align(offset, ctx->largest_align);
for (i = 0; i < ctx->nr_fields; i++)
offset += ctx->fields[i].get_size(offset);
return offset - orig_offset;
if (likely(!ctx))
return;
+ lib_ring_buffer_align_ctx(bufctx, ctx->largest_align);
for (i = 0; i < ctx->nr_fields; i++)
ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
}
* contains.
*/
static __inline__
-unsigned char record_header_size(const struct lib_ring_buffer_config *config,
+size_t record_header_size(const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
struct lib_ring_buffer_ctx *ctx)
{
struct lttng_channel *lttng_chan = channel_get_private(chan);
- struct lttng_event *event = ctx->priv;
+ struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
+ struct lttng_event *event = lttng_probe_ctx->event;
size_t orig_offset = offset;
size_t padding;
padding = 0;
WARN_ON_ONCE(1);
}
- offset += ctx_get_size(offset, event->ctx);
offset += ctx_get_size(offset, lttng_chan->ctx);
+ offset += ctx_get_size(offset, event->ctx);
*pre_header_padding = padding;
return offset - orig_offset;
}
-#include "wrapper/ringbuffer/api.h"
+#include <wrapper/ringbuffer/api.h>
static
void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
uint32_t event_id)
{
struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
- struct lttng_event *event = ctx->priv;
+ struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
+ struct lttng_event *event = lttng_probe_ctx->event;
if (unlikely(ctx->rflags))
goto slow_path;
uint32_t event_id)
{
struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
- struct lttng_event *event = ctx->priv;
+ struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
+ struct lttng_event *event = lttng_probe_ctx->event;
switch (lttng_chan->header_type) {
case 1: /* compact */
header->magic = CTF_MAGIC_NUMBER;
memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
header->stream_id = lttng_chan->id;
+ header->stream_instance_id = buf->backend.cpu;
header->ctx.timestamp_begin = tsc;
header->ctx.timestamp_end = 0;
header->ctx.content_size = ~0ULL; /* for debugging */
header->ctx.packet_size = ~0ULL;
+ header->ctx.packet_seq_num = chan->backend.num_subbuf * \
+ buf->backend.buf_cnt[subbuf_idx].seq_cnt + \
+ subbuf_idx;
header->ctx.events_discarded = 0;
header->ctx.cpu_id = buf->backend.cpu;
}
const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf)
{
- struct lib_ring_buffer_backend *bufb;
- unsigned long sb_bindex;
- struct packet_header *header;
-
- bufb = &buf->backend;
- sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
- header = (struct packet_header *)
- lib_ring_buffer_offset_address(bufb,
- sb_bindex * bufb->chan->backend.subbuf_size);
-
- return header;
+ return lib_ring_buffer_read_offset_address(&buf->backend, 0);
}
static int client_timestamp_begin(const struct lib_ring_buffer_config *config,
return 0;
}
+/*
+ * Report the sequence number recorded in the current packet header
+ * (written at sub-buffer begin as num_subbuf * seq_cnt + subbuf_idx).
+ */
+static
+int client_sequence_number(const struct lib_ring_buffer_config *config,
+		struct lib_ring_buffer *buf,
+		uint64_t *seq)
+{
+	struct packet_header *header = client_packet_header(config, buf);
+
+	*seq = header->ctx.packet_seq_num;
+
+	return 0;
+}
+
+/*
+ * Report the stream instance id stored in the packet header
+ * (set to the buffer's owning CPU number at sub-buffer begin).
+ */
+static
+int client_instance_id(const struct lib_ring_buffer_config *config,
+		struct lib_ring_buffer *buf,
+		uint64_t *id)
+{
+	struct packet_header *header = client_packet_header(config, buf);
+	*id = header->stream_instance_id;
+
+	return 0;
+}
+
static const struct lib_ring_buffer_config client_config = {
.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
.cb.record_header_size = client_record_header_size,
.wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
};
+/*
+ * Drop the module reference taken by _channel_create() once the
+ * channel backend no longer uses lttng_relay_transport.ops.
+ */
+static
+void release_priv_ops(void *priv_ops)
+{
+	module_put(THIS_MODULE);
+}
+
+/* Thin wrapper over channel_destroy(), used as the transport destroy op. */
+static
+void lttng_channel_destroy(struct channel *chan)
+{
+	channel_destroy(chan);
+}
+
static
struct channel *_channel_create(const char *name,
		struct lttng_channel *lttng_chan, void *buf_addr,
		unsigned int switch_timer_interval,
		unsigned int read_timer_interval)
{
-	lttng_chan->ops->timestamp_begin = client_timestamp_begin;
-	lttng_chan->ops->timestamp_end = client_timestamp_end;
-	lttng_chan->ops->events_discarded = client_events_discarded;
-	lttng_chan->ops->content_size = client_content_size;
-	lttng_chan->ops->packet_size = client_packet_size;
-	lttng_chan->ops->stream_id = client_stream_id;
-	lttng_chan->ops->current_timestamp = client_current_timestamp;
+	struct channel *chan;
-	return channel_create(&client_config, name, lttng_chan, buf_addr,
+	chan = channel_create(&client_config, name, lttng_chan, buf_addr,
			subbuf_size, num_subbuf, switch_timer_interval,
			read_timer_interval);
-}
+	if (chan) {
+		/*
+		 * Ensure this module is not unloaded before we finish
+		 * using lttng_relay_transport.ops.
+		 */
+		if (!try_module_get(THIS_MODULE)) {
+			printk(KERN_WARNING "LTT : Can't lock transport module.\n");
+			goto error;
+		}
+		chan->backend.priv_ops = &lttng_relay_transport.ops;
+		chan->backend.release_priv_ops = release_priv_ops;
+	}
+	return chan;
-static
-void lttng_channel_destroy(struct channel *chan)
-{
-	channel_destroy(chan);
+error:
+	/* Module reference was not taken: destroy the channel directly. */
+	lttng_channel_destroy(chan);
+	return NULL;
}
static
int ret, cpu;
cpu = lib_ring_buffer_get_cpu(&client_config);
- if (cpu < 0)
+ if (unlikely(cpu < 0))
return -EPERM;
ctx->cpu = cpu;
}
ret = lib_ring_buffer_reserve(&client_config, ctx);
- if (ret)
+ if (unlikely(ret))
goto put;
+ lib_ring_buffer_backend_get_pages(&client_config, ctx,
+ &ctx->backend_pages);
lttng_write_event_header(&client_config, ctx, event_id);
return 0;
put:
lib_ring_buffer_memset(&client_config, ctx, c, len);
}
+/*
+ * Copy a kernel-space string of at most len bytes into the ring buffer,
+ * passing '#' as the fill byte to lib_ring_buffer_strcpy (presumably used
+ * for padding past the terminator — confirm against lib_ring_buffer_strcpy).
+ */
+static
+void lttng_event_strcpy(struct lib_ring_buffer_ctx *ctx, const char *src,
+		size_t len)
+{
+	lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
+}
+
+/*
+ * Copy a user-space string of at most len bytes into the ring buffer
+ * without faulting (inatomic variant), passing '#' as the fill byte
+ * like lttng_event_strcpy.
+ */
+static
+void lttng_event_strcpy_from_user(struct lib_ring_buffer_ctx *ctx,
+		const char __user *src, size_t len)
+{
+	lib_ring_buffer_strcpy_from_user_inatomic(&client_config, ctx, src,
+			len, '#');
+}
+
static
wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct channel *chan, int cpu)
{
.event_write = lttng_event_write,
.event_write_from_user = lttng_event_write_from_user,
.event_memset = lttng_event_memset,
+ .event_strcpy = lttng_event_strcpy,
+ .event_strcpy_from_user = lttng_event_strcpy_from_user,
.packet_avail_size = NULL, /* Would be racy anyway */
.get_writer_buf_wait_queue = lttng_get_writer_buf_wait_queue,
.get_hp_wait_queue = lttng_get_hp_wait_queue,
.is_finalized = lttng_is_finalized,
.is_disabled = lttng_is_disabled,
+ .timestamp_begin = client_timestamp_begin,
+ .timestamp_end = client_timestamp_end,
+ .events_discarded = client_events_discarded,
+ .content_size = client_content_size,
+ .packet_size = client_packet_size,
+ .stream_id = client_stream_id,
+ .current_timestamp = client_current_timestamp,
+ .sequence_number = client_sequence_number,
+ .instance_id = client_instance_id,
},
};