return trace_clock_read64();
}
+/*
+ * ctx_get_size - compute the number of bytes consumed by context fields.
+ * @offset: current write offset within the event record
+ * @ctx: context descriptor, or NULL when no context is attached
+ *
+ * Walks every attached field and accumulates the size reported by its
+ * get_size() callback.  The callback receives the running offset,
+ * presumably so it can include alignment padding in its result --
+ * confirm against the field implementations.  Returns the total size
+ * added by the context, or 0 when @ctx is NULL (the common case,
+ * hence likely()).
+ */
+static inline
+size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
+{
+ int i;
+ size_t orig_offset = offset;
+
+ if (likely(!ctx))
+ return 0;
+ for (i = 0; i < ctx->nr_fields; i++)
+ offset += ctx->fields[i].get_size(offset);
+ return offset - orig_offset;
+}
+
+/*
+ * ctx_record - serialize all context fields into the ring buffer.
+ * @bufctx: ring buffer write context for the event being committed
+ * @chan: channel owning the event
+ * @ctx: context descriptor, or NULL when no context is attached
+ *
+ * Invokes each field's record() callback in declaration order; each
+ * callback presumably writes exactly the bytes its get_size() callback
+ * accounted for during space reservation -- confirm against the field
+ * implementations.  No-op when @ctx is NULL (the common case, hence
+ * likely()).
+ */
+static inline
+void ctx_record(struct lib_ring_buffer_ctx *bufctx,
+ struct ltt_channel *chan,
+ struct lttng_ctx *ctx)
+{
+ int i;
+
+ if (likely(!ctx))
+ return;
+ for (i = 0; i < ctx->nr_fields; i++)
+ ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
+}
+
/*
* record_header_size - Calculate the header size and padding necessary.
* @config: ring buffer instance configuration
struct lib_ring_buffer_ctx *ctx)
{
struct ltt_channel *ltt_chan = channel_get_private(chan);
+ struct ltt_event *event = ctx->priv;
size_t orig_offset = offset;
size_t padding;
offset += sizeof(uint32_t); /* id */
offset += lib_ring_buffer_align(offset, ltt_alignof(uint64_t));
offset += sizeof(uint64_t); /* timestamp */
-
}
break;
default:
WARN_ON_ONCE(1);
}
+ offset += ctx_get_size(offset, event->ctx);
+ offset += ctx_get_size(offset, ltt_chan->ctx);
*pre_header_padding = padding;
return offset - orig_offset;
uint32_t event_id)
{
struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
+ struct ltt_event *event = ctx->priv;
if (unlikely(ctx->rflags))
goto slow_path;
default:
WARN_ON_ONCE(1);
}
+
+ ctx_record(ctx, ltt_chan, event->ctx);
+ ctx_record(ctx, ltt_chan, ltt_chan->ctx);
+
return;
slow_path:
uint32_t event_id)
{
struct ltt_channel *ltt_chan = channel_get_private(ctx->chan);
+ struct ltt_event *event = ctx->priv;
switch (ltt_chan->header_type) {
case 1: /* compact */
default:
WARN_ON_ONCE(1);
}
+ ctx_record(ctx, ltt_chan, event->ctx);
+ ctx_record(ctx, ltt_chan, ltt_chan->ctx);
}
static const struct lib_ring_buffer_config client_config;
void ltt_buffer_read_close(struct lib_ring_buffer *buf)
{
lib_ring_buffer_release_read(buf);
-
}
static