*/
#define _GNU_SOURCE
+#define _LGPL_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
-#include <ust/kernelcompat.h>
-#define _LGPL_SOURCE
#include <urcu-bp.h>
#include <urcu/rculist.h>
+#include <ust/core.h>
+#include <ust/clock.h>
#include "buffers.h"
#include "tracer.h"
-//#include "list.h"
-#include "usterr.h"
+#include "usterr_signal_safe.h"
+#include "ust_snprintf.h"
+
+/*
+ * Because UST core defines a non-const PAGE_SIZE, define PAGE_SIZE_STATIC here.
+ * It is just an approximation for the tracer stack.
+ */
+#define PAGE_SIZE_STATIC 4096
enum ltt_type {
LTT_TYPE_SIGNED_INT,
LTT_TYPE_NONE,
};
-static int ust_get_cpu(void)
-{
- return sched_getcpu();
-}
+/*
+ * Special stack for the tracer. Keeps serialization offsets for each field.
+ * Per-thread. Deals with reentrancy from signals by simply ensuring that
+ * interrupting signals put the stack back to its original position.
+ */
+#define TRACER_STACK_LEN (PAGE_SIZE_STATIC / sizeof(unsigned long))
+static unsigned long __thread tracer_stack[TRACER_STACK_LEN];
+
+static unsigned int __thread tracer_stack_pos;
#define LTT_ATTRIBUTE_NETWORK_BYTE_ORDER (1<<1)
size_t buf_offset,
char trace_size, enum ltt_type trace_type,
char c_size, enum ltt_type c_type,
- int *largest_align, va_list *args)
+ unsigned int *stack_pos_ctx,
+ int *largest_align,
+ va_list *args)
{
union {
unsigned long v_ulong;
tmp.v_string.s = va_arg(*args, const char *);
if ((unsigned long)tmp.v_string.s < PAGE_SIZE)
tmp.v_string.s = "<NULL>";
- tmp.v_string.len = strlen(tmp.v_string.s)+1;
+ if (!buf) {
+ /*
+ * Reserve tracer stack entry.
+ */
+ tracer_stack_pos++;
+ assert(tracer_stack_pos <= TRACER_STACK_LEN);
+ cmm_barrier();
+ tracer_stack[*stack_pos_ctx] =
+ strlen(tmp.v_string.s) + 1;
+ }
+ tmp.v_string.len = tracer_stack[(*stack_pos_ctx)++];
if (buf)
- ust_buffers_write(buf, buf_offset, tmp.v_string.s,
- tmp.v_string.len);
+ ust_buffers_strncpy(buf, buf_offset, tmp.v_string.s,
+ tmp.v_string.len);
buf_offset += tmp.v_string.len;
goto copydone;
default:
notrace size_t ltt_serialize_data(struct ust_buffer *buf, size_t buf_offset,
struct ltt_serialize_closure *closure,
- void *serialize_private, int *largest_align,
+ void *serialize_private,
+ unsigned int stack_pos_ctx,
+ int *largest_align,
const char *fmt, va_list *args)
{
char trace_size = 0, c_size = 0; /*
buf_offset = serialize_trace_data(buf,
buf_offset, trace_size,
trace_type, c_size, c_type,
- largest_align, args);
+ &stack_pos_ctx,
+ largest_align,
+ args);
trace_size = 0;
c_size = 0;
trace_type = LTT_TYPE_NONE;
* Assume that the padding for alignment starts at a sizeof(void *) address.
*/
static notrace size_t ltt_get_data_size(struct ltt_serialize_closure *closure,
- void *serialize_private, int *largest_align,
+ void *serialize_private,
+ unsigned int stack_pos_ctx, int *largest_align,
const char *fmt, va_list *args)
{
ltt_serialize_cb cb = closure->callbacks[0];
closure->cb_idx = 0;
return (size_t)cb(NULL, 0, closure, serialize_private,
- largest_align, fmt, args);
+ stack_pos_ctx, largest_align, fmt, args);
}
static notrace
void ltt_write_event_data(struct ust_buffer *buf, size_t buf_offset,
struct ltt_serialize_closure *closure,
- void *serialize_private, int largest_align,
+ void *serialize_private,
+ unsigned int stack_pos_ctx,
+ int largest_align,
const char *fmt, va_list *args)
{
ltt_serialize_cb cb = closure->callbacks[0];
closure->cb_idx = 0;
buf_offset += ltt_align(buf_offset, largest_align);
- cb(buf, buf_offset, closure, serialize_private, NULL, fmt, args);
+ cb(buf, buf_offset, closure, serialize_private, stack_pos_ctx, NULL,
+ fmt, args);
}
-notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
- struct registers *regs, void *call_data,
+notrace void ltt_vtrace(const struct ust_marker *mdata, void *probe_data,
+ void *call_data,
const char *fmt, va_list *args)
{
int largest_align, ret;
- struct ltt_active_marker *pdata;
+ struct ltt_active_ust_marker *pdata;
uint16_t eID;
size_t data_size, slot_size;
unsigned int chan_index;
struct ust_channel *channel;
struct ust_trace *trace, *dest_trace = NULL;
struct ust_buffer *buf;
- void *transport_data;
u64 tsc;
long buf_offset;
va_list args_copy;
void *serialize_private = NULL;
int cpu;
unsigned int rflags;
+ unsigned int stack_pos_ctx;
/*
* This test is useful for quickly exiting static tracing when no trace
return;
rcu_read_lock(); //ust// rcu_read_lock_sched_notrace();
-//ust// cpu = smp_processor_id();
cpu = ust_get_cpu();
-//ust// __get_cpu_var(ltt_nesting)++;
- /* FIXME: should nesting be per-cpu? */
- ltt_nesting++;
- pdata = (struct ltt_active_marker *)probe_data;
+ /* Force volatile access. */
+ CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) + 1);
+ stack_pos_ctx = tracer_stack_pos;
+ cmm_barrier();
+
+ pdata = (struct ltt_active_ust_marker *)probe_data;
eID = mdata->event_id;
chan_index = mdata->channel_id;
closure.callbacks = pdata->probe->callbacks;
*/
largest_align = 1; /* must be non-zero for ltt_align */
data_size = ltt_get_data_size(&closure, serialize_private,
- &largest_align, fmt, &args_copy);
+ stack_pos_ctx, &largest_align,
+ fmt, &args_copy);
largest_align = min_t(int, largest_align, sizeof(void *));
va_end(args_copy);
/* Iterate on each trace */
- list_for_each_entry_rcu(trace, <t_traces.head, list) {
+ cds_list_for_each_entry_rcu(trace, <t_traces.head, list) {
/*
* Expect the filter to filter out events. If we get here,
* we went through tracepoint activation as a first step.
}
/* reserve space : header and data */
- ret = ltt_reserve_slot(trace, channel, &transport_data,
- data_size, &slot_size, &buf_offset,
- &tsc, &rflags,
- largest_align, cpu);
+ ret = ltt_reserve_slot(channel, trace, data_size, largest_align,
+ cpu, &buf, &slot_size, &buf_offset,
+ &tsc, &rflags);
if (unlikely(ret < 0))
continue; /* buffer full */
//ust// buf = ((struct rchan *)channel->trans_channel_data)->buf[cpu];
buf = channel->buf[cpu];
/* Out-of-order write : header and data */
- buf_offset = ltt_write_event_header(trace,
- channel, buf, buf_offset,
+ buf_offset = ltt_write_event_header(channel, buf, buf_offset,
eID, data_size, tsc, rflags);
ltt_write_event_data(buf, buf_offset, &closure,
- serialize_private,
- largest_align, fmt, &args_copy);
+ serialize_private,
+ stack_pos_ctx, largest_align,
+ fmt, &args_copy);
va_end(args_copy);
/* Out-of-order commit */
ltt_commit_slot(channel, buf, buf_offset, data_size, slot_size);
- DBG("just commited event at offset %ld and size %zd", buf_offset, slot_size);
+		DBG("just committed event (%s/%s) at offset %ld and size %zd", mdata->channel, mdata->name, buf_offset, slot_size);
}
-//ust// __get_cpu_var(ltt_nesting)--;
- ltt_nesting--;
+
+ cmm_barrier();
+ tracer_stack_pos = stack_pos_ctx;
+ CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) - 1);
+
rcu_read_unlock(); //ust// rcu_read_unlock_sched_notrace();
}
-notrace void ltt_trace(const struct marker *mdata, void *probe_data,
- struct registers *regs, void *call_data,
+notrace void ltt_trace(const struct ust_marker *mdata, void *probe_data,
+ void *call_data,
const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
- ltt_vtrace(mdata, probe_data, regs, call_data, fmt, &args);
+ ltt_vtrace(mdata, probe_data, call_data, fmt, &args);
va_end(args);
}
outbuf = &false_buf;
bufsize = 1;
}
- result = vsnprintf(outbuf, bufsize, new_fmt, ap);
+ result = ust_safe_vsnprintf(outbuf, bufsize, new_fmt, ap);
return result;
}