* va_list * to ltt_vtrace.
*/
+#define _GNU_SOURCE
+#include <unistd.h>
+#include <sched.h>	/* sched_getcpu() — GNU extension, needs _GNU_SOURCE */
+#include <sys/syscall.h>
#include <stdarg.h>
-//ust// #include <linux/ctype.h>
-//ust// #include <linux/string.h>
-//ust// #include <linux/module.h>
-//ust// #include <linux/ltt-tracer.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>
#include <urcu-bp.h>
#include <urcu/rculist.h>
-#include "relay.h"
-#include <ust/tracer.h>
+#include "buffers.h"
+#include "tracer.h"
//#include "list.h"
#include "usterr.h"
LTT_TYPE_NONE,
};
+/*
+ * Return the CPU the calling thread currently runs on, for use as a
+ * per-CPU buffer index. sched_getcpu() can fail and return -1 (e.g.
+ * ENOSYS when the getcpu syscall/vDSO is unavailable); in that case
+ * fall back to CPU 0 rather than propagating -1 as an array index
+ * (callers index channel->buf[cpu] with this value).
+ */
+static int ust_get_cpu(void)
+{
+	int cpu = sched_getcpu();
+
+	return cpu < 0 ? 0 : cpu;
+}
+
#define LTT_ATTRIBUTE_NETWORK_BYTE_ORDER (1<<1)
/*
return fmt;
}
-static inline size_t serialize_trace_data(struct rchan_buf *buf,
+static inline size_t serialize_trace_data(struct ust_buffer *buf,
size_t buf_offset,
char trace_size, enum ltt_type trace_type,
char c_size, enum ltt_type c_type,
tmp.v_string.s = "<NULL>";
tmp.v_string.len = strlen(tmp.v_string.s)+1;
if (buf)
- ltt_relay_write(buf, buf_offset, tmp.v_string.s,
+ ust_buffers_write(buf, buf_offset, tmp.v_string.s,
tmp.v_string.len);
buf_offset += tmp.v_string.len;
goto copydone;
switch (trace_size) {
case 1:
if (c_size == 8)
- ltt_relay_write(buf, buf_offset,
+ ust_buffers_write(buf, buf_offset,
(uint8_t[]){ (uint8_t)tmp.v_uint64 },
sizeof(uint8_t));
else
- ltt_relay_write(buf, buf_offset,
+ ust_buffers_write(buf, buf_offset,
(uint8_t[]){ (uint8_t)tmp.v_ulong },
sizeof(uint8_t));
break;
case 2:
if (c_size == 8)
- ltt_relay_write(buf, buf_offset,
+ ust_buffers_write(buf, buf_offset,
(uint16_t[]){ (uint16_t)tmp.v_uint64 },
sizeof(uint16_t));
else
- ltt_relay_write(buf, buf_offset,
+ ust_buffers_write(buf, buf_offset,
(uint16_t[]){ (uint16_t)tmp.v_ulong },
sizeof(uint16_t));
break;
case 4:
if (c_size == 8)
- ltt_relay_write(buf, buf_offset,
+ ust_buffers_write(buf, buf_offset,
(uint32_t[]){ (uint32_t)tmp.v_uint64 },
sizeof(uint32_t));
else
- ltt_relay_write(buf, buf_offset,
+ ust_buffers_write(buf, buf_offset,
(uint32_t[]){ (uint32_t)tmp.v_ulong },
sizeof(uint32_t));
break;
* c_size cannot be other than 8 here because
* trace_size > 4.
*/
- ltt_relay_write(buf, buf_offset,
+ ust_buffers_write(buf, buf_offset,
(uint64_t[]){ (uint64_t)tmp.v_uint64 },
sizeof(uint64_t));
break;
if (buf) {
switch (trace_type) {
case LTT_TYPE_SIGNED_INT:
- ltt_relay_write(buf, buf_offset,
+ ust_buffers_write(buf, buf_offset,
(int64_t[]){ (int64_t)tmp.v_ulong },
sizeof(int64_t));
break;
case LTT_TYPE_UNSIGNED_INT:
- ltt_relay_write(buf, buf_offset,
+ ust_buffers_write(buf, buf_offset,
(uint64_t[]){ (uint64_t)tmp.v_ulong },
sizeof(uint64_t));
break;
return result;
}
-notrace size_t ltt_serialize_data(struct rchan_buf *buf, size_t buf_offset,
+notrace size_t ltt_serialize_data(struct ust_buffer *buf, size_t buf_offset,
struct ltt_serialize_closure *closure,
void *serialize_private, int *largest_align,
const char *fmt, va_list *args)
}
static notrace
-void ltt_write_event_data(struct rchan_buf *buf, size_t buf_offset,
+void ltt_write_event_data(struct ust_buffer *buf, size_t buf_offset,
struct ltt_serialize_closure *closure,
void *serialize_private, int largest_align,
const char *fmt, va_list *args)
uint16_t eID;
size_t data_size, slot_size;
unsigned int chan_index;
- struct ltt_channel_struct *channel;
+ struct ust_channel *channel;
struct ltt_trace_struct *trace, *dest_trace = NULL;
- struct rchan_buf *buf;
+ struct ust_buffer *buf;
void *transport_data;
u64 tsc;
long buf_offset;
struct ltt_serialize_closure closure;
struct ltt_probe_private_data *private_data = call_data;
void *serialize_private = NULL;
-//ust// int cpu;
+ int cpu;
unsigned int rflags;
/*
rcu_read_lock(); //ust// rcu_read_lock_sched_notrace();
//ust// cpu = smp_processor_id();
+ cpu = ust_get_cpu();
//ust// __get_cpu_var(ltt_nesting)++;
+ /* FIXME: should nesting be per-cpu? */
ltt_nesting++;
pdata = (struct ltt_active_marker *)probe_data;
if (!channel->active)
continue;
+ /* If a new cpu was plugged since the trace was started, we did
+ * not add it to the trace, and therefore we write the event to
+ * cpu 0.
+ */
+ if(cpu >= channel->n_cpus) {
+ cpu = 0;
+ }
+
/* reserve space : header and data */
ret = ltt_reserve_slot(trace, channel, &transport_data,
data_size, &slot_size, &buf_offset,
&tsc, &rflags,
- largest_align);
+ largest_align, cpu);
if (unlikely(ret < 0))
continue; /* buffer full */
va_copy(args_copy, *args);
/* FIXME : could probably encapsulate transport better. */
//ust// buf = ((struct rchan *)channel->trans_channel_data)->buf[cpu];
- buf = ((struct rchan *)channel->trans_channel_data)->buf;
+ buf = channel->buf[cpu];
/* Out-of-order write : header and data */
buf_offset = ltt_write_event_header(trace,
- channel, buf, buf_offset,
+ buf, buf_offset,
eID, data_size, tsc, rflags);
ltt_write_event_data(buf, buf_offset, &closure,
serialize_private,