*/
#define _GNU_SOURCE
+#define _LGPL_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
-#define _LGPL_SOURCE
#include <urcu-bp.h>
#include <urcu/rculist.h>
#include <ust/clock.h>
#include "buffers.h"
#include "tracer.h"
-#include "usterr.h"
+#include "usterr_signal_safe.h"
#include "ust_snprintf.h"
/*
*/
tracer_stack_pos++;
assert(tracer_stack_pos <= TRACER_STACK_LEN);
- barrier();
+ cmm_barrier();
tracer_stack[*stack_pos_ctx] =
strlen(tmp.v_string.s) + 1;
}
}
/*
 * ltt_vtrace - serialize one marker (tracepoint) hit into the trace buffers.
 *
 * NOTE(review): this region is an UNRESOLVED diff fragment — '-' lines are
 * the old API (struct marker, STORE_SHARED/LOAD_SHARED, barrier,
 * list_for_each_entry_rcu), '+' lines are the new liburcu/ust_marker API
 * (CMM_STORE_SHARED/CMM_LOAD_SHARED, cmm_barrier, cds_list_for_each_entry_rcu).
 * Large parts of the body are also elided here (locals such as 'cpu',
 * 'closure' and 'stack_pos_ctx' are used but their declarations and the
 * slot-reservation/serialization code are not visible).  Kept byte-identical;
 * resolve against upstream history rather than hand-reconstructing.
 */
-notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
- struct registers *regs, void *call_data,
+notrace void ltt_vtrace(const struct ust_marker *mdata, void *probe_data,
+ void *call_data,
const char *fmt, va_list *args)
{
int largest_align, ret;
- struct ltt_active_marker *pdata;
+ struct ltt_active_ust_marker *pdata;
uint16_t eID;
size_t data_size, slot_size;
unsigned int chan_index;
struct ust_channel *channel;
struct ust_trace *trace, *dest_trace = NULL;
struct ust_buffer *buf;
- void *transport_data;
u64 tsc;
long buf_offset;
va_list args_copy;
/* NOTE(review): declarations of 'cpu', 'closure', 'stack_pos_ctx' elided above. */
cpu = ust_get_cpu();
/* Force volatile access. */
/* Bump the per-thread tracing-nesting counter (re-entrancy guard). */
- STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) + 1);
+ CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) + 1);
stack_pos_ctx = tracer_stack_pos;
/* Compiler barrier: keep the nesting increment ordered before tracing work. */
- barrier();
+ cmm_barrier();
- pdata = (struct ltt_active_marker *)probe_data;
+ pdata = (struct ltt_active_ust_marker *)probe_data;
eID = mdata->event_id;
chan_index = mdata->channel_id;
closure.callbacks = pdata->probe->callbacks;
va_end(args_copy);
/* Iterate on each trace */
/*
 * NOTE(review): '<t_traces.head' below is an encoding garble of
 * '&ltt_traces.head' ('&lt' was HTML-unescaped to '<') — confirm against
 * the original source before applying.
 */
- list_for_each_entry_rcu(trace, <t_traces.head, list) {
+ cds_list_for_each_entry_rcu(trace, <t_traces.head, list) {
/*
 * Expect the filter to filter out events. If we get here,
 * we went through tracepoint activation as a first step.
/* NOTE(review): reservation/serialization/commit code elided here. */
DBG("just commited event (%s/%s) at offset %ld and size %zd", mdata->channel, mdata->name, buf_offset, slot_size);
}
/* Order the tracing work before restoring stack position and nesting. */
- barrier();
+ cmm_barrier();
tracer_stack_pos = stack_pos_ctx;
- STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) - 1);
+ CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) - 1);
rcu_read_unlock(); //ust// rcu_read_unlock_sched_notrace();
}
-notrace void ltt_trace(const struct marker *mdata, void *probe_data,
- struct registers *regs, void *call_data,
+notrace void ltt_trace(const struct ust_marker *mdata, void *probe_data,
+ void *call_data,
const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
- ltt_vtrace(mdata, probe_data, regs, call_data, fmt, &args);
+ ltt_vtrace(mdata, probe_data, call_data, fmt, &args);
va_end(args);
}