#ifndef _UST_BUFFERS_H
#define _UST_BUFFERS_H
-#include <kcompat/kref.h>
#include <assert.h>
+
+#include <ust/core.h>
+
+#include "usterr.h"
#include "channels.h"
#include "tracerconst.h"
#include "tracercore.h"
#include "header-inline.h"
-#include <usterr.h>
/***** FIXME: SHOULD BE REMOVED ***** */
enum force_switch_mode { FORCE_ACTIVE, FORCE_FLUSH };
-extern int ltt_reserve_slot_lockless_slow(struct ust_trace *trace,
- struct ust_channel *ltt_channel, void **transport_data,
- size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
- unsigned int *rflags, int largest_align, int cpu);
+extern int ltt_reserve_slot_lockless_slow(struct ust_channel *chan,
+ struct ust_trace *trace, size_t data_size,
+ int largest_align, int cpu,
+ struct ust_buffer **ret_buf,
+ size_t *slot_size, long *buf_offset,
+ u64 *tsc, unsigned int *rflags);
extern void ltt_force_switch_lockless_slow(struct ust_buffer *buf,
enum force_switch_mode mode);
return 0;
}
-static __inline__ int ltt_reserve_slot(struct ust_trace *trace,
- struct ust_channel *chan, void **transport_data,
- size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
- unsigned int *rflags, int largest_align, int cpu)
+static __inline__ int ltt_reserve_slot(struct ust_channel *chan,
+ struct ust_trace *trace, size_t data_size,
+ int largest_align, int cpu,
+ struct ust_buffer **ret_buf,
+ size_t *slot_size, long *buf_offset, u64 *tsc,
+ unsigned int *rflags)
{
- struct ust_buffer *buf = chan->buf[cpu];
+ struct ust_buffer *buf = *ret_buf = chan->buf[cpu];
long o_begin, o_end, o_old;
size_t before_hdr_pad;
/*
* Perform retryable operations.
*/
- /* FIXME: make this rellay per cpu? */
+ /* FIXME: make this really per cpu? */
if (unlikely(LOAD_SHARED(ltt_nesting) > 4)) {
DBG("Dropping event because nesting is too deep.");
uatomic_inc(&buf->events_lost);
*buf_offset = o_begin + before_hdr_pad;
return 0;
slow_path:
- return ltt_reserve_slot_lockless_slow(trace, chan,
- transport_data, data_size, slot_size, buf_offset, tsc,
- rflags, largest_align, cpu);
+ return ltt_reserve_slot_lockless_slow(chan, trace, data_size,
+ largest_align, cpu, ret_buf,
+ slot_size, buf_offset, tsc,
+ rflags);
}
/*
long endidx = SUBBUF_INDEX(offset_end - 1, chan);
long commit_count;
-#ifdef LTT_NO_IPI_BARRIER
smp_wmb();
-#else
- /*
- * Must write slot data before incrementing commit count.
- * This compiler barrier is upgraded into a smp_mb() by the IPI
- * sent by get_subbuf().
- */
- barrier();
-#endif
+
uatomic_add(&buf->commit_count[endidx].cc, slot_size);
/*
* commit count read can race with concurrent OOO commit count updates.