#include <assert.h>
#include <ust/core.h>
+#include <ust/clock.h>
#include "usterr.h"
#include "channels.h"
int data_ready_fd_write;
/* the reading end of the pipe */
int data_ready_fd_read;
+ /*
+ * List of buffers with an open pipe, used for fork and forced subbuffer
+ * switch.
+ */
+ struct cds_list_head open_buffers_list;
unsigned int finalized;
//ust// struct timer_list switch_timer; /* timer for periodical switch */
struct ust_channel *chan;
- struct kref kref;
+ struct urcu_ref urcu_ref;
void *buf_data;
size_t buf_size;
int shmid;
unsigned int cpu;
/* commit count per subbuffer; must be at end of struct */
- long commit_seq[0] ____cacheline_aligned; /* ATOMIC */
+ long commit_seq[0]; /* ATOMIC */
} ____cacheline_aligned;
/*
consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
commit_count = uatomic_read(&buf->commit_count[consumed_idx].cc_sb);
/*
- * No memory barrier here, since we are only interested
+ * No memory barrier here, since we are only interested
* in a statistically correct polling result. The next poll will
 * get the data if we are racing. The mb() that ensures correct
* memory order is in get_subbuf.
/*
* Perform retryable operations.
*/
- /* FIXME: make this rellay per cpu? */
- if (unlikely(LOAD_SHARED(ltt_nesting) > 4)) {
+ /* FIXME: make this really per cpu? */
+ if (unlikely(CMM_LOAD_SHARED(ltt_nesting) > 4)) {
DBG("Dropping event because nesting is too deep.");
uatomic_inc(&buf->events_lost);
return -EPERM;
long endidx = SUBBUF_INDEX(offset_end - 1, chan);
long commit_count;
-#ifdef LTT_NO_IPI_BARRIER
- smp_wmb();
-#else
- /*
- * Must write slot data before incrementing commit count.
- * This compiler barrier is upgraded into a smp_mb() by the IPI
- * sent by get_subbuf().
- */
- barrier();
-#endif
+ cmm_smp_wmb();
+
uatomic_add(&buf->commit_count[endidx].cc, slot_size);
/*
* commit count read can race with concurrent OOO commit count updates.
ltt_write_commit_counter(chan, buf, endidx, buf_offset, commit_count, data_size);
}
-void _ust_buffers_write(struct ust_buffer *buf, size_t offset,
- const void *src, size_t len, ssize_t cpy);
+void _ust_buffers_strncpy_fixup(struct ust_buffer *buf, size_t offset,
+ size_t len, size_t copied, int terminated);
static __inline__ int ust_buffers_write(struct ust_buffer *buf, size_t offset,
const void *src, size_t len)
{
- size_t cpy;
size_t buf_offset = BUFFER_OFFSET(offset, buf->chan);
assert(buf_offset < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+ assert(buf_offset + len
+ <= buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+
+ ust_buffers_do_copy(buf->buf_data + buf_offset, src, len);
+
+ return len;
+}
+
+/*
+ * ust_buffers_do_memset - write character into dest.
+ * @dest: destination
+ * @src: source character
+ * @len: length to write
+ */
+static __inline__
+void ust_buffers_do_memset(void *dest, char src, size_t len)
+{
+ /*
+ * What we really want here is an __inline__ memset, but we
+ * don't have constants, so gcc generally uses a function call.
+ */
+ for (; len > 0; len--)
+ *(u8 *)dest++ = src;
+}
+
+/*
+ * ust_buffers_do_strncpy - copy a string up to a certain number of bytes
+ * @dest: destination
+ * @src: source
+ * @len: max. length to copy
+ * @terminated: output string ends with \0 (output)
+ *
+ * returns the number of bytes copied. Does not finalize with \0 if len is
+ * reached.
+ */
+static __inline__
+size_t ust_buffers_do_strncpy(void *dest, const void *src, size_t len,
+ int *terminated)
+{
+ size_t orig_len = len;
+
+ *terminated = 0;
+ /*
+ * What we really want here is an __inline__ strncpy, but we
+ * don't have constants, so gcc generally uses a function call.
+ */
+ for (; len > 0; len--) {
+ *(u8 *)dest = CMM_LOAD_SHARED(*(const u8 *)src);
+ /* Check with dest, because src may be modified concurrently */
+ if (*(const u8 *)dest == '\0') {
+ len--;
+ *terminated = 1;
+ break;
+ }
+ dest++;
+ src++;
+ }
+ return orig_len - len;
+}
- cpy = min_t(size_t, len, buf->buf_size - buf_offset);
- ust_buffers_do_copy(buf->buf_data + buf_offset, src, cpy);
+static __inline__
+int ust_buffers_strncpy(struct ust_buffer *buf, size_t offset, const void *src,
+ size_t len)
+{
+ size_t buf_offset = BUFFER_OFFSET(offset, buf->chan);
+ ssize_t copied;
+ int terminated;
- if (unlikely(len != cpy))
- _ust_buffers_write(buf, buf_offset, src, len, cpy);
+ assert(buf_offset < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+ assert(buf_offset + len
+ <= buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+
+ copied = ust_buffers_do_strncpy(buf->buf_data + buf_offset,
+ src, len, &terminated);
+ if (unlikely(copied < len || !terminated))
+ _ust_buffers_strncpy_fixup(buf, offset, len, copied,
+ terminated);
return len;
}