#include <urcu/compiler.h>
#include <urcu/ref.h>
#include <urcu/tls-compat.h>
+#include <poll.h>
#include <helper.h>
#include "smp.h"
#include "tlsfixup.h"
#include "../liblttng-ust/compat.h" /* For ENODATA */
-#ifndef max
-#define max(a, b) ((a) > (b) ? (a) : (b))
-#endif
-
/* Print DBG() messages about events lost only every 1048576 hits */
#define DBG_PRINT_NR_LOST (1UL << 20)
#define LTTNG_UST_RB_SIG_READ (SIGRTMIN + 1)
#define LTTNG_UST_RB_SIG_TEARDOWN (SIGRTMIN + 2)
#define CLOCKID CLOCK_MONOTONIC
+#define LTTNG_UST_RING_BUFFER_GET_RETRY 10
+#define LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS 10
+
+/*
+ * Non-static to ensure the compiler does not optimize away the xor.
+ */
+uint8_t lttng_crash_magic_xor[] = RB_CRASH_DUMP_ABI_MAGIC_XOR;
/*
* Use POSIX SHM: shm_open(3) and shm_unlink(3).
void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
- struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ struct channel *chan;
+ const struct lttng_ust_lib_ring_buffer_config *config;
unsigned int i;
+
+ chan = shmp(handle, buf->backend.chan);
+ if (!chan)
+ abort();
+ config = &chan->backend.config;
/*
* Reset iterator first. It will put the subbuffer if it currently holds
* it.
/* Don't reset reader reference count */
}
+static
+void init_crash_abi(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_crash_abi *crash_abi,
+ struct lttng_ust_lib_ring_buffer *buf,
+ struct channel_backend *chanb,
+ struct shm_object *shmobj,
+ struct lttng_ust_shm_handle *handle)
+{
+ int i;
+
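+ /*
+ * Decode the xor-encoded magic. The cleartext magic bytes then
+ * exist only in the mapped buffer memory, not verbatim in the
+ * tracer binary, so tools scanning memory for the magic should
+ * not match the executable image itself.
+ */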
+ for (i = 0; i < RB_CRASH_DUMP_ABI_MAGIC_LEN; i++)
+ crash_abi->magic[i] = lttng_crash_magic_xor[i] ^ 0xFF;
+ crash_abi->mmap_length = shmobj->memory_map_size;
+ crash_abi->endian = RB_CRASH_ENDIAN;
+ crash_abi->major = RB_CRASH_DUMP_ABI_MAJOR;
+ crash_abi->minor = RB_CRASH_DUMP_ABI_MINOR;
+ crash_abi->word_size = sizeof(unsigned long);
+ crash_abi->layout_type = LTTNG_CRASH_TYPE_UST;
+
+ /* Offset of fields */
+ crash_abi->offset.prod_offset =
+ (uint32_t) ((char *) &buf->offset - (char *) buf);
+ crash_abi->offset.consumed_offset =
+ (uint32_t) ((char *) &buf->consumed - (char *) buf);
+ crash_abi->offset.commit_hot_array =
+ (uint32_t) ((char *) shmp(handle, buf->commit_hot) - (char *) buf);
+ crash_abi->offset.commit_hot_seq =
+ offsetof(struct commit_counters_hot, seq);
+ crash_abi->offset.buf_wsb_array =
+ (uint32_t) ((char *) shmp(handle, buf->backend.buf_wsb) - (char *) buf);
+ crash_abi->offset.buf_wsb_id =
+ offsetof(struct lttng_ust_lib_ring_buffer_backend_subbuffer, id);
+ crash_abi->offset.sb_array =
+ (uint32_t) ((char *) shmp(handle, buf->backend.array) - (char *) buf);
+ crash_abi->offset.sb_array_shmp_offset =
+ offsetof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp,
+ shmp._ref.offset);
+ crash_abi->offset.sb_backend_p_offset =
+ offsetof(struct lttng_ust_lib_ring_buffer_backend_pages,
+ p._ref.offset);
+
+ /* Field length */
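+ /*
+ * Note: sizeof() applied to a member reached through a null
+ * pointer is evaluated at compile time only; nothing is
+ * dereferenced at run time.
+ */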
+ crash_abi->length.prod_offset = sizeof(buf->offset);
+ crash_abi->length.consumed_offset = sizeof(buf->consumed);
+ crash_abi->length.commit_hot_seq =
+ sizeof(((struct commit_counters_hot *) NULL)->seq);
+ crash_abi->length.buf_wsb_id =
+ sizeof(((struct lttng_ust_lib_ring_buffer_backend_subbuffer *) NULL)->id);
+ crash_abi->length.sb_array_shmp_offset =
+ sizeof(((struct lttng_ust_lib_ring_buffer_backend_pages_shmp *) NULL)->shmp._ref.offset);
+ crash_abi->length.sb_backend_p_offset =
+ sizeof(((struct lttng_ust_lib_ring_buffer_backend_pages *) NULL)->p._ref.offset);
+
+ /* Array stride */
+ crash_abi->stride.commit_hot_array =
+ sizeof(struct commit_counters_hot);
+ crash_abi->stride.buf_wsb_array =
+ sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer);
+ crash_abi->stride.sb_array =
+ sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp);
+
+ /* Buffer constants */
+ crash_abi->buf_size = chanb->buf_size;
+ crash_abi->subbuf_size = chanb->subbuf_size;
+ crash_abi->num_subbuf = chanb->num_subbuf;
+ crash_abi->mode = (uint32_t) chanb->config.mode;
+
+ if (config->cb.content_size_field) {
+ size_t offset, length;
+
+ config->cb.content_size_field(config, &offset, &length);
+ crash_abi->offset.content_size = offset;
+ crash_abi->length.content_size = length;
+ } else {
+ crash_abi->offset.content_size = 0;
+ crash_abi->length.content_size = 0;
+ }
+ if (config->cb.packet_size_field) {
+ size_t offset, length;
+
+ config->cb.packet_size_field(config, &offset, &length);
+ crash_abi->offset.packet_size = offset;
+ crash_abi->length.packet_size = length;
+ } else {
+ crash_abi->offset.packet_size = 0;
+ crash_abi->length.packet_size = 0;
+ }
+}
+
/*
* Must be called under cpu hotplug protection.
*/
if (buf->backend.allocated)
return 0;
- ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend,
- cpu, handle, shmobj);
- if (ret)
- return ret;
-
align_shm(shmobj, __alignof__(struct commit_counters_hot));
set_shmp(buf->commit_hot,
zalloc_shm(shmobj,
sizeof(struct commit_counters_hot) * chan->backend.num_subbuf));
if (!shmp(handle, buf->commit_hot)) {
- ret = -ENOMEM;
- goto free_chanbuf;
+ return -ENOMEM;
}
align_shm(shmobj, __alignof__(struct commit_counters_cold));
goto free_commit;
}
+ ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend,
+ cpu, handle, shmobj);
+ if (ret)
+ goto free_init;
+
/*
* Write the subbuffer header for first subbuffer so we know the total
* duration of data gathering.
tsc = config->cb.ring_buffer_clock_read(shmp(handle, buf->backend.chan));
config->cb.buffer_begin(buf, tsc, 0, handle);
v_add(config, subbuf_header_size, &shmp_index(handle, buf->commit_hot, 0)->cc);
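+ /*
+ * Mirror the header size into the sequential commit counter
+ * (seq), exposed through the crash dump ABI, keeping it
+ * consistent with the commit counter (cc) above.
+ */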
+ v_add(config, subbuf_header_size, &shmp_index(handle, buf->commit_hot, 0)->seq);
if (config->cb.buffer_create) {
ret = config->cb.buffer_create(buf, priv, cpu, chanb->name, handle);
if (ret)
- goto free_init;
+ goto free_chanbuf;
}
+
+ init_crash_abi(config, &buf->crash_abi, buf, chanb, shmobj, handle);
+
buf->backend.allocated = 1;
return 0;
for_each_possible_cpu(cpu) {
struct lttng_ust_lib_ring_buffer *buf =
shmp(handle, chan->backend.buf[cpu].shmp);
+
+ if (!buf)
+ abort();
if (uatomic_read(&buf->active_readers))
lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE,
chan->handle);
struct lttng_ust_lib_ring_buffer *buf =
shmp(handle, chan->backend.buf[0].shmp);
+ if (!buf)
+ abort();
if (uatomic_read(&buf->active_readers))
lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE,
chan->handle);
struct lttng_ust_lib_ring_buffer *buf =
shmp(handle, chan->backend.buf[cpu].shmp);
+ if (!buf)
+ abort();
if (uatomic_read(&buf->active_readers)
&& lib_ring_buffer_poll_deliver(config, buf,
chan, handle)) {
struct lttng_ust_lib_ring_buffer *buf =
shmp(handle, chan->backend.buf[0].shmp);
+ if (!buf)
+ abort();
if (uatomic_read(&buf->active_readers)
&& lib_ring_buffer_poll_deliver(config, buf,
chan, handle)) {
}
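+ /* switch_timer_interval is in us: split into seconds and ns. */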
its.it_value.tv_sec = chan->switch_timer_interval / 1000000;
- its.it_value.tv_nsec = chan->switch_timer_interval % 1000000;
+ its.it_value.tv_nsec = (chan->switch_timer_interval % 1000000) * 1000;
its.it_interval.tv_sec = its.it_value.tv_sec;
its.it_interval.tv_nsec = its.it_value.tv_nsec;
}
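+ /* read_timer_interval is in us: split into seconds and ns. */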
its.it_value.tv_sec = chan->read_timer_interval / 1000000;
- its.it_value.tv_nsec = chan->read_timer_interval % 1000000;
+ its.it_value.tv_nsec = (chan->read_timer_interval % 1000000) * 1000;
its.it_interval.tv_sec = its.it_value.tv_sec;
its.it_interval.tv_nsec = its.it_value.tv_nsec;
* padding to let readers get those sub-buffers.
* Used for live streaming.
* @read_timer_interval: Time interval (in us) to wake up pending readers.
+ * @stream_fds: array of stream file descriptors.
+ * @nr_stream_fds: number of file descriptors in array.
*
* Holds cpu hotplug.
* Returns NULL on failure.
void *priv_data_init,
void *buf_addr, size_t subbuf_size,
size_t num_subbuf, unsigned int switch_timer_interval,
- unsigned int read_timer_interval)
+ unsigned int read_timer_interval,
+ const int *stream_fds, int nr_stream_fds)
{
int ret;
size_t shmsize, chansize;
else
nr_streams = 1;
+ if (nr_stream_fds != nr_streams)
+ return NULL;
+
if (lib_ring_buffer_check_config(config, switch_timer_interval,
read_timer_interval))
return NULL;
shmsize += priv_data_size;
/* Allocate normal memory for channel (not shared) */
- shmobj = shm_object_table_alloc(handle->table, shmsize, SHM_OBJECT_MEM);
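+ /* An in-memory object is not backed by a stream fd: pass -1. */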
+ shmobj = shm_object_table_alloc(handle->table, shmsize, SHM_OBJECT_MEM,
+ -1);
if (!shmobj)
goto error_append;
/* struct channel is at object 0, offset 0 (hardcoded) */
}
ret = channel_backend_init(&chan->backend, name, config,
- subbuf_size, num_subbuf, handle);
+ subbuf_size, num_subbuf, handle,
+ stream_fds);
if (ret)
goto error_backend_init;
struct channel *chan = shmp(handle, buf->backend.chan);
const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
- int ret;
- int finalized;
+ int ret, finalized, nr_retry = LTTNG_UST_RING_BUFFER_GET_RETRY;
retry:
finalized = CMM_ACCESS_ONCE(buf->finalized);
/*
* Check that the subbuffer we are trying to consume has been
- * already fully committed.
+ * already fully committed. A few causes can lead to this
+ * unavailability situation:
+ *
+ * Temporary (short-term) situation:
+ * - Application is running on a different CPU between reserve
+ * and commit ring buffer operations.
+ * - Application is preempted between reserve and commit ring
+ * buffer operations.
+ *
+ * Long-term situation:
+ * - Application is stopped (SIGSTOP) between reserve and commit
+ * ring buffer operations. It could eventually be resumed by
+ * SIGCONT.
+ * - Application is killed (SIGTERM, SIGINT, SIGKILL) between
+ * reserve and commit ring buffer operations.
+ *
+ * From a consumer perspective, short-term unavailability
+ * situations are handled by retrying a few times after a
+ * delay. Long-term unavailability situations are handled by
+ * failing to get the sub-buffer.
+ *
+ * In all of those situations, if the application is taking a
+ * long time to perform its commit after ring buffer space
+ * reservation, we can end up in a situation where the producer
+ * will fill the ring buffer and try to write into the same
+ * sub-buffer again (which has a missing commit). This is
+ * handled by the producer in the sub-buffer switch handling
+ * code of the reserve routine by detecting unbalanced
+ * reserve/commit counters and discarding all further events
+ * until the situation is resolved. Two scenarios can occur:
+ *
+ * 1) The application causing the reserve/commit counters to be
+ * unbalanced has been terminated. In this situation, all
+ * further events will be discarded in the buffers, and no
+ * further buffer data will be readable by the consumer
+ * daemon. Tearing down the UST tracing session and starting
+ * anew is a work-around for those situations. Note that this
+ * only affects per-UID tracing. In per-PID tracing, the
+ * application vanishes with the termination, and therefore
+ * no more data needs to be written to the buffers.
+ * 2) The application causing the imbalance has been delayed for
+ * a long time, but will eventually increment the commit
+ * counter after writing to the sub-buffer.
+ * This situation can cause events to be discarded until the
+ * application resumes its operations.
*/
if (((commit_count - chan->backend.subbuf_size)
& chan->commit_count_mask)
- - (buf_trunc(consumed_cur, chan)
+ - (buf_trunc(consumed, chan)
>> chan->backend.num_subbuf_order)
- != 0)
- goto nodata;
+ != 0) {
+ if (nr_retry-- > 0) {
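+ /*
+ * Spin on the first retries, then back off by
+ * waiting LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS
+ * between the remaining attempts.
+ */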
+ if (nr_retry <= (LTTNG_UST_RING_BUFFER_GET_RETRY >> 1))
+ (void) poll(NULL, 0, LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS);
+ goto retry;
+ } else {
+ goto nodata;
+ }
+ }
/*
* Check that we are not about to read the same subbuffer in
* which the writer head is.
*/
- if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_cur, chan)
+ if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed, chan)
== 0)
goto nodata;
* the writer is getting access to a subbuffer we were trying to get
* access to. Also checks that the "consumed" buffer count we are
* looking for matches the one contained in the subbuffer id.
+ *
+ * The short-lived race window described here can be affected by
+ * application signals and preemption, thus requiring the loop
+ * to be bounded by a maximum number of retries.
*/
ret = update_read_sb_index(config, &buf->backend, &chan->backend,
consumed_idx, buf_trunc_val(consumed, chan),
handle);
- if (ret)
- goto retry;
+ if (ret) {
+ if (nr_retry-- > 0) {
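+ /* Same retry and backoff policy as the commit check above. */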
+ if (nr_retry <= (LTTNG_UST_RING_BUFFER_GET_RETRY >> 1))
+ (void) poll(NULL, 0, LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS);
+ goto retry;
+ } else {
+ goto nodata;
+ }
+ }
subbuffer_id_clear_noref(config, &buf->backend.buf_rsb.id);
buf->get_subbuf_consumed = consumed;
/*
* lib_ring_buffer_switch_old_start: Populate old subbuffer header.
*
- * Only executed when the buffer is finalized, in SWITCH_FLUSH.
+ * Only executed by SWITCH_FLUSH, which can be issued while tracing is
+ * active or at buffer finalization (destroy).
*/
static
void lib_ring_buffer_switch_old_start(struct lttng_ust_lib_ring_buffer *buf,
commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
/* Check if the written buffer has to be delivered */
lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
- commit_count, oldidx, handle);
+ commit_count, oldidx, handle, tsc);
lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
- offsets->old, commit_count,
- config->cb.subbuffer_header_size(),
- handle);
+ offsets->old + config->cb.subbuffer_header_size(),
+ commit_count, handle);
}
/*
v_add(config, padding_size, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
- commit_count, oldidx, handle);
+ commit_count, oldidx, handle, tsc);
lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
- offsets->old, commit_count,
- padding_size, handle);
+ offsets->old + padding_size, commit_count, handle);
}
/*
commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, beginidx)->cc);
/* Check if the written buffer has to be delivered */
lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
- commit_count, beginidx, handle);
+ commit_count, beginidx, handle, tsc);
lib_ring_buffer_write_commit_counter(config, buf, chan, beginidx,
- offsets->begin, commit_count,
- config->cb.subbuffer_header_size(),
- handle);
+ offsets->begin + config->cb.subbuffer_header_size(),
+ commit_count, handle);
}
/*
struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
- uint64_t *tsc)
+ uint64_t *tsc,
+ struct lttng_ust_shm_handle *handle)
{
const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
- unsigned long off;
+ unsigned long off, reserve_commit_diff;
offsets->begin = v_read(config, &buf->offset);
offsets->old = offsets->begin;
* timestamps) are visible to the reader. This is required for
* quiescence guarantees for the fusion merge.
*/
- if (mode == SWITCH_FLUSH || off > 0) {
- if (caa_unlikely(off == 0)) {
- /*
- * A final flush that encounters an empty
- * sub-buffer cannot switch buffer if a
- * reader is located within this sub-buffer.
- * Anyway, the purpose of final flushing of a
- * sub-buffer at offset 0 is to handle the case
- * of entirely empty stream.
- */
- if (caa_unlikely(subbuf_trunc(offsets->begin, chan)
- - subbuf_trunc((unsigned long)
- uatomic_read(&buf->consumed), chan)
- >= chan->backend.buf_size))
- return -1;
- /*
- * The client does not save any header information.
- * Don't switch empty subbuffer on finalize, because it
- * is invalid to deliver a completely empty subbuffer.
- */
- if (!config->cb.subbuffer_header_size())
+ if (mode != SWITCH_FLUSH && !off)
+ return -1; /* we do not have to switch: buffer is empty */
+
+ if (caa_unlikely(off == 0)) {
+ unsigned long sb_index, commit_count;
+
+ /*
+ * We are performing a SWITCH_FLUSH. There may be concurrent
+ * writes into the buffer, e.g. when invoked while taking a
+ * snapshot of an active trace.
+ *
+ * If the client does not save any header information
+ * (sub-buffer header size == 0), don't switch empty subbuffer
+ * on finalize, because it is invalid to deliver a completely
+ * empty subbuffer.
+ */
+ if (!config->cb.subbuffer_header_size())
+ return -1;
+
+ /* Test new buffer integrity */
+ sb_index = subbuf_index(offsets->begin, chan);
+ commit_count = v_read(config,
+ &shmp_index(handle, buf->commit_cold,
+ sb_index)->cc_sb);
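+ /*
+ * A non-zero difference means the reserve and commit
+ * counts for this sub-buffer are unbalanced: a writer
+ * crashed or is still holding it.
+ */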
+ reserve_commit_diff =
+ (buf_trunc(offsets->begin, chan)
+ >> chan->backend.num_subbuf_order)
+ - (commit_count & chan->commit_count_mask);
+ if (caa_likely(reserve_commit_diff == 0)) {
+ /* Next subbuffer not being written to. */
+ if (caa_unlikely(config->mode != RING_BUFFER_OVERWRITE &&
+ subbuf_trunc(offsets->begin, chan)
+ - subbuf_trunc((unsigned long)
+ uatomic_read(&buf->consumed), chan)
+ >= chan->backend.buf_size)) {
+ /*
+ * We do not overwrite non-consumed buffers
+ * and we are full: don't switch.
+ */
return -1;
+ } else {
+ /*
+ * Next subbuffer not being written to, and we
+ * are either in overwrite mode or the buffer is
+ * not full. It's safe to write in this new
+ * subbuffer.
+ */
+ }
+ } else {
/*
- * Need to write the subbuffer start header on finalize.
+ * Next subbuffer reserve offset does not match the
+ * commit offset. Don't perform switch in
+ * producer-consumer and overwrite mode. Caused by
+ * either a writer OOPS or too many nested writes over a
+ * reserve/commit pair.
*/
- offsets->switch_old_start = 1;
+ return -1;
}
- offsets->begin = subbuf_align(offsets->begin, chan);
- } else
- return -1; /* we do not have to switch : buffer is empty */
+
+ /*
+ * Need to write the subbuffer start header on finalize.
+ */
+ offsets->switch_old_start = 1;
+ }
+ offsets->begin = subbuf_align(offsets->begin, chan);
/* Note: old points to the next subbuf at offset 0 */
offsets->end = offsets->begin;
return 0;
*/
do {
if (lib_ring_buffer_try_switch_slow(mode, buf, chan, &offsets,
- &tsc))
+ &tsc, handle))
return; /* Switch not needed */
} while (v_cmpxchg(config, &buf->offset, offsets.old, offsets.end)
!= offsets.old);
{
const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
struct lttng_ust_shm_handle *handle = ctx->handle;
- unsigned long reserve_commit_diff;
+ unsigned long reserve_commit_diff, offset_cmp;
- offsets->begin = v_read(config, &buf->offset);
+retry:
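+ /*
+ * Keep a copy of the reserve offset: it is compared against
+ * buf->offset again after reading the commit counter, to
+ * detect concurrent reserve updates (see below).
+ */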
+ offsets->begin = offset_cmp = v_read(config, &buf->offset);
offsets->old = offsets->begin;
offsets->switch_new_start = 0;
offsets->switch_new_end = 0;
}
}
if (caa_unlikely(offsets->switch_new_start)) {
- unsigned long sb_index;
+ unsigned long sb_index, commit_count;
/*
* We are typically not filling the previous buffer completely.
+ config->cb.subbuffer_header_size();
/* Test new buffer integrity */
sb_index = subbuf_index(offsets->begin, chan);
+ /*
+ * Read buf->offset before buf->commit_cold[sb_index].cc_sb.
+ * lib_ring_buffer_check_deliver() has the matching
+ * memory barriers required around commit_cold cc_sb
+ * updates to ensure reserve and commit counter updates
+ * are not seen reordered when updated by another CPU.
+ */
+ cmm_smp_rmb();
+ commit_count = v_read(config,
+ &shmp_index(handle, buf->commit_cold,
+ sb_index)->cc_sb);
+ /* Read buf->commit_cold[sb_index].cc_sb before buf->offset. */
+ cmm_smp_rmb();
+ if (caa_unlikely(offset_cmp != v_read(config, &buf->offset))) {
+ /*
+ * The reserve counter has been concurrently updated
+ * while we read the commit counter. This means the
+ * commit counter we read might not match buf->offset
+ * due to concurrent update. We therefore need to retry.
+ */
+ goto retry;
+ }
reserve_commit_diff =
(buf_trunc(offsets->begin, chan)
>> chan->backend.num_subbuf_order)
- - ((unsigned long) v_read(config,
- &shmp_index(handle, buf->commit_cold, sb_index)->cc_sb)
- & chan->commit_count_mask);
+ - (commit_count & chan->commit_count_mask);
if (caa_likely(reserve_commit_diff == 0)) {
/* Next subbuffer not being written to. */
if (caa_unlikely(config->mode != RING_BUFFER_OVERWRITE &&
/*
* Next subbuffer reserve offset does not match the
- * commit offset. Drop record in producer-consumer and
+ * commit offset, and this did not involve an update to the
+ * reserve counter. Drop record in producer-consumer and
* overwrite mode. Caused by either a writer OOPS or too
* many nested writes over a reserve/commit pair.
*/