#include <urcu/compiler.h>
#include <urcu/ref.h>
#include <urcu/tls-compat.h>
+#include <poll.h>
#include <helper.h>
#include "smp.h"
#include "tlsfixup.h"
#include "../liblttng-ust/compat.h" /* For ENODATA */
-#ifndef max
-#define max(a, b) ((a) > (b) ? (a) : (b))
-#endif
-
/* Print DBG() messages about events lost only every 1048576 hits */
#define DBG_PRINT_NR_LOST (1UL << 20)
#define LTTNG_UST_RB_SIG_READ SIGRTMIN + 1
#define LTTNG_UST_RB_SIG_TEARDOWN SIGRTMIN + 2
#define CLOCKID CLOCK_MONOTONIC
+#define LTTNG_UST_RING_BUFFER_GET_RETRY 10
+#define LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS 10
+
+/*
+ * Non-static to ensure the compiler does not optimize away the xor.
+ */
+uint8_t lttng_crash_magic_xor[] = RB_CRASH_DUMP_ABI_MAGIC_XOR;
/*
* Use POSIX SHM: shm_open(3) and shm_unlink(3).
void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
- struct channel *chan = shmp(handle, buf->backend.chan);
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ struct channel *chan;
+ const struct lttng_ust_lib_ring_buffer_config *config;
unsigned int i;
+ chan = shmp(handle, buf->backend.chan);
+ if (!chan)
+ abort();
+ config = &chan->backend.config;
/*
* Reset iterator first. It will put the subbuffer if it currently holds
* it.
/* Don't reset reader reference count */
}
+/*
+ * Populate the per-buffer lttng_crash_abi descriptor with the layout
+ * information (magic, ABI version, field offsets, field lengths, array
+ * strides and buffer geometry) needed by a post-mortem crash-dump
+ * extraction tool to locate and decode ring buffer data in a memory
+ * dump. All offsets are expressed relative to the start of *buf.
+ */
+static
+void init_crash_abi(const struct lttng_ust_lib_ring_buffer_config *config,
+		struct lttng_crash_abi *crash_abi,
+		struct lttng_ust_lib_ring_buffer *buf,
+		struct channel_backend *chanb,
+		struct shm_object *shmobj,
+		struct lttng_ust_shm_handle *handle)
+{
+	int i;
+
+	/*
+	 * The magic bytes are stored XOR'ed with 0xFF in the binary (see
+	 * lttng_crash_magic_xor) so the plain magic never appears in the
+	 * executable image; un-XOR them into the crash ABI header.
+	 */
+	for (i = 0; i < RB_CRASH_DUMP_ABI_MAGIC_LEN; i++)
+		crash_abi->magic[i] = lttng_crash_magic_xor[i] ^ 0xFF;
+	crash_abi->mmap_length = shmobj->memory_map_size;
+	crash_abi->endian = RB_CRASH_ENDIAN;
+	crash_abi->major = RB_CRASH_DUMP_ABI_MAJOR;
+	crash_abi->minor = RB_CRASH_DUMP_ABI_MINOR;
+	crash_abi->word_size = sizeof(unsigned long);
+	crash_abi->layout_type = LTTNG_CRASH_TYPE_UST;
+
+	/* Offset of fields */
+	crash_abi->offset.prod_offset =
+		(uint32_t) ((char *) &buf->offset - (char *) buf);
+	crash_abi->offset.consumed_offset =
+		(uint32_t) ((char *) &buf->consumed - (char *) buf);
+	crash_abi->offset.commit_hot_array =
+		(uint32_t) ((char *) shmp(handle, buf->commit_hot) - (char *) buf);
+	crash_abi->offset.commit_hot_seq =
+		offsetof(struct commit_counters_hot, seq);
+	crash_abi->offset.buf_wsb_array =
+		(uint32_t) ((char *) shmp(handle, buf->backend.buf_wsb) - (char *) buf);
+	crash_abi->offset.buf_wsb_id =
+		offsetof(struct lttng_ust_lib_ring_buffer_backend_subbuffer, id);
+	crash_abi->offset.sb_array =
+		(uint32_t) ((char *) shmp(handle, buf->backend.array) - (char *) buf);
+	crash_abi->offset.sb_array_shmp_offset =
+		offsetof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp,
+			shmp._ref.offset);
+	crash_abi->offset.sb_backend_p_offset =
+		offsetof(struct lttng_ust_lib_ring_buffer_backend_pages,
+			p._ref.offset);
+
+	/* Field length */
+	crash_abi->length.prod_offset = sizeof(buf->offset);
+	crash_abi->length.consumed_offset = sizeof(buf->consumed);
+	crash_abi->length.commit_hot_seq =
+		sizeof(((struct commit_counters_hot *) NULL)->seq);
+	crash_abi->length.buf_wsb_id =
+		sizeof(((struct lttng_ust_lib_ring_buffer_backend_subbuffer *) NULL)->id);
+	crash_abi->length.sb_array_shmp_offset =
+		sizeof(((struct lttng_ust_lib_ring_buffer_backend_pages_shmp *) NULL)->shmp._ref.offset);
+	crash_abi->length.sb_backend_p_offset =
+		sizeof(((struct lttng_ust_lib_ring_buffer_backend_pages *) NULL)->p._ref.offset);
+
+	/* Array stride */
+	crash_abi->stride.commit_hot_array =
+		sizeof(struct commit_counters_hot);
+	crash_abi->stride.buf_wsb_array =
+		sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer);
+	crash_abi->stride.sb_array =
+		sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp);
+
+	/* Buffer constants */
+	crash_abi->buf_size = chanb->buf_size;
+	crash_abi->subbuf_size = chanb->subbuf_size;
+	crash_abi->num_subbuf = chanb->num_subbuf;
+	crash_abi->mode = (uint32_t) chanb->config.mode;
+
+	/*
+	 * Content/packet size fields are client-defined; record their
+	 * position when the client provides a callback, else zero them so
+	 * the extraction tool knows they are absent.
+	 */
+	if (config->cb.content_size_field) {
+		size_t offset, length;
+
+		config->cb.content_size_field(config, &offset, &length);
+		crash_abi->offset.content_size = offset;
+		crash_abi->length.content_size = length;
+	} else {
+		crash_abi->offset.content_size = 0;
+		crash_abi->length.content_size = 0;
+	}
+	if (config->cb.packet_size_field) {
+		size_t offset, length;
+
+		config->cb.packet_size_field(config, &offset, &length);
+		crash_abi->offset.packet_size = offset;
+		crash_abi->length.packet_size = length;
+	} else {
+		crash_abi->offset.packet_size = 0;
+		crash_abi->length.packet_size = 0;
+	}
+}
+
/*
* Must be called under cpu hotplug protection.
*/
if (buf->backend.allocated)
return 0;
- ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend,
- cpu, handle, shmobj);
- if (ret)
- return ret;
-
align_shm(shmobj, __alignof__(struct commit_counters_hot));
set_shmp(buf->commit_hot,
zalloc_shm(shmobj,
sizeof(struct commit_counters_hot) * chan->backend.num_subbuf));
if (!shmp(handle, buf->commit_hot)) {
- ret = -ENOMEM;
- goto free_chanbuf;
+ return -ENOMEM;
}
align_shm(shmobj, __alignof__(struct commit_counters_cold));
goto free_commit;
}
+ ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend,
+ cpu, handle, shmobj);
+ if (ret) {
+ goto free_init;
+ }
+
/*
* Write the subbuffer header for first subbuffer so we know the total
* duration of data gathering.
tsc = config->cb.ring_buffer_clock_read(shmp(handle, buf->backend.chan));
config->cb.buffer_begin(buf, tsc, 0, handle);
v_add(config, subbuf_header_size, &shmp_index(handle, buf->commit_hot, 0)->cc);
+ v_add(config, subbuf_header_size, &shmp_index(handle, buf->commit_hot, 0)->seq);
if (config->cb.buffer_create) {
ret = config->cb.buffer_create(buf, priv, cpu, chanb->name, handle);
if (ret)
- goto free_init;
+ goto free_chanbuf;
}
+
+ init_crash_abi(config, &buf->crash_abi, buf, chanb, shmobj, handle);
+
buf->backend.allocated = 1;
return 0;
for_each_possible_cpu(cpu) {
struct lttng_ust_lib_ring_buffer *buf =
shmp(handle, chan->backend.buf[cpu].shmp);
+
+ if (!buf)
+ abort();
if (uatomic_read(&buf->active_readers))
lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE,
chan->handle);
struct lttng_ust_lib_ring_buffer *buf =
shmp(handle, chan->backend.buf[0].shmp);
+ if (!buf)
+ abort();
if (uatomic_read(&buf->active_readers))
lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE,
chan->handle);
struct lttng_ust_lib_ring_buffer *buf =
shmp(handle, chan->backend.buf[cpu].shmp);
+ if (!buf)
+ abort();
if (uatomic_read(&buf->active_readers)
&& lib_ring_buffer_poll_deliver(config, buf,
chan, handle)) {
struct lttng_ust_lib_ring_buffer *buf =
shmp(handle, chan->backend.buf[0].shmp);
+ if (!buf)
+ abort();
if (uatomic_read(&buf->active_readers)
&& lib_ring_buffer_poll_deliver(config, buf,
chan, handle)) {
}
its.it_value.tv_sec = chan->switch_timer_interval / 1000000;
- its.it_value.tv_nsec = chan->switch_timer_interval % 1000000;
+ its.it_value.tv_nsec = (chan->switch_timer_interval % 1000000) * 1000;
its.it_interval.tv_sec = its.it_value.tv_sec;
its.it_interval.tv_nsec = its.it_value.tv_nsec;
}
its.it_value.tv_sec = chan->read_timer_interval / 1000000;
- its.it_value.tv_nsec = chan->read_timer_interval % 1000000;
+ its.it_value.tv_nsec = (chan->read_timer_interval % 1000000) * 1000;
its.it_interval.tv_sec = its.it_value.tv_sec;
its.it_interval.tv_nsec = its.it_value.tv_nsec;
* padding to let readers get those sub-buffers.
* Used for live streaming.
* @read_timer_interval: Time interval (in us) to wake up pending readers.
+ * @stream_fds: array of stream file descriptors.
+ * @nr_stream_fds: number of file descriptors in array.
*
* Holds cpu hotplug.
* Returns NULL on failure.
void *priv_data_init,
void *buf_addr, size_t subbuf_size,
size_t num_subbuf, unsigned int switch_timer_interval,
- unsigned int read_timer_interval)
+ unsigned int read_timer_interval,
+ const int *stream_fds, int nr_stream_fds)
{
int ret;
size_t shmsize, chansize;
else
nr_streams = 1;
+ if (nr_stream_fds != nr_streams)
+ return NULL;
+
if (lib_ring_buffer_check_config(config, switch_timer_interval,
read_timer_interval))
return NULL;
shmsize += priv_data_size;
/* Allocate normal memory for channel (not shared) */
- shmobj = shm_object_table_alloc(handle->table, shmsize, SHM_OBJECT_MEM);
+ shmobj = shm_object_table_alloc(handle->table, shmsize, SHM_OBJECT_MEM,
+ -1);
if (!shmobj)
goto error_append;
/* struct channel is at object 0, offset 0 (hardcoded) */
}
ret = channel_backend_init(&chan->backend, name, config,
- subbuf_size, num_subbuf, handle);
+ subbuf_size, num_subbuf, handle,
+ stream_fds);
if (ret)
goto error_backend_init;
struct channel *chan = shmp(handle, buf->backend.chan);
const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
- int ret;
- int finalized;
+ int ret, finalized, nr_retry = LTTNG_UST_RING_BUFFER_GET_RETRY;
retry:
finalized = CMM_ACCESS_ONCE(buf->finalized);
/*
* Check that the subbuffer we are trying to consume has been
- * already fully committed.
+ * already fully committed. There are a few causes that can make
+ * this unavailability situation occur:
+ *
+ * Temporary (short-term) situation:
+ * - Application is running on a different CPU, between reserve
+ * and commit ring buffer operations,
+ * - Application is preempted between reserve and commit ring
+ * buffer operations,
+ *
+ * Long-term situation:
+ * - Application is stopped (SIGSTOP) between reserve and commit
+ * ring buffer operations. Could eventually be resumed by
+ * SIGCONT.
+	 * - Application is killed (SIGTERM, SIGINT, SIGKILL) between
+	 *   reserve and commit ring buffer operations.
+ *
+ * From a consumer perspective, handling short-term
+ * unavailability situations is performed by retrying a few
+ * times after a delay. Handling long-term unavailability
+ * situations is handled by failing to get the sub-buffer.
+ *
+ * In all of those situations, if the application is taking a
+ * long time to perform its commit after ring buffer space
+ * reservation, we can end up in a situation where the producer
+ * will fill the ring buffer and try to write into the same
+ * sub-buffer again (which has a missing commit). This is
+ * handled by the producer in the sub-buffer switch handling
+ * code of the reserve routine by detecting unbalanced
+ * reserve/commit counters and discarding all further events
+ * until the situation is resolved in those situations. Two
+ * scenarios can occur:
+ *
+ * 1) The application causing the reserve/commit counters to be
+ * unbalanced has been terminated. In this situation, all
+ * further events will be discarded in the buffers, and no
+ * further buffer data will be readable by the consumer
+ * daemon. Tearing down the UST tracing session and starting
+ * anew is a work-around for those situations. Note that this
+ * only affects per-UID tracing. In per-PID tracing, the
+ * application vanishes with the termination, and therefore
+ * no more data needs to be written to the buffers.
+ * 2) The application causing the unbalance has been delayed for
+ * a long time, but will eventually try to increment the
+ * commit counter after eventually writing to the sub-buffer.
+ * This situation can cause events to be discarded until the
+ * application resumes its operations.
*/
if (((commit_count - chan->backend.subbuf_size)
& chan->commit_count_mask)
- (buf_trunc(consumed, chan)
>> chan->backend.num_subbuf_order)
- != 0)
- goto nodata;
+ != 0) {
+ if (nr_retry-- > 0) {
+ if (nr_retry <= (LTTNG_UST_RING_BUFFER_GET_RETRY >> 1))
+ (void) poll(NULL, 0, LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS);
+ goto retry;
+ } else {
+ goto nodata;
+ }
+ }
/*
* Check that we are not about to read the same subbuffer in
* the writer is getting access to a subbuffer we were trying to get
* access to. Also checks that the "consumed" buffer count we are
* looking for matches the one contained in the subbuffer id.
+ *
+ * The short-lived race window described here can be affected by
+ * application signals and preemption, thus requiring to bound
+ * the loop to a maximum number of retry.
*/
ret = update_read_sb_index(config, &buf->backend, &chan->backend,
consumed_idx, buf_trunc_val(consumed, chan),
handle);
- if (ret)
- goto retry;
+ if (ret) {
+ if (nr_retry-- > 0) {
+ if (nr_retry <= (LTTNG_UST_RING_BUFFER_GET_RETRY >> 1))
+ (void) poll(NULL, 0, LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS);
+ goto retry;
+ } else {
+ goto nodata;
+ }
+ }
subbuffer_id_clear_noref(config, &buf->backend.buf_rsb.id);
buf->get_subbuf_consumed = consumed;
/*
* lib_ring_buffer_switch_old_start: Populate old subbuffer header.
*
- * Only executed when the buffer is finalized, in SWITCH_FLUSH.
+ * Only executed by SWITCH_FLUSH, which can be issued while tracing is
+ * active or at buffer finalization (destroy).
*/
static
void lib_ring_buffer_switch_old_start(struct lttng_ust_lib_ring_buffer *buf,
unsigned long sb_index, commit_count;
/*
- * We are performing a SWITCH_FLUSH. At this stage, there are no
- * concurrent writes into the buffer.
+ * We are performing a SWITCH_FLUSH. There may be concurrent
+ * writes into the buffer if e.g. invoked while performing a
+ * snapshot on an active trace.
*
- * The client does not save any header information. Don't
- * switch empty subbuffer on finalize, because it is invalid to
- * deliver a completely empty subbuffer.
+ * If the client does not save any header information
+ * (sub-buffer header size == 0), don't switch empty subbuffer
+ * on finalize, because it is invalid to deliver a completely
+ * empty subbuffer.
*/
if (!config->cb.subbuffer_header_size())
return -1;