#include "shm.h"
#include "rb-init.h"
#include "common/compat/errno.h" /* For ENODATA */
+#include "common/populate.h"
/* Print DBG() messages about events lost only every 1048576 hits */
#define DBG_PRINT_NR_LOST (1UL << 20)
/*
* Non-static to ensure the compiler does not optimize away the xor.
*/
+uint8_t lttng_crash_magic_xor[]
+ __attribute__((visibility("hidden")));
uint8_t lttng_crash_magic_xor[] = RB_CRASH_DUMP_ABI_MAGIC_XOR;
/*
}
uatomic_set(&buf->consumed, 0);
uatomic_set(&buf->record_disabled, 0);
- v_set(config, &buf->last_tsc, 0);
+ v_set(config, &buf->last_timestamp, 0);
lib_ring_buffer_backend_reset(&buf->backend, handle);
/* Don't reset number of active readers */
v_set(config, &buf->records_lost_full, 0);
struct commit_counters_hot *cc_hot;
void *priv = channel_get_private_config(chan);
size_t subbuf_header_size;
- uint64_t tsc;
+ uint64_t timestamp;
int ret;
/* Test for cpu hotplug */
ret = -EPERM;
goto free_chanbuf;
}
- tsc = config->cb.ring_buffer_clock_read(shmp_chan);
- config->cb.buffer_begin(buf, tsc, 0, handle);
+ timestamp = config->cb.ring_buffer_clock_read(shmp_chan);
+ config->cb.buffer_begin(buf, timestamp, 0, handle);
cc_hot = shmp_index(handle, buf->commit_hot, 0);
if (!cc_hot) {
ret = -EPERM;
struct shm_object *shmobj;
unsigned int nr_streams;
int64_t blocking_timeout_ms;
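+ /* Whether to populate (pre-fault) this channel's shared memory mappings, per the map populate setting. */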
+ bool populate = lttng_ust_map_populate_is_enabled();
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- nr_streams = num_possible_cpus();
+ nr_streams = get_possible_cpus_array_len();
else
nr_streams = 1;
read_timer_interval))
return NULL;
- handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
+ handle = zmalloc_populate(sizeof(struct lttng_ust_shm_handle), populate);
if (!handle)
return NULL;
/* Allocate table for channel + per-cpu buffers */
- handle->table = shm_object_table_create(1 + num_possible_cpus());
+ handle->table = shm_object_table_create(1 + get_possible_cpus_array_len(), populate);
if (!handle->table)
goto error_table_alloc;
/* Allocate normal memory for channel (not shared) */
shmobj = shm_object_table_alloc(handle->table, shmsize, SHM_OBJECT_MEM,
- -1, -1);
+ -1, -1, populate);
if (!shmobj)
goto error_append;
/* struct lttng_ust_ring_buffer_channel is at object 0, offset 0 (hardcoded) */
{
struct lttng_ust_shm_handle *handle;
struct shm_object *object;
+ bool populate = lttng_ust_map_populate_is_enabled();
- handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
+ handle = zmalloc_populate(sizeof(struct lttng_ust_shm_handle), populate);
if (!handle)
return NULL;
/* Allocate table for channel + per-cpu buffers */
- handle->table = shm_object_table_create(1 + num_possible_cpus());
+ handle->table = shm_object_table_create(1 + get_possible_cpus_array_len(), populate);
if (!handle->table)
goto error_table_alloc;
/* Add channel object */
/* Add stream object */
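+ /* The populate decision is taken per stream, keyed on the stream's CPU number. */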
object = shm_object_table_append_shm(handle->table,
shm_fd, wakeup_fd, stream_nr,
- memory_map_size);
+ memory_map_size, lttng_ust_map_populate_cpu_is_enabled(stream_nr));
if (!object)
return -EINVAL;
return 0;
struct lttng_ust_shm_handle *handle,
int *shm_fd, int *wait_fd,
int *wakeup_fd,
- uint64_t *memory_map_size)
+ uint64_t *memory_map_size,
+ void **memory_map_addr)
{
struct shm_ref *ref;
if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
cpu = 0;
} else {
- if (cpu >= num_possible_cpus())
+ if (cpu >= get_possible_cpus_array_len())
return NULL;
}
ref = &chan->backend.buf[cpu].shmp._ref;
*wakeup_fd = shm_get_wakeup_fd(handle, ref);
if (shm_get_shm_size(handle, ref, memory_map_size))
return NULL;
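+ /* Also report the address at which this stream's shm object is mapped in the current process. */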
+ *memory_map_addr = handle->table->objects[ref->index].memory_map;
return shmp(handle, chan->backend.buf[cpu].shmp);
}
if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
cpu = 0;
} else {
- if (cpu >= num_possible_cpus())
+ if (cpu >= get_possible_cpus_array_len())
return -EINVAL;
}
ref = &chan->backend.buf[cpu].shmp._ref;
if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
cpu = 0;
} else {
- if (cpu >= num_possible_cpus())
+ if (cpu >= get_possible_cpus_array_len())
return -EINVAL;
}
ref = &chan->backend.buf[cpu].shmp._ref;
void lib_ring_buffer_switch_old_start(struct lttng_ust_ring_buffer *buf,
struct lttng_ust_ring_buffer_channel *chan,
struct switch_offsets *offsets,
- uint64_t tsc,
+ const struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_shm_handle *handle)
{
const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
unsigned long commit_count;
struct commit_counters_hot *cc_hot;
- config->cb.buffer_begin(buf, tsc, oldidx, handle);
+ config->cb.buffer_begin(buf, ctx->priv->timestamp, oldidx, handle);
/*
* Order all writes to buffer before the commit count update that will
commit_count = v_read(config, &cc_hot->cc);
/* Check if the written buffer has to be delivered */
lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
- commit_count, oldidx, handle, tsc);
+ commit_count, oldidx, handle, ctx);
lib_ring_buffer_write_commit_counter(config, buf, chan,
offsets->old + config->cb.subbuffer_header_size(),
commit_count, handle, cc_hot);
void lib_ring_buffer_switch_old_end(struct lttng_ust_ring_buffer *buf,
struct lttng_ust_ring_buffer_channel *chan,
struct switch_offsets *offsets,
- uint64_t tsc,
+ const struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_shm_handle *handle)
{
const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
* postponed until the commit counter is incremented for the
* current space reservation.
*/
- *ts_end = tsc;
+ *ts_end = ctx->priv->timestamp;
/*
* Order all writes to buffer and store to ts_end before the commit
v_add(config, padding_size, &cc_hot->cc);
commit_count = v_read(config, &cc_hot->cc);
lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
- commit_count, oldidx, handle, tsc);
+ commit_count, oldidx, handle, ctx);
lib_ring_buffer_write_commit_counter(config, buf, chan,
offsets->old + padding_size, commit_count, handle,
cc_hot);
void lib_ring_buffer_switch_new_start(struct lttng_ust_ring_buffer *buf,
struct lttng_ust_ring_buffer_channel *chan,
struct switch_offsets *offsets,
- uint64_t tsc,
+ const struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_shm_handle *handle)
{
const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
unsigned long commit_count;
struct commit_counters_hot *cc_hot;
- config->cb.buffer_begin(buf, tsc, beginidx, handle);
+ config->cb.buffer_begin(buf, ctx->priv->timestamp, beginidx, handle);
/*
* Order all writes to buffer before the commit count update that will
commit_count = v_read(config, &cc_hot->cc);
/* Check if the written buffer has to be delivered */
lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
- commit_count, beginidx, handle, tsc);
+ commit_count, beginidx, handle, ctx);
lib_ring_buffer_write_commit_counter(config, buf, chan,
offsets->begin + config->cb.subbuffer_header_size(),
commit_count, handle, cc_hot);
void lib_ring_buffer_switch_new_end(struct lttng_ust_ring_buffer *buf,
struct lttng_ust_ring_buffer_channel *chan,
struct switch_offsets *offsets,
- uint64_t tsc,
+ const struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_shm_handle *handle)
{
const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
* postponed until the commit counter is incremented for the
* current space reservation.
*/
- *ts_end = tsc;
+ *ts_end = ctx->priv->timestamp;
}
/*
struct lttng_ust_ring_buffer *buf,
struct lttng_ust_ring_buffer_channel *chan,
struct switch_offsets *offsets,
- uint64_t *tsc,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_shm_handle *handle)
{
const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
offsets->switch_old_start = 0;
off = subbuf_offset(offsets->begin, chan);
- *tsc = config->cb.ring_buffer_clock_read(chan);
+ ctx->priv->timestamp = config->cb.ring_buffer_clock_read(chan);
/*
* Ensure we flush the header of an empty subbuffer when doing the
offsets->begin = subbuf_align(offsets->begin, chan);
/* Note: old points to the next subbuf at offset 0 */
offsets->end = offsets->begin;
+ /*
+ * Populate the records lost counters prior to performing a
+ * sub-buffer switch.
+ */
+ ctx->priv->records_lost_full = v_read(config, &buf->records_lost_full);
+ ctx->priv->records_lost_wrap = v_read(config, &buf->records_lost_wrap);
+ ctx->priv->records_lost_big = v_read(config, &buf->records_lost_big);
return 0;
}
{
struct lttng_ust_ring_buffer_channel *chan;
const struct lttng_ust_ring_buffer_config *config;
+ struct lttng_ust_ring_buffer_ctx_private ctx_priv;
+ struct lttng_ust_ring_buffer_ctx ctx;
struct switch_offsets offsets;
unsigned long oldidx;
- uint64_t tsc;
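+ /* Use an on-stack context to carry the switch timestamp and records-lost counters to the callbacks below. */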
+ ctx.priv = &ctx_priv;
chan = shmp(handle, buf->backend.chan);
if (!chan)
return;
*/
do {
if (lib_ring_buffer_try_switch_slow(mode, buf, chan, &offsets,
- &tsc, handle))
+ &ctx, handle))
return; /* Switch not needed */
} while (v_cmpxchg(config, &buf->offset, offsets.old, offsets.end)
!= offsets.old);
/*
- * Atomically update last_tsc. This update races against concurrent
- * atomic updates, but the race will always cause supplementary full TSC
- * records, never the opposite (missing a full TSC record when it would
- * be needed).
+ * Atomically update last_timestamp. This update races against concurrent
+ * atomic updates, but the race will always cause supplementary full
+ * timestamp records, never the opposite (missing a full timestamp
+ * record when it would be needed).
*/
- save_last_tsc(config, buf, tsc);
+ save_last_timestamp(config, buf, ctx.priv->timestamp);
/*
* Push the reader if necessary
* May need to populate header start on SWITCH_FLUSH.
*/
if (offsets.switch_old_start) {
- lib_ring_buffer_switch_old_start(buf, chan, &offsets, tsc, handle);
+ lib_ring_buffer_switch_old_start(buf, chan, &offsets, &ctx, handle);
offsets.old += config->cb.subbuffer_header_size();
}
/*
* Switch old subbuffer.
*/
- lib_ring_buffer_switch_old_end(buf, chan, &offsets, tsc, handle);
+ lib_ring_buffer_switch_old_end(buf, chan, &offsets, &ctx, handle);
}
static
offsets->switch_old_end = 0;
offsets->pre_header_padding = 0;
- ctx_private->tsc = config->cb.ring_buffer_clock_read(chan);
- if ((int64_t) ctx_private->tsc == -EIO)
+ ctx_private->timestamp = config->cb.ring_buffer_clock_read(chan);
+ if ((int64_t) ctx_private->timestamp == -EIO)
return -EIO;
- if (last_tsc_overflow(config, buf, ctx_private->tsc))
- ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+ if (last_timestamp_overflow(config, buf, ctx_private->timestamp))
+ ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TIMESTAMP;
if (caa_unlikely(subbuf_offset(offsets->begin, chan) == 0)) {
offsets->switch_new_start = 1; /* For offsets->begin */
*/
offsets->switch_new_end = 1; /* For offsets->begin */
}
+ /*
+ * Populate the records lost counters when the space reservation
+ * may cause a sub-buffer switch.
+ */
+ if (offsets->switch_new_end || offsets->switch_old_end) {
+ ctx_private->records_lost_full = v_read(config, &buf->records_lost_full);
+ ctx_private->records_lost_wrap = v_read(config, &buf->records_lost_wrap);
+ ctx_private->records_lost_big = v_read(config, &buf->records_lost_big);
+ }
return 0;
}
!= offsets.old));
/*
- * Atomically update last_tsc. This update races against concurrent
- * atomic updates, but the race will always cause supplementary full TSC
- * records, never the opposite (missing a full TSC record when it would
- * be needed).
+ * Atomically update last_timestamp. This update races against concurrent
+ * atomic updates, but the race will always cause supplementary full
+ * timestamp records, never the opposite (missing a full timestamp
+ * record when it would be needed).
*/
- save_last_tsc(config, buf, ctx_private->tsc);
+ save_last_timestamp(config, buf, ctx_private->timestamp);
/*
* Push the reader if necessary
lib_ring_buffer_clear_noref(config, &buf->backend,
subbuf_index(offsets.old - 1, chan),
handle);
- lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx_private->tsc, handle);
+ lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx, handle);
}
/*
* Populate new subbuffer.
*/
if (caa_unlikely(offsets.switch_new_start))
- lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx_private->tsc, handle);
+ lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx, handle);
if (caa_unlikely(offsets.switch_new_end))
- lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx_private->tsc, handle);
+ lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx, handle);
ctx_private->slot_size = offsets.size;
ctx_private->pre_offset = offsets.begin;
unsigned long commit_count,
unsigned long idx,
struct lttng_ust_shm_handle *handle,
- uint64_t tsc __attribute__((unused)))
+ const struct lttng_ust_ring_buffer_ctx *ctx)
{
unsigned long old_commit_count = commit_count
- chan->backend.subbuf_size;
buf,
idx,
handle),
- handle);
+ handle, ctx);
/*
* Increment the packet counter while we have exclusive
}
/*
- * Force a read (imply TLS fixup for dlopen) of TLS variables.
+ * Force a read of TLS variables (implying TLS allocation for dlopen).
*/
-void lttng_fixup_ringbuffer_tls(void)
+void lttng_ringbuffer_alloc_tls(void)
{
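+ /* A dummy asm read of the TLS variable forces its allocation when the library is dlopen'd. */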
- asm volatile ("" : : "m" (URCU_TLS(lib_ring_buffer_nesting)));
+ __asm__ __volatile__ ("" : : "m" (URCU_TLS(lib_ring_buffer_nesting)));
}
void lib_ringbuffer_signal_init(void)