#include "common/macros.h"
#include <lttng/ust-utils.h>
-#include <lttng/ringbuffer-context.h>
+#include <lttng/ust-ringbuffer-context.h>
-#include "smp.h"
+#include "common/smp.h"
#include "ringbuffer-config.h"
#include "vatomic.h"
#include "backend.h"
#include "frontend.h"
#include "shm.h"
#include "rb-init.h"
-#include "liblttng-ust/compat.h" /* For ENODATA */
+#include "common/compat/errno.h" /* For ENODATA */
/* Print DBG() messages about events lost only every 1048576 hits */
#define DBG_PRINT_NR_LOST (1UL << 20)
/*
* Non-static to ensure the compiler does not optimize away the xor.
*/
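+/*
+ * Hidden visibility keeps the symbol out of the shared object's public
+ * ABI while leaving it non-static, so the xor below still survives
+ * optimization.
+ */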
+uint8_t lttng_crash_magic_xor[]
+ __attribute__((visibility("hidden")));
uint8_t lttng_crash_magic_xor[] = RB_CRASH_DUMP_ABI_MAGIC_XOR;
/*
static pthread_mutex_t wakeup_fd_mutex = PTHREAD_MUTEX_INITIALIZER;
static
-void lib_ring_buffer_print_errors(struct lttng_ust_lib_ring_buffer_channel *chan,
- struct lttng_ust_lib_ring_buffer *buf, int cpu,
+void lib_ring_buffer_print_errors(struct lttng_ust_ring_buffer_channel *chan,
+ struct lttng_ust_ring_buffer *buf, int cpu,
struct lttng_ust_shm_handle *handle);
/*
}
/* Get blocking timeout, in ms */
-static int lttng_ust_ringbuffer_get_timeout(struct lttng_ust_lib_ring_buffer_channel *chan)
+static int lttng_ust_ringbuffer_get_timeout(struct lttng_ust_ring_buffer_channel *chan)
{
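+	/* A zero timeout disables blocking: when the buffer is full, events
+	 * are discarded instead of waiting for space. */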
if (!lttng_ust_allow_blocking)
return 0;
* should not be using the iterator concurrently with reset. The previous
* current iterator record is reset.
*/
-void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
+void lib_ring_buffer_reset(struct lttng_ust_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_channel *chan;
- const struct lttng_ust_lib_ring_buffer_config *config;
+ struct lttng_ust_ring_buffer_channel *chan;
+ const struct lttng_ust_ring_buffer_config *config;
unsigned int i;
chan = shmp(handle, buf->backend.chan);
* be using the iterator concurrently with reset. The previous current iterator
* record is reset.
*/
-void channel_reset(struct lttng_ust_lib_ring_buffer_channel *chan)
+void channel_reset(struct lttng_ust_ring_buffer_channel *chan)
{
/*
* Reset iterators first. Will put the subbuffer if held for reading.
}
static
-void init_crash_abi(const struct lttng_ust_lib_ring_buffer_config *config,
+void init_crash_abi(const struct lttng_ust_ring_buffer_config *config,
struct lttng_crash_abi *crash_abi,
- struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_ring_buffer *buf,
struct channel_backend *chanb,
struct shm_object *shmobj,
struct lttng_ust_shm_handle *handle)
crash_abi->offset.buf_wsb_array =
(uint32_t) ((char *) shmp(handle, buf->backend.buf_wsb) - (char *) buf);
crash_abi->offset.buf_wsb_id =
- offsetof(struct lttng_ust_lib_ring_buffer_backend_subbuffer, id);
+ offsetof(struct lttng_ust_ring_buffer_backend_subbuffer, id);
crash_abi->offset.sb_array =
(uint32_t) ((char *) shmp(handle, buf->backend.array) - (char *) buf);
crash_abi->offset.sb_array_shmp_offset =
- offsetof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp,
+ offsetof(struct lttng_ust_ring_buffer_backend_pages_shmp,
shmp._ref.offset);
crash_abi->offset.sb_backend_p_offset =
- offsetof(struct lttng_ust_lib_ring_buffer_backend_pages,
+ offsetof(struct lttng_ust_ring_buffer_backend_pages,
p._ref.offset);
/* Field length */
crash_abi->length.commit_hot_seq =
sizeof(((struct commit_counters_hot *) NULL)->seq);
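+	/*
+	 * sizeof() applied to a member reached through a NULL pointer is a
+	 * purely compile-time construct: the pointer is never dereferenced.
+	 */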
crash_abi->length.buf_wsb_id =
- sizeof(((struct lttng_ust_lib_ring_buffer_backend_subbuffer *) NULL)->id);
+ sizeof(((struct lttng_ust_ring_buffer_backend_subbuffer *) NULL)->id);
crash_abi->length.sb_array_shmp_offset =
- sizeof(((struct lttng_ust_lib_ring_buffer_backend_pages_shmp *) NULL)->shmp._ref.offset);
+ sizeof(((struct lttng_ust_ring_buffer_backend_pages_shmp *) NULL)->shmp._ref.offset);
crash_abi->length.sb_backend_p_offset =
- sizeof(((struct lttng_ust_lib_ring_buffer_backend_pages *) NULL)->p._ref.offset);
+ sizeof(((struct lttng_ust_ring_buffer_backend_pages *) NULL)->p._ref.offset);
/* Array stride */
crash_abi->stride.commit_hot_array =
sizeof(struct commit_counters_hot);
crash_abi->stride.buf_wsb_array =
- sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer);
+ sizeof(struct lttng_ust_ring_buffer_backend_subbuffer);
crash_abi->stride.sb_array =
- sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp);
+ sizeof(struct lttng_ust_ring_buffer_backend_pages_shmp);
/* Buffer constants */
crash_abi->buf_size = chanb->buf_size;
/*
* Must be called under cpu hotplug protection.
*/
-int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
+int lib_ring_buffer_create(struct lttng_ust_ring_buffer *buf,
struct channel_backend *chanb, int cpu,
struct lttng_ust_shm_handle *handle,
struct shm_object *shmobj)
{
- const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
- struct lttng_ust_lib_ring_buffer_channel *chan = caa_container_of(chanb,
- struct lttng_ust_lib_ring_buffer_channel, backend);
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
- struct lttng_ust_lib_ring_buffer_channel *shmp_chan;
+ const struct lttng_ust_ring_buffer_config *config = &chanb->config;
+ struct lttng_ust_ring_buffer_channel *chan = caa_container_of(chanb,
+ struct lttng_ust_ring_buffer_channel, backend);
+ struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_ring_buffer_channel *shmp_chan;
struct commit_counters_hot *cc_hot;
void *priv = channel_get_private_config(chan);
size_t subbuf_header_size;
void lib_ring_buffer_channel_switch_timer(int sig __attribute__((unused)),
siginfo_t *si, void *uc __attribute__((unused)))
{
- const struct lttng_ust_lib_ring_buffer_config *config;
+ const struct lttng_ust_ring_buffer_config *config;
struct lttng_ust_shm_handle *handle;
- struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer_channel *chan;
int cpu;
assert(CMM_LOAD_SHARED(timer_signal.tid) == pthread_self());
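+	/* Timer signals are handled on the dedicated timer thread only. */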
pthread_mutex_lock(&wakeup_fd_mutex);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
for_each_possible_cpu(cpu) {
- struct lttng_ust_lib_ring_buffer *buf =
+ struct lttng_ust_ring_buffer *buf =
shmp(handle, chan->backend.buf[cpu].shmp);
if (!buf)
chan->handle);
}
} else {
- struct lttng_ust_lib_ring_buffer *buf =
+ struct lttng_ust_ring_buffer *buf =
shmp(handle, chan->backend.buf[0].shmp);
if (!buf)
}
static
-int lib_ring_buffer_poll_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+int lib_ring_buffer_poll_deliver(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
struct lttng_ust_shm_handle *handle)
{
unsigned long consumed_old, consumed_idx, commit_count, write_offset;
}
static
-void lib_ring_buffer_wakeup(struct lttng_ust_lib_ring_buffer *buf,
+void lib_ring_buffer_wakeup(struct lttng_ust_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
int wakeup_fd = shm_get_wakeup_fd(handle, &buf->self._ref);
}
static
-void lib_ring_buffer_channel_do_read(struct lttng_ust_lib_ring_buffer_channel *chan)
+void lib_ring_buffer_channel_do_read(struct lttng_ust_ring_buffer_channel *chan)
{
- const struct lttng_ust_lib_ring_buffer_config *config;
+ const struct lttng_ust_ring_buffer_config *config;
struct lttng_ust_shm_handle *handle;
int cpu;
pthread_mutex_lock(&wakeup_fd_mutex);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
for_each_possible_cpu(cpu) {
- struct lttng_ust_lib_ring_buffer *buf =
+ struct lttng_ust_ring_buffer *buf =
shmp(handle, chan->backend.buf[cpu].shmp);
if (!buf)
}
}
} else {
- struct lttng_ust_lib_ring_buffer *buf =
+ struct lttng_ust_ring_buffer *buf =
shmp(handle, chan->backend.buf[0].shmp);
if (!buf)
void lib_ring_buffer_channel_read_timer(int sig __attribute__((unused)),
siginfo_t *si, void *uc __attribute__((unused)))
{
- struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer_channel *chan;
assert(CMM_LOAD_SHARED(timer_signal.tid) == pthread_self());
chan = si->si_value.sival_ptr;
}
static
-void lib_ring_buffer_channel_switch_timer_start(struct lttng_ust_lib_ring_buffer_channel *chan)
+void lib_ring_buffer_channel_switch_timer_start(struct lttng_ust_ring_buffer_channel *chan)
{
struct sigevent sev;
struct itimerspec its;
}
static
-void lib_ring_buffer_channel_switch_timer_stop(struct lttng_ust_lib_ring_buffer_channel *chan)
+void lib_ring_buffer_channel_switch_timer_stop(struct lttng_ust_ring_buffer_channel *chan)
{
int ret;
}
static
-void lib_ring_buffer_channel_read_timer_start(struct lttng_ust_lib_ring_buffer_channel *chan)
+void lib_ring_buffer_channel_read_timer_start(struct lttng_ust_ring_buffer_channel *chan)
{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
struct sigevent sev;
struct itimerspec its;
int ret;
}
static
-void lib_ring_buffer_channel_read_timer_stop(struct lttng_ust_lib_ring_buffer_channel *chan)
+void lib_ring_buffer_channel_read_timer_stop(struct lttng_ust_ring_buffer_channel *chan)
{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
int ret;
if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
chan->read_timer_enabled = 0;
}
-static void channel_unregister_notifiers(struct lttng_ust_lib_ring_buffer_channel *chan,
+static void channel_unregister_notifiers(struct lttng_ust_ring_buffer_channel *chan,
struct lttng_ust_shm_handle *handle __attribute__((unused)))
{
lib_ring_buffer_channel_switch_timer_stop(chan);
lib_ring_buffer_channel_read_timer_stop(chan);
}
-static void channel_print_errors(struct lttng_ust_lib_ring_buffer_channel *chan,
+static void channel_print_errors(struct lttng_ust_ring_buffer_channel *chan,
struct lttng_ust_shm_handle *handle)
{
- const struct lttng_ust_lib_ring_buffer_config *config =
+ const struct lttng_ust_ring_buffer_config *config =
&chan->backend.config;
int cpu;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
for_each_possible_cpu(cpu) {
- struct lttng_ust_lib_ring_buffer *buf =
+ struct lttng_ust_ring_buffer *buf =
shmp(handle, chan->backend.buf[cpu].shmp);
if (buf)
lib_ring_buffer_print_errors(chan, buf, cpu, handle);
}
} else {
- struct lttng_ust_lib_ring_buffer *buf =
+ struct lttng_ust_ring_buffer *buf =
shmp(handle, chan->backend.buf[0].shmp);
if (buf)
}
}
-static void channel_free(struct lttng_ust_lib_ring_buffer_channel *chan,
+static void channel_free(struct lttng_ust_ring_buffer_channel *chan,
struct lttng_ust_shm_handle *handle,
int consumer)
{
* Holds cpu hotplug.
* Returns NULL on failure.
*/
-struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
+struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_ring_buffer_config *config,
const char *name,
size_t priv_data_align,
size_t priv_data_size,
{
int ret;
size_t shmsize, chansize;
- struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer_channel *chan;
struct lttng_ust_shm_handle *handle;
struct shm_object *shmobj;
unsigned int nr_streams;
goto error_table_alloc;
/* Calculate the shm allocation layout */
- shmsize = sizeof(struct lttng_ust_lib_ring_buffer_channel);
- shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_shmp));
- shmsize += sizeof(struct lttng_ust_lib_ring_buffer_shmp) * nr_streams;
+ shmsize = sizeof(struct lttng_ust_ring_buffer_channel);
+ shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_ring_buffer_shmp));
+ shmsize += sizeof(struct lttng_ust_ring_buffer_shmp) * nr_streams;
chansize = shmsize;
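+	/* chansize covers the channel header plus the per-stream shmp array;
+	 * the client's private data, if any, follows with its alignment. */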
if (priv_data_align)
shmsize += lttng_ust_offset_align(shmsize, priv_data_align);
-1, -1);
if (!shmobj)
goto error_append;
- /* struct lttng_ust_lib_ring_buffer_channel is at object 0, offset 0 (hardcoded) */
+ /* struct lttng_ust_ring_buffer_channel is at object 0, offset 0 (hardcoded) */
set_shmp(handle->chan, zalloc_shm(shmobj, chansize));
assert(handle->chan._ref.index == 0);
assert(handle->chan._ref.offset == 0);
memory_map_size, wakeup_fd);
if (!object)
goto error_table_object;
- /* struct lttng_ust_lib_ring_buffer_channel is at object 0, offset 0 (hardcoded) */
+ /* struct lttng_ust_ring_buffer_channel is at object 0, offset 0 (hardcoded) */
handle->chan._ref.index = 0;
handle->chan._ref.offset = 0;
return handle;
}
static
-void channel_release(struct lttng_ust_lib_ring_buffer_channel *chan, struct lttng_ust_shm_handle *handle,
+void channel_release(struct lttng_ust_ring_buffer_channel *chan, struct lttng_ust_shm_handle *handle,
int consumer)
{
channel_free(chan, handle, consumer);
* consumption of finalized channels, get_subbuf() will return -ENODATA.
* They should release their handle at that point.
*/
-void channel_destroy(struct lttng_ust_lib_ring_buffer_channel *chan, struct lttng_ust_shm_handle *handle,
+void channel_destroy(struct lttng_ust_ring_buffer_channel *chan, struct lttng_ust_shm_handle *handle,
int consumer)
{
if (consumer) {
return;
}
-struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan, int cpu,
+struct lttng_ust_ring_buffer *channel_get_ring_buffer(
+ const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_channel *chan, int cpu,
struct lttng_ust_shm_handle *handle,
int *shm_fd, int *wait_fd,
int *wakeup_fd,
- uint64_t *memory_map_size)
+ uint64_t *memory_map_size,
+ void **memory_map_addr)
{
struct shm_ref *ref;
*wakeup_fd = shm_get_wakeup_fd(handle, ref);
if (shm_get_shm_size(handle, ref, memory_map_size))
return NULL;
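+	/* Also expose the base address of the stream's shm mapping. */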
+ *memory_map_addr = handle->table->objects[ref->index].memory_map;
return shmp(handle, chan->backend.buf[cpu].shmp);
}
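+/*
+ * Example (hypothetical caller): a consumer retrieving a stream can now
+ * obtain the mapping base address in the same call:
+ *
+ *	int shm_fd, wait_fd, wakeup_fd;
+ *	uint64_t len;
+ *	void *addr;
+ *	struct lttng_ust_ring_buffer *buf;
+ *
+ *	buf = channel_get_ring_buffer(config, chan, cpu, handle,
+ *			&shm_fd, &wait_fd, &wakeup_fd, &len, &addr);
+ */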
int ring_buffer_channel_close_wait_fd(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_ring_buffer_channel *chan __attribute__((unused)),
struct lttng_ust_shm_handle *handle)
{
struct shm_ref *ref;
}
int ring_buffer_channel_close_wakeup_fd(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_ring_buffer_channel *chan __attribute__((unused)),
struct lttng_ust_shm_handle *handle)
{
struct shm_ref *ref;
return shm_close_wakeup_fd(handle, ref);
}
-int ring_buffer_stream_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+int ring_buffer_stream_close_wait_fd(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_channel *chan,
struct lttng_ust_shm_handle *handle,
int cpu)
{
return shm_close_wait_fd(handle, ref);
}
-int ring_buffer_stream_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+int ring_buffer_stream_close_wakeup_fd(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_channel *chan,
struct lttng_ust_shm_handle *handle,
int cpu)
{
return ret;
}
-int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
+int lib_ring_buffer_open_read(struct lttng_ust_ring_buffer *buf,
struct lttng_ust_shm_handle *handle __attribute__((unused)))
{
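+	/* cmpxchg from 0 to 1 enforces a single concurrent reader per buffer. */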
if (uatomic_cmpxchg(&buf->active_readers, 0, 1) != 0)
return 0;
}
-void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
+void lib_ring_buffer_release_read(struct lttng_ust_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
+ struct lttng_ust_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
if (!chan)
return;
* data to read at consumed position, or 0 if the get operation succeeds.
*/
-int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
+int lib_ring_buffer_snapshot(struct lttng_ust_ring_buffer *buf,
unsigned long *consumed, unsigned long *produced,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_channel *chan;
- const struct lttng_ust_lib_ring_buffer_config *config;
+ struct lttng_ust_ring_buffer_channel *chan;
+ const struct lttng_ust_ring_buffer_config *config;
unsigned long consumed_cur, write_offset;
int finalized;
* consumer positions without regard for the "snapshot" feature.
*/
int lib_ring_buffer_snapshot_sample_positions(
- struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_ring_buffer *buf,
unsigned long *consumed, unsigned long *produced,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_channel *chan;
- const struct lttng_ust_lib_ring_buffer_config *config;
+ struct lttng_ust_ring_buffer_channel *chan;
+ const struct lttng_ust_ring_buffer_config *config;
chan = shmp(handle, buf->backend.chan);
if (!chan)
* @buf: ring buffer
* @consumed_new: new consumed count value
*/
-void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
+void lib_ring_buffer_move_consumer(struct lttng_ust_ring_buffer *buf,
unsigned long consumed_new,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
- struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_ring_buffer_backend *bufb = &buf->backend;
+ struct lttng_ust_ring_buffer_channel *chan;
unsigned long consumed;
chan = shmp(handle, bufb->chan);
* Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
* data to read at consumed position, or 0 if the get operation succeeds.
*/
-int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+int lib_ring_buffer_get_subbuf(struct lttng_ust_ring_buffer *buf,
unsigned long consumed,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_channel *chan;
- const struct lttng_ust_lib_ring_buffer_config *config;
+ struct lttng_ust_ring_buffer_channel *chan;
+ const struct lttng_ust_ring_buffer_config *config;
unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
int ret, finalized, nr_retry = LTTNG_UST_RING_BUFFER_GET_RETRY;
struct commit_counters_cold *cc_cold;
* lib_ring_buffer_put_subbuf - release exclusive subbuffer access
* @buf: ring buffer
*/
-void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+void lib_ring_buffer_put_subbuf(struct lttng_ust_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- const struct lttng_ust_lib_ring_buffer_config *config;
+ struct lttng_ust_ring_buffer_backend *bufb = &buf->backend;
+ struct lttng_ust_ring_buffer_channel *chan;
+ const struct lttng_ust_ring_buffer_config *config;
unsigned long sb_bindex, consumed_idx, consumed;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_ring_buffer_backend_pages *backend_pages;
chan = shmp(handle, bufb->chan);
if (!chan)
* position and the writer position. (inclusive)
*/
static
-void lib_ring_buffer_print_subbuffer_errors(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+void lib_ring_buffer_print_subbuffer_errors(struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
unsigned long cons_offset,
int cpu,
struct lttng_ust_shm_handle *handle)
{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
unsigned long cons_idx, commit_count, commit_count_sb;
struct commit_counters_hot *cc_hot;
struct commit_counters_cold *cc_cold;
}
static
-void lib_ring_buffer_print_buffer_errors(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+void lib_ring_buffer_print_buffer_errors(struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
int cpu, struct lttng_ust_shm_handle *handle)
{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
unsigned long write_offset, cons_offset;
/*
}
static
-void lib_ring_buffer_print_errors(struct lttng_ust_lib_ring_buffer_channel *chan,
- struct lttng_ust_lib_ring_buffer *buf, int cpu,
+void lib_ring_buffer_print_errors(struct lttng_ust_ring_buffer_channel *chan,
+ struct lttng_ust_ring_buffer *buf, int cpu,
struct lttng_ust_shm_handle *handle)
{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
if (!strcmp(chan->backend.name, "relay-metadata-mmap")) {
DBG("ring buffer %s: %lu records written, "
* active or at buffer finalization (destroy).
*/
static
-void lib_ring_buffer_switch_old_start(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+void lib_ring_buffer_switch_old_start(struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
struct switch_offsets *offsets,
- uint64_t tsc,
+ const struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_shm_handle *handle)
{
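+	/*
+	 * The switch helpers now receive the full ring buffer context
+	 * instead of a bare tsc: check_deliver forwards it, records-lost
+	 * counters included, to the client callbacks.
+	 */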
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
unsigned long oldidx = subbuf_index(offsets->old, chan);
unsigned long commit_count;
struct commit_counters_hot *cc_hot;
- config->cb.buffer_begin(buf, tsc, oldidx, handle);
+ config->cb.buffer_begin(buf, ctx->priv->tsc, oldidx, handle);
/*
* Order all writes to buffer before the commit count update that will
commit_count = v_read(config, &cc_hot->cc);
/* Check if the written buffer has to be delivered */
lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
- commit_count, oldidx, handle, tsc);
+ commit_count, oldidx, handle, ctx);
lib_ring_buffer_write_commit_counter(config, buf, chan,
offsets->old + config->cb.subbuffer_header_size(),
commit_count, handle, cc_hot);
* subbuffer.
*/
static
-void lib_ring_buffer_switch_old_end(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+void lib_ring_buffer_switch_old_end(struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
struct switch_offsets *offsets,
- uint64_t tsc,
+ const struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_shm_handle *handle)
{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
unsigned long commit_count, padding_size, data_size;
struct commit_counters_hot *cc_hot;
* postponed until the commit counter is incremented for the
* current space reservation.
*/
- *ts_end = tsc;
+ *ts_end = ctx->priv->tsc;
/*
* Order all writes to buffer and store to ts_end before the commit
v_add(config, padding_size, &cc_hot->cc);
commit_count = v_read(config, &cc_hot->cc);
lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
- commit_count, oldidx, handle, tsc);
+ commit_count, oldidx, handle, ctx);
lib_ring_buffer_write_commit_counter(config, buf, chan,
offsets->old + padding_size, commit_count, handle,
cc_hot);
* that this code is executed before the deliver of this sub-buffer.
*/
static
-void lib_ring_buffer_switch_new_start(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+void lib_ring_buffer_switch_new_start(struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
struct switch_offsets *offsets,
- uint64_t tsc,
+ const struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_shm_handle *handle)
{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
unsigned long beginidx = subbuf_index(offsets->begin, chan);
unsigned long commit_count;
struct commit_counters_hot *cc_hot;
- config->cb.buffer_begin(buf, tsc, beginidx, handle);
+ config->cb.buffer_begin(buf, ctx->priv->tsc, beginidx, handle);
/*
* Order all writes to buffer before the commit count update that will
commit_count = v_read(config, &cc_hot->cc);
/* Check if the written buffer has to be delivered */
lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
- commit_count, beginidx, handle, tsc);
+ commit_count, beginidx, handle, ctx);
lib_ring_buffer_write_commit_counter(config, buf, chan,
offsets->begin + config->cb.subbuffer_header_size(),
commit_count, handle, cc_hot);
* we are currently doing the space reservation.
*/
static
-void lib_ring_buffer_switch_new_end(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+void lib_ring_buffer_switch_new_end(struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
struct switch_offsets *offsets,
- uint64_t tsc,
+ const struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_shm_handle *handle)
{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
unsigned long endidx, data_size;
uint64_t *ts_end;
* postponed until the commit counter is incremented for the
* current space reservation.
*/
- *ts_end = tsc;
+ *ts_end = ctx->priv->tsc;
}
/*
*/
static
int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
- struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
struct switch_offsets *offsets,
- uint64_t *tsc,
+ struct lttng_ust_ring_buffer_ctx *ctx,
struct lttng_ust_shm_handle *handle)
{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
unsigned long off, reserve_commit_diff;
offsets->begin = v_read(config, &buf->offset);
offsets->switch_old_start = 0;
off = subbuf_offset(offsets->begin, chan);
- *tsc = config->cb.ring_buffer_clock_read(chan);
+ ctx->priv->tsc = config->cb.ring_buffer_clock_read(chan);
/*
* Ensure we flush the header of an empty subbuffer when doing the
offsets->begin = subbuf_align(offsets->begin, chan);
/* Note: old points to the next subbuf at offset 0 */
offsets->end = offsets->begin;
+	/*
+	 * Snapshot the records-lost counters into the context prior to
+	 * performing a sub-buffer switch.
+	 */
+ ctx->priv->records_lost_full = v_read(config, &buf->records_lost_full);
+ ctx->priv->records_lost_wrap = v_read(config, &buf->records_lost_wrap);
+ ctx->priv->records_lost_big = v_read(config, &buf->records_lost_big);
return 0;
}
* RING_BUFFER_SYNC_GLOBAL ring buffers, this function can be called
* from any CPU.
*/
-void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
+void lib_ring_buffer_switch_slow(struct lttng_ust_ring_buffer *buf, enum switch_mode mode,
struct lttng_ust_shm_handle *handle)
{
- struct lttng_ust_lib_ring_buffer_channel *chan;
- const struct lttng_ust_lib_ring_buffer_config *config;
+ struct lttng_ust_ring_buffer_channel *chan;
+ const struct lttng_ust_ring_buffer_config *config;
+ struct lttng_ust_ring_buffer_ctx_private ctx_priv;
+ struct lttng_ust_ring_buffer_ctx ctx;
struct switch_offsets offsets;
unsigned long oldidx;
- uint64_t tsc;
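+	/* An on-stack private context is sufficient here: the switch path
+	 * only needs tsc and the records-lost counters. */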
+ ctx.priv = &ctx_priv;
chan = shmp(handle, buf->backend.chan);
if (!chan)
return;
*/
do {
if (lib_ring_buffer_try_switch_slow(mode, buf, chan, &offsets,
- &tsc, handle))
+ &ctx, handle))
return; /* Switch not needed */
} while (v_cmpxchg(config, &buf->offset, offsets.old, offsets.end)
!= offsets.old);
* records, never the opposite (missing a full TSC record when it would
* be needed).
*/
- save_last_tsc(config, buf, tsc);
+ save_last_tsc(config, buf, ctx.priv->tsc);
/*
* Push the reader if necessary
* May need to populate header start on SWITCH_FLUSH.
*/
if (offsets.switch_old_start) {
- lib_ring_buffer_switch_old_start(buf, chan, &offsets, tsc, handle);
+ lib_ring_buffer_switch_old_start(buf, chan, &offsets, &ctx, handle);
offsets.old += config->cb.subbuffer_header_size();
}
/*
* Switch old subbuffer.
*/
- lib_ring_buffer_switch_old_end(buf, chan, &offsets, tsc, handle);
+ lib_ring_buffer_switch_old_end(buf, chan, &offsets, &ctx, handle);
}
static
* -EIO if data cannot be written into the buffer for any other reason.
*/
static
-int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+int lib_ring_buffer_try_reserve_slow(struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
struct switch_offsets *offsets,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_ring_buffer_ctx *ctx,
void *client_ctx)
{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
struct lttng_ust_shm_handle *handle = chan->handle;
unsigned long reserve_commit_diff, offset_cmp;
int timeout_left_ms = lttng_ust_ringbuffer_get_timeout(chan);
&offsets->pre_header_padding,
ctx, client_ctx);
offsets->size +=
- lttng_ust_lib_ring_buffer_align(offsets->begin + offsets->size,
+ lttng_ust_ring_buffer_align(offsets->begin + offsets->size,
ctx->largest_align)
+ ctx->data_size;
if (caa_unlikely(subbuf_offset(offsets->begin, chan) +
&offsets->pre_header_padding,
ctx, client_ctx);
offsets->size +=
- lttng_ust_lib_ring_buffer_align(offsets->begin + offsets->size,
+ lttng_ust_ring_buffer_align(offsets->begin + offsets->size,
ctx->largest_align)
+ ctx->data_size;
if (caa_unlikely(subbuf_offset(offsets->begin, chan)
*/
offsets->switch_new_end = 1; /* For offsets->begin */
}
+	/*
+	 * Snapshot the records-lost counters into the context when the
+	 * space reservation may cause a sub-buffer switch.
+	 */
+ if (offsets->switch_new_end || offsets->switch_old_end) {
+ ctx_private->records_lost_full = v_read(config, &buf->records_lost_full);
+ ctx_private->records_lost_wrap = v_read(config, &buf->records_lost_wrap);
+ ctx_private->records_lost_big = v_read(config, &buf->records_lost_big);
+ }
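+	/* Mirrors lib_ring_buffer_try_switch_slow(): every path that can
+	 * close a sub-buffer publishes these counters through the context. */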
return 0;
}
* -EIO for other errors, else returns 0.
* It will take care of sub-buffer switching.
*/
-int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+int lib_ring_buffer_reserve_slow(struct lttng_ust_ring_buffer_ctx *ctx,
void *client_ctx)
{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
+ struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_ring_buffer_channel *chan = ctx_private->chan;
struct lttng_ust_shm_handle *handle = chan->handle;
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
- struct lttng_ust_lib_ring_buffer *buf;
+ const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_ust_ring_buffer *buf;
struct switch_offsets offsets;
int ret;
lib_ring_buffer_clear_noref(config, &buf->backend,
subbuf_index(offsets.old - 1, chan),
handle);
- lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx_private->tsc, handle);
+ lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx, handle);
}
/*
* Populate new subbuffer.
*/
if (caa_unlikely(offsets.switch_new_start))
- lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx_private->tsc, handle);
+ lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx, handle);
if (caa_unlikely(offsets.switch_new_end))
- lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx_private->tsc, handle);
+ lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx, handle);
ctx_private->slot_size = offsets.size;
ctx_private->pre_offset = offsets.begin;
}
static
-void lib_ring_buffer_vmcore_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
+void lib_ring_buffer_vmcore_check_deliver(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf,
unsigned long commit_count,
unsigned long idx,
struct lttng_ust_shm_handle *handle)
*/
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static
-void deliver_count_events(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
+void deliver_count_events(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf,
unsigned long idx,
struct lttng_ust_shm_handle *handle)
{
#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
static
void deliver_count_events(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_ring_buffer *buf __attribute__((unused)),
unsigned long idx __attribute__((unused)),
struct lttng_ust_shm_handle *handle __attribute__((unused)))
{
}
#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
-void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
+void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer *buf,
+ struct lttng_ust_ring_buffer_channel *chan,
unsigned long offset,
unsigned long commit_count,
unsigned long idx,
struct lttng_ust_shm_handle *handle,
- uint64_t tsc __attribute__((unused)))
+ const struct lttng_ust_ring_buffer_ctx *ctx)
{
unsigned long old_commit_count = commit_count
- chan->backend.subbuf_size;
buf,
idx,
handle),
- handle);
+ handle, ctx);
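+		/* The client's buffer_end callback can read the records-lost
+		 * counters from ctx when finalizing the packet. */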
/*
* Increment the packet counter while we have exclusive
}
/*
- * Force a read (imply TLS fixup for dlopen) of TLS variables.
+ * Force a read (implying TLS allocation for dlopen) of TLS variables.
*/
-void lttng_fixup_ringbuffer_tls(void)
+void lttng_ringbuffer_alloc_tls(void)
{
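+	/* An empty asm with an "m" input forces the compiler to materialize
+	 * the TLS variable's address, which triggers its allocation. */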
asm volatile ("" : : "m" (URCU_TLS(lib_ring_buffer_nesting)));
}