ust-fd: Add close_range declaration
[lttng-ust.git] src/common/ringbuffer/ring_buffer_frontend.c
index 9074dbcb3a41bde88316dcf107103a76d5f3d50c..ab1fc0ff878b749779d9ce3e09d1d46ed70fc9ea 100644
 #include "common/macros.h"
 
 #include <lttng/ust-utils.h>
-#include <lttng/ringbuffer-context.h>
+#include <lttng/ust-ringbuffer-context.h>
 
-#include "smp.h"
+#include "common/smp.h"
 #include "ringbuffer-config.h"
 #include "vatomic.h"
 #include "backend.h"
 #include "frontend.h"
 #include "shm.h"
 #include "rb-init.h"
-#include "liblttng-ust/compat.h"       /* For ENODATA */
+#include "common/compat/errno.h"       /* For ENODATA */
+#include "common/populate.h"
 
 /* Print DBG() messages about events lost only every 1048576 hits */
 #define DBG_PRINT_NR_LOST      (1UL << 20)
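Because the threshold is a power of two, the "only every N hits" test can be done with a bitwise AND rather than a division. A minimal sketch of that pattern, using a hypothetical report_lost() helper in place of the real DBG() machinery:

    #include <stdio.h>

    #define DBG_PRINT_NR_LOST (1UL << 20)

    static void report_lost(unsigned long nr_lost)
    {
            /* Fires once every 1048576 hits: x % 2^n == x & (2^n - 1). */
            if ((nr_lost & (DBG_PRINT_NR_LOST - 1)) == 0)
                    fprintf(stderr, "lost %lu records\n", nr_lost);
    }
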
@@ -78,6 +79,8 @@
 /*
  * Non-static to ensure the compiler does not optimize away the xor.
  */
+uint8_t lttng_crash_magic_xor[]
+       __attribute__((visibility("hidden")));
 uint8_t lttng_crash_magic_xor[] = RB_CRASH_DUMP_ABI_MAGIC_XOR;
 
 /*
@@ -115,8 +118,8 @@ DEFINE_URCU_TLS(unsigned int, lib_ring_buffer_nesting);
 static pthread_mutex_t wakeup_fd_mutex = PTHREAD_MUTEX_INITIALIZER;
 
 static
-void lib_ring_buffer_print_errors(struct lttng_ust_lib_ring_buffer_channel *chan,
-                               struct lttng_ust_lib_ring_buffer *buf, int cpu,
+void lib_ring_buffer_print_errors(struct lttng_ust_ring_buffer_channel *chan,
+                               struct lttng_ust_ring_buffer *buf, int cpu,
                                struct lttng_ust_shm_handle *handle);
 
 /*
@@ -147,7 +150,7 @@ void lttng_ust_ringbuffer_set_allow_blocking(void)
 }
 
 /* Get blocking timeout, in ms */
-static int lttng_ust_ringbuffer_get_timeout(struct lttng_ust_lib_ring_buffer_channel *chan)
+static int lttng_ust_ringbuffer_get_timeout(struct lttng_ust_ring_buffer_channel *chan)
 {
        if (!lttng_ust_allow_blocking)
                return 0;
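The timeout convention here mirrors poll(): 0 means never block (the record is dropped as soon as the buffer is full), a positive value is a millisecond budget consumed in small slices so it can expire, and a negative value blocks indefinitely. A rough sketch of that retry shape, with a hypothetical buffer_is_full() predicate (the real logic lives in lib_ring_buffer_try_reserve_slow() further down):

    #include <poll.h>
    #include <stdbool.h>

    extern bool buffer_is_full(void);   /* hypothetical predicate */

    static bool wait_for_space(int timeout_left_ms)
    {
            while (buffer_is_full()) {
                    if (timeout_left_ms == 0)
                            return false;   /* non-blocking: caller drops the record */
                    poll(NULL, 0, 10);      /* sleep one 10 ms slice */
                    if (timeout_left_ms > 0) {
                            timeout_left_ms -= 10;
                            if (timeout_left_ms < 0)
                                    timeout_left_ms = 0;
                    }
            }
            return true;
    }
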
@@ -163,11 +166,11 @@ static int lttng_ust_ringbuffer_get_timeout(struct lttng_ust_lib_ring_buffer_cha
  * should not be using the iterator concurrently with reset. The previous
  * current iterator record is reset.
  */
-void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
+void lib_ring_buffer_reset(struct lttng_ust_ring_buffer *buf,
                           struct lttng_ust_shm_handle *handle)
 {
-       struct lttng_ust_lib_ring_buffer_channel *chan;
-       const struct lttng_ust_lib_ring_buffer_config *config;
+       struct lttng_ust_ring_buffer_channel *chan;
+       const struct lttng_ust_ring_buffer_config *config;
        unsigned int i;
 
        chan = shmp(handle, buf->backend.chan);
@@ -200,7 +203,7 @@ void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
        }
        uatomic_set(&buf->consumed, 0);
        uatomic_set(&buf->record_disabled, 0);
-       v_set(config, &buf->last_tsc, 0);
+       v_set(config, &buf->last_timestamp, 0);
        lib_ring_buffer_backend_reset(&buf->backend, handle);
        /* Don't reset number of active readers */
        v_set(config, &buf->records_lost_full, 0);
@@ -220,7 +223,7 @@ void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
  * be using the iterator concurrently with reset. The previous current iterator
  * record is reset.
  */
-void channel_reset(struct lttng_ust_lib_ring_buffer_channel *chan)
+void channel_reset(struct lttng_ust_ring_buffer_channel *chan)
 {
        /*
         * Reset iterators first. Will put the subbuffer if held for reading.
@@ -234,9 +237,9 @@ void channel_reset(struct lttng_ust_lib_ring_buffer_channel *chan)
 }
 
 static
-void init_crash_abi(const struct lttng_ust_lib_ring_buffer_config *config,
+void init_crash_abi(const struct lttng_ust_ring_buffer_config *config,
                struct lttng_crash_abi *crash_abi,
-               struct lttng_ust_lib_ring_buffer *buf,
+               struct lttng_ust_ring_buffer *buf,
                struct channel_backend *chanb,
                struct shm_object *shmobj,
                struct lttng_ust_shm_handle *handle)
@@ -264,14 +267,14 @@ void init_crash_abi(const struct lttng_ust_lib_ring_buffer_config *config,
        crash_abi->offset.buf_wsb_array =
                (uint32_t) ((char *) shmp(handle, buf->backend.buf_wsb) - (char *) buf);
        crash_abi->offset.buf_wsb_id =
-               offsetof(struct lttng_ust_lib_ring_buffer_backend_subbuffer, id);
+               offsetof(struct lttng_ust_ring_buffer_backend_subbuffer, id);
        crash_abi->offset.sb_array =
                (uint32_t) ((char *) shmp(handle, buf->backend.array) - (char *) buf);
        crash_abi->offset.sb_array_shmp_offset =
-               offsetof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp,
+               offsetof(struct lttng_ust_ring_buffer_backend_pages_shmp,
                        shmp._ref.offset);
        crash_abi->offset.sb_backend_p_offset =
-               offsetof(struct lttng_ust_lib_ring_buffer_backend_pages,
+               offsetof(struct lttng_ust_ring_buffer_backend_pages,
                        p._ref.offset);
 
        /* Field length */
@@ -280,19 +283,19 @@ void init_crash_abi(const struct lttng_ust_lib_ring_buffer_config *config,
        crash_abi->length.commit_hot_seq =
                sizeof(((struct commit_counters_hot *) NULL)->seq);
        crash_abi->length.buf_wsb_id =
-               sizeof(((struct lttng_ust_lib_ring_buffer_backend_subbuffer *) NULL)->id);
+               sizeof(((struct lttng_ust_ring_buffer_backend_subbuffer *) NULL)->id);
        crash_abi->length.sb_array_shmp_offset =
-               sizeof(((struct lttng_ust_lib_ring_buffer_backend_pages_shmp *) NULL)->shmp._ref.offset);
+               sizeof(((struct lttng_ust_ring_buffer_backend_pages_shmp *) NULL)->shmp._ref.offset);
        crash_abi->length.sb_backend_p_offset =
-               sizeof(((struct lttng_ust_lib_ring_buffer_backend_pages *) NULL)->p._ref.offset);
+               sizeof(((struct lttng_ust_ring_buffer_backend_pages *) NULL)->p._ref.offset);
 
        /* Array stride */
        crash_abi->stride.commit_hot_array =
                sizeof(struct commit_counters_hot);
        crash_abi->stride.buf_wsb_array =
-               sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer);
+               sizeof(struct lttng_ust_ring_buffer_backend_subbuffer);
        crash_abi->stride.sb_array =
-               sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp);
+               sizeof(struct lttng_ust_ring_buffer_backend_pages_shmp);
 
        /* Buffer constants */
        crash_abi->buf_size = chanb->buf_size;
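init_crash_abi() records, for every field a post-mortem reader must locate, its offset, its length, and the stride of the array containing it, so a crash-dump tool can walk the ring buffer from a core image without the matching struct definitions. The pattern reduced to a toy layout (names hypothetical), using the same sizeof(((T *) NULL)->field) idiom as the code above:

    #include <stddef.h>
    #include <stdint.h>

    struct record {
            uint64_t id;
            uint64_t seq;
    };

    struct layout_abi {
            uint32_t id_offset;     /* where the field sits in a record */
            uint32_t id_length;     /* how wide it is */
            uint32_t record_stride; /* distance between array elements */
    };

    static const struct layout_abi abi = {
            .id_offset     = offsetof(struct record, id),
            .id_length     = sizeof(((struct record *) NULL)->id),
            .record_stride = sizeof(struct record),
    };
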
@@ -325,20 +328,20 @@ void init_crash_abi(const struct lttng_ust_lib_ring_buffer_config *config,
 /*
  * Must be called under cpu hotplug protection.
  */
-int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
+int lib_ring_buffer_create(struct lttng_ust_ring_buffer *buf,
                           struct channel_backend *chanb, int cpu,
                           struct lttng_ust_shm_handle *handle,
                           struct shm_object *shmobj)
 {
-       const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
-       struct lttng_ust_lib_ring_buffer_channel *chan = caa_container_of(chanb,
-                       struct lttng_ust_lib_ring_buffer_channel, backend);
-       struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
-       struct lttng_ust_lib_ring_buffer_channel *shmp_chan;
+       const struct lttng_ust_ring_buffer_config *config = &chanb->config;
+       struct lttng_ust_ring_buffer_channel *chan = caa_container_of(chanb,
+                       struct lttng_ust_ring_buffer_channel, backend);
+       struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
+       struct lttng_ust_ring_buffer_channel *shmp_chan;
        struct commit_counters_hot *cc_hot;
        void *priv = channel_get_private_config(chan);
        size_t subbuf_header_size;
-       uint64_t tsc;
+       uint64_t timestamp;
        int ret;
 
        /* Test for cpu hotplug */
@@ -395,8 +398,8 @@ int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
                ret = -EPERM;
                goto free_chanbuf;
        }
-       tsc = config->cb.ring_buffer_clock_read(shmp_chan);
-       config->cb.buffer_begin(buf, tsc, 0, handle);
+       timestamp = config->cb.ring_buffer_clock_read(shmp_chan);
+       config->cb.buffer_begin(buf, timestamp, 0, handle);
        cc_hot = shmp_index(handle, buf->commit_hot, 0);
        if (!cc_hot) {
                ret = -EPERM;
@@ -431,9 +434,9 @@ static
 void lib_ring_buffer_channel_switch_timer(int sig __attribute__((unused)),
                siginfo_t *si, void *uc __attribute__((unused)))
 {
-       const struct lttng_ust_lib_ring_buffer_config *config;
+       const struct lttng_ust_ring_buffer_config *config;
        struct lttng_ust_shm_handle *handle;
-       struct lttng_ust_lib_ring_buffer_channel *chan;
+       struct lttng_ust_ring_buffer_channel *chan;
        int cpu;
 
        assert(CMM_LOAD_SHARED(timer_signal.tid) == pthread_self());
@@ -450,7 +453,7 @@ void lib_ring_buffer_channel_switch_timer(int sig __attribute__((unused)),
        pthread_mutex_lock(&wakeup_fd_mutex);
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
                for_each_possible_cpu(cpu) {
-                       struct lttng_ust_lib_ring_buffer *buf =
+                       struct lttng_ust_ring_buffer *buf =
                                shmp(handle, chan->backend.buf[cpu].shmp);
 
                        if (!buf)
@@ -460,7 +463,7 @@ void lib_ring_buffer_channel_switch_timer(int sig __attribute__((unused)),
                                        chan->handle);
                }
        } else {
-               struct lttng_ust_lib_ring_buffer *buf =
+               struct lttng_ust_ring_buffer *buf =
                        shmp(handle, chan->backend.buf[0].shmp);
 
                if (!buf)
@@ -475,9 +478,9 @@ end:
 }
 
 static
-int lib_ring_buffer_poll_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
-                                struct lttng_ust_lib_ring_buffer *buf,
-                                struct lttng_ust_lib_ring_buffer_channel *chan,
+int lib_ring_buffer_poll_deliver(const struct lttng_ust_ring_buffer_config *config,
+                                struct lttng_ust_ring_buffer *buf,
+                                struct lttng_ust_ring_buffer_channel *chan,
                                 struct lttng_ust_shm_handle *handle)
 {
        unsigned long consumed_old, consumed_idx, commit_count, write_offset;
@@ -521,7 +524,7 @@ int lib_ring_buffer_poll_deliver(const struct lttng_ust_lib_ring_buffer_config *
 }
 
 static
-void lib_ring_buffer_wakeup(struct lttng_ust_lib_ring_buffer *buf,
+void lib_ring_buffer_wakeup(struct lttng_ust_ring_buffer *buf,
                struct lttng_ust_shm_handle *handle)
 {
        int wakeup_fd = shm_get_wakeup_fd(handle, &buf->self._ref);
@@ -586,9 +589,9 @@ void lib_ring_buffer_wakeup(struct lttng_ust_lib_ring_buffer *buf,
 }
 
 static
-void lib_ring_buffer_channel_do_read(struct lttng_ust_lib_ring_buffer_channel *chan)
+void lib_ring_buffer_channel_do_read(struct lttng_ust_ring_buffer_channel *chan)
 {
-       const struct lttng_ust_lib_ring_buffer_config *config;
+       const struct lttng_ust_ring_buffer_config *config;
        struct lttng_ust_shm_handle *handle;
        int cpu;
 
@@ -601,7 +604,7 @@ void lib_ring_buffer_channel_do_read(struct lttng_ust_lib_ring_buffer_channel *c
        pthread_mutex_lock(&wakeup_fd_mutex);
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
                for_each_possible_cpu(cpu) {
-                       struct lttng_ust_lib_ring_buffer *buf =
+                       struct lttng_ust_ring_buffer *buf =
                                shmp(handle, chan->backend.buf[cpu].shmp);
 
                        if (!buf)
@@ -613,7 +616,7 @@ void lib_ring_buffer_channel_do_read(struct lttng_ust_lib_ring_buffer_channel *c
                        }
                }
        } else {
-               struct lttng_ust_lib_ring_buffer *buf =
+               struct lttng_ust_ring_buffer *buf =
                        shmp(handle, chan->backend.buf[0].shmp);
 
                if (!buf)
@@ -632,7 +635,7 @@ static
 void lib_ring_buffer_channel_read_timer(int sig __attribute__((unused)),
                siginfo_t *si, void *uc __attribute__((unused)))
 {
-       struct lttng_ust_lib_ring_buffer_channel *chan;
+       struct lttng_ust_ring_buffer_channel *chan;
 
        assert(CMM_LOAD_SHARED(timer_signal.tid) == pthread_self());
        chan = si->si_value.sival_ptr;
@@ -782,7 +785,7 @@ void lib_ring_buffer_wait_signal_thread_qs(unsigned int signr)
 }
 
 static
-void lib_ring_buffer_channel_switch_timer_start(struct lttng_ust_lib_ring_buffer_channel *chan)
+void lib_ring_buffer_channel_switch_timer_start(struct lttng_ust_ring_buffer_channel *chan)
 {
        struct sigevent sev;
        struct itimerspec its;
@@ -816,7 +819,7 @@ void lib_ring_buffer_channel_switch_timer_start(struct lttng_ust_lib_ring_buffer
 }
 
 static
-void lib_ring_buffer_channel_switch_timer_stop(struct lttng_ust_lib_ring_buffer_channel *chan)
+void lib_ring_buffer_channel_switch_timer_stop(struct lttng_ust_ring_buffer_channel *chan)
 {
        int ret;
 
@@ -835,9 +838,9 @@ void lib_ring_buffer_channel_switch_timer_stop(struct lttng_ust_lib_ring_buffer_
 }
 
 static
-void lib_ring_buffer_channel_read_timer_start(struct lttng_ust_lib_ring_buffer_channel *chan)
+void lib_ring_buffer_channel_read_timer_start(struct lttng_ust_ring_buffer_channel *chan)
 {
-       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+       const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
        struct sigevent sev;
        struct itimerspec its;
        int ret;
@@ -870,9 +873,9 @@ void lib_ring_buffer_channel_read_timer_start(struct lttng_ust_lib_ring_buffer_c
 }
 
 static
-void lib_ring_buffer_channel_read_timer_stop(struct lttng_ust_lib_ring_buffer_channel *chan)
+void lib_ring_buffer_channel_read_timer_stop(struct lttng_ust_ring_buffer_channel *chan)
 {
-       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+       const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
        int ret;
 
        if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
@@ -896,29 +899,29 @@ void lib_ring_buffer_channel_read_timer_stop(struct lttng_ust_lib_ring_buffer_ch
        chan->read_timer_enabled = 0;
 }
 
-static void channel_unregister_notifiers(struct lttng_ust_lib_ring_buffer_channel *chan,
+static void channel_unregister_notifiers(struct lttng_ust_ring_buffer_channel *chan,
                           struct lttng_ust_shm_handle *handle __attribute__((unused)))
 {
        lib_ring_buffer_channel_switch_timer_stop(chan);
        lib_ring_buffer_channel_read_timer_stop(chan);
 }
 
-static void channel_print_errors(struct lttng_ust_lib_ring_buffer_channel *chan,
+static void channel_print_errors(struct lttng_ust_ring_buffer_channel *chan,
                struct lttng_ust_shm_handle *handle)
 {
-       const struct lttng_ust_lib_ring_buffer_config *config =
+       const struct lttng_ust_ring_buffer_config *config =
                        &chan->backend.config;
        int cpu;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
                for_each_possible_cpu(cpu) {
-                       struct lttng_ust_lib_ring_buffer *buf =
+                       struct lttng_ust_ring_buffer *buf =
                                shmp(handle, chan->backend.buf[cpu].shmp);
                        if (buf)
                                lib_ring_buffer_print_errors(chan, buf, cpu, handle);
                }
        } else {
-               struct lttng_ust_lib_ring_buffer *buf =
+               struct lttng_ust_ring_buffer *buf =
                        shmp(handle, chan->backend.buf[0].shmp);
 
                if (buf)
@@ -926,7 +929,7 @@ static void channel_print_errors(struct lttng_ust_lib_ring_buffer_channel *chan,
        }
 }
 
-static void channel_free(struct lttng_ust_lib_ring_buffer_channel *chan,
+static void channel_free(struct lttng_ust_ring_buffer_channel *chan,
                struct lttng_ust_shm_handle *handle,
                int consumer)
 {
@@ -959,7 +962,7 @@ static void channel_free(struct lttng_ust_lib_ring_buffer_channel *chan,
  * Holds cpu hotplug.
  * Returns NULL on failure.
  */
-struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
+struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_ring_buffer_config *config,
                   const char *name,
                   size_t priv_data_align,
                   size_t priv_data_size,
@@ -973,14 +976,15 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buff
 {
        int ret;
        size_t shmsize, chansize;
-       struct lttng_ust_lib_ring_buffer_channel *chan;
+       struct lttng_ust_ring_buffer_channel *chan;
        struct lttng_ust_shm_handle *handle;
        struct shm_object *shmobj;
        unsigned int nr_streams;
        int64_t blocking_timeout_ms;
+       bool populate = lttng_ust_map_populate_is_enabled();
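lttng_ust_map_populate_is_enabled() gates an opt-in prefault of the channel's memory, trading slower setup for no minor page faults on the tracing fast path. At the mmap() level this corresponds to Linux's MAP_POPULATE flag; a sketch under that assumption, with a hypothetical map_shm() helper:

    #define _GNU_SOURCE             /* for MAP_POPULATE */
    #include <stdbool.h>
    #include <sys/mman.h>

    static void *map_shm(int fd, size_t len, bool populate)
    {
            int flags = MAP_SHARED;

            if (populate)
                    flags |= MAP_POPULATE;  /* prefault now, not on first access */
            return mmap(NULL, len, PROT_READ | PROT_WRITE, flags, fd, 0);
    }
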
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-               nr_streams = num_possible_cpus();
+               nr_streams = get_possible_cpus_array_len();
        else
                nr_streams = 1;
 
@@ -1004,19 +1008,19 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buff
                                         read_timer_interval))
                return NULL;
 
-       handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
+       handle = zmalloc_populate(sizeof(struct lttng_ust_shm_handle), populate);
        if (!handle)
                return NULL;
 
        /* Allocate table for channel + per-cpu buffers */
-       handle->table = shm_object_table_create(1 + num_possible_cpus());
+       handle->table = shm_object_table_create(1 + get_possible_cpus_array_len(), populate);
        if (!handle->table)
                goto error_table_alloc;
 
        /* Calculate the shm allocation layout */
-       shmsize = sizeof(struct lttng_ust_lib_ring_buffer_channel);
-       shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_shmp));
-       shmsize += sizeof(struct lttng_ust_lib_ring_buffer_shmp) * nr_streams;
+       shmsize = sizeof(struct lttng_ust_ring_buffer_channel);
+       shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_ring_buffer_shmp));
+       shmsize += sizeof(struct lttng_ust_ring_buffer_shmp) * nr_streams;
        chansize = shmsize;
        if (priv_data_align)
                shmsize += lttng_ust_offset_align(shmsize, priv_data_align);
@@ -1024,10 +1028,10 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buff
 
        /* Allocate normal memory for channel (not shared) */
        shmobj = shm_object_table_alloc(handle->table, shmsize, SHM_OBJECT_MEM,
-                       -1, -1);
+                       -1, -1, populate);
        if (!shmobj)
                goto error_append;
-       /* struct lttng_ust_lib_ring_buffer_channel is at object 0, offset 0 (hardcoded) */
+       /* struct lttng_ust_ring_buffer_channel is at object 0, offset 0 (hardcoded) */
        set_shmp(handle->chan, zalloc_shm(shmobj, chansize));
        assert(handle->chan._ref.index == 0);
        assert(handle->chan._ref.offset == 0);
@@ -1087,13 +1091,14 @@ struct lttng_ust_shm_handle *channel_handle_create(void *data,
 {
        struct lttng_ust_shm_handle *handle;
        struct shm_object *object;
+       bool populate = lttng_ust_map_populate_is_enabled();
 
-       handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
+       handle = zmalloc_populate(sizeof(struct lttng_ust_shm_handle), populate);
        if (!handle)
                return NULL;
 
        /* Allocate table for channel + per-cpu buffers */
-       handle->table = shm_object_table_create(1 + num_possible_cpus());
+       handle->table = shm_object_table_create(1 + get_possible_cpus_array_len(), populate);
        if (!handle->table)
                goto error_table_alloc;
        /* Add channel object */
@@ -1101,7 +1106,7 @@ struct lttng_ust_shm_handle *channel_handle_create(void *data,
                        memory_map_size, wakeup_fd);
        if (!object)
                goto error_table_object;
-       /* struct lttng_ust_lib_ring_buffer_channel is at object 0, offset 0 (hardcoded) */
+       /* struct lttng_ust_ring_buffer_channel is at object 0, offset 0 (hardcoded) */
        handle->chan._ref.index = 0;
        handle->chan._ref.offset = 0;
        return handle;
@@ -1122,7 +1127,7 @@ int channel_handle_add_stream(struct lttng_ust_shm_handle *handle,
        /* Add stream object */
        object = shm_object_table_append_shm(handle->table,
                        shm_fd, wakeup_fd, stream_nr,
-                       memory_map_size);
+                       memory_map_size, lttng_ust_map_populate_cpu_is_enabled(stream_nr));
        if (!object)
                return -EINVAL;
        return 0;
@@ -1135,7 +1140,7 @@ unsigned int channel_handle_get_nr_streams(struct lttng_ust_shm_handle *handle)
 }
 
 static
-void channel_release(struct lttng_ust_lib_ring_buffer_channel *chan, struct lttng_ust_shm_handle *handle,
+void channel_release(struct lttng_ust_ring_buffer_channel *chan, struct lttng_ust_shm_handle *handle,
                int consumer)
 {
        channel_free(chan, handle, consumer);
@@ -1151,7 +1156,7 @@ void channel_release(struct lttng_ust_lib_ring_buffer_channel *chan, struct lttn
  * consumption of finalized channels, get_subbuf() will return -ENODATA.
  * They should release their handle at that point.
  */
-void channel_destroy(struct lttng_ust_lib_ring_buffer_channel *chan, struct lttng_ust_shm_handle *handle,
+void channel_destroy(struct lttng_ust_ring_buffer_channel *chan, struct lttng_ust_shm_handle *handle,
                int consumer)
 {
        if (consumer) {
@@ -1174,20 +1179,21 @@ void channel_destroy(struct lttng_ust_lib_ring_buffer_channel *chan, struct lttn
        return;
 }
 
-struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
-                                       const struct lttng_ust_lib_ring_buffer_config *config,
-                                       struct lttng_ust_lib_ring_buffer_channel *chan, int cpu,
+struct lttng_ust_ring_buffer *channel_get_ring_buffer(
+                                       const struct lttng_ust_ring_buffer_config *config,
+                                       struct lttng_ust_ring_buffer_channel *chan, int cpu,
                                        struct lttng_ust_shm_handle *handle,
                                        int *shm_fd, int *wait_fd,
                                        int *wakeup_fd,
-                                       uint64_t *memory_map_size)
+                                       uint64_t *memory_map_size,
+                                       void **memory_map_addr)
 {
        struct shm_ref *ref;
 
        if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
                cpu = 0;
        } else {
-               if (cpu >= num_possible_cpus())
+               if (cpu >= get_possible_cpus_array_len())
                        return NULL;
        }
        ref = &chan->backend.buf[cpu].shmp._ref;
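The switch from num_possible_cpus() to get_possible_cpus_array_len() matters on systems with sparse CPU numbering: per-CPU arrays are indexed by CPU id, so both the allocation size and this bounds check must use the highest possible id plus one, not the count of possible CPUs. A toy illustration of the difference:

    #include <stdio.h>

    int main(void)
    {
            int possible[] = { 0, 2 };       /* sparse possible CPU ids */
            int count = 2;                   /* what a "count" API reports */
            int array_len = possible[1] + 1; /* highest id + 1 = 3 slots */

            printf("count=%d, array_len=%d\n", count, array_len);
            return 0;
    }
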
@@ -1196,12 +1202,13 @@ struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
        *wakeup_fd = shm_get_wakeup_fd(handle, ref);
        if (shm_get_shm_size(handle, ref, memory_map_size))
                return NULL;
+       *memory_map_addr = handle->table->objects[ref->index].memory_map;
        return shmp(handle, chan->backend.buf[cpu].shmp);
 }
 
 int ring_buffer_channel_close_wait_fd(
-               const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
-               struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
+               const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+               struct lttng_ust_ring_buffer_channel *chan __attribute__((unused)),
                struct lttng_ust_shm_handle *handle)
 {
        struct shm_ref *ref;
@@ -1211,8 +1218,8 @@ int ring_buffer_channel_close_wait_fd(
 }
 
 int ring_buffer_channel_close_wakeup_fd(
-               const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
-               struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
+               const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+               struct lttng_ust_ring_buffer_channel *chan __attribute__((unused)),
                struct lttng_ust_shm_handle *handle)
 {
        struct shm_ref *ref;
@@ -1221,8 +1228,8 @@ int ring_buffer_channel_close_wakeup_fd(
        return shm_close_wakeup_fd(handle, ref);
 }
 
-int ring_buffer_stream_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
-                       struct lttng_ust_lib_ring_buffer_channel *chan,
+int ring_buffer_stream_close_wait_fd(const struct lttng_ust_ring_buffer_config *config,
+                       struct lttng_ust_ring_buffer_channel *chan,
                        struct lttng_ust_shm_handle *handle,
                        int cpu)
 {
@@ -1231,15 +1238,15 @@ int ring_buffer_stream_close_wait_fd(const struct lttng_ust_lib_ring_buffer_conf
        if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
                cpu = 0;
        } else {
-               if (cpu >= num_possible_cpus())
+               if (cpu >= get_possible_cpus_array_len())
                        return -EINVAL;
        }
        ref = &chan->backend.buf[cpu].shmp._ref;
        return shm_close_wait_fd(handle, ref);
 }
 
-int ring_buffer_stream_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
-                       struct lttng_ust_lib_ring_buffer_channel *chan,
+int ring_buffer_stream_close_wakeup_fd(const struct lttng_ust_ring_buffer_config *config,
+                       struct lttng_ust_ring_buffer_channel *chan,
                        struct lttng_ust_shm_handle *handle,
                        int cpu)
 {
@@ -1249,7 +1256,7 @@ int ring_buffer_stream_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_co
        if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
                cpu = 0;
        } else {
-               if (cpu >= num_possible_cpus())
+               if (cpu >= get_possible_cpus_array_len())
                        return -EINVAL;
        }
        ref = &chan->backend.buf[cpu].shmp._ref;
@@ -1259,7 +1266,7 @@ int ring_buffer_stream_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_co
        return ret;
 }
 
-int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
+int lib_ring_buffer_open_read(struct lttng_ust_ring_buffer *buf,
                              struct lttng_ust_shm_handle *handle __attribute__((unused)))
 {
        if (uatomic_cmpxchg(&buf->active_readers, 0, 1) != 0)
@@ -1268,10 +1275,10 @@ int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
        return 0;
 }
 
-void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
+void lib_ring_buffer_release_read(struct lttng_ust_ring_buffer *buf,
                                  struct lttng_ust_shm_handle *handle)
 {
-       struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
+       struct lttng_ust_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
 
        if (!chan)
                return;
@@ -1290,12 +1297,12 @@ void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
  * data to read at consumed position, or 0 if the get operation succeeds.
  */
 
-int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
+int lib_ring_buffer_snapshot(struct lttng_ust_ring_buffer *buf,
                             unsigned long *consumed, unsigned long *produced,
                             struct lttng_ust_shm_handle *handle)
 {
-       struct lttng_ust_lib_ring_buffer_channel *chan;
-       const struct lttng_ust_lib_ring_buffer_config *config;
+       struct lttng_ust_ring_buffer_channel *chan;
+       const struct lttng_ust_ring_buffer_config *config;
        unsigned long consumed_cur, write_offset;
        int finalized;
 
@@ -1355,12 +1362,12 @@ nodata:
  * consumer positions without regard for the "snapshot" feature.
  */
 int lib_ring_buffer_snapshot_sample_positions(
-                            struct lttng_ust_lib_ring_buffer *buf,
+                            struct lttng_ust_ring_buffer *buf,
                             unsigned long *consumed, unsigned long *produced,
                             struct lttng_ust_shm_handle *handle)
 {
-       struct lttng_ust_lib_ring_buffer_channel *chan;
-       const struct lttng_ust_lib_ring_buffer_config *config;
+       struct lttng_ust_ring_buffer_channel *chan;
+       const struct lttng_ust_ring_buffer_config *config;
 
        chan = shmp(handle, buf->backend.chan);
        if (!chan)
@@ -1385,12 +1392,12 @@ int lib_ring_buffer_snapshot_sample_positions(
  * @buf: ring buffer
  * @consumed_new: new consumed count value
  */
-void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
+void lib_ring_buffer_move_consumer(struct lttng_ust_ring_buffer *buf,
                                   unsigned long consumed_new,
                                   struct lttng_ust_shm_handle *handle)
 {
-       struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
-       struct lttng_ust_lib_ring_buffer_channel *chan;
+       struct lttng_ust_ring_buffer_backend *bufb = &buf->backend;
+       struct lttng_ust_ring_buffer_channel *chan;
        unsigned long consumed;
 
        chan = shmp(handle, bufb->chan);
@@ -1417,12 +1424,12 @@ void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
  * Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
  * data to read at consumed position, or 0 if the get operation succeeds.
  */
-int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+int lib_ring_buffer_get_subbuf(struct lttng_ust_ring_buffer *buf,
                               unsigned long consumed,
                               struct lttng_ust_shm_handle *handle)
 {
-       struct lttng_ust_lib_ring_buffer_channel *chan;
-       const struct lttng_ust_lib_ring_buffer_config *config;
+       struct lttng_ust_ring_buffer_channel *chan;
+       const struct lttng_ust_ring_buffer_config *config;
        unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
        int ret, finalized, nr_retry = LTTNG_UST_RING_BUFFER_GET_RETRY;
        struct commit_counters_cold *cc_cold;
@@ -1581,15 +1588,15 @@ nodata:
  * lib_ring_buffer_put_subbuf - release exclusive subbuffer access
  * @buf: ring buffer
  */
-void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+void lib_ring_buffer_put_subbuf(struct lttng_ust_ring_buffer *buf,
                                struct lttng_ust_shm_handle *handle)
 {
-       struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
-       struct lttng_ust_lib_ring_buffer_channel *chan;
-       const struct lttng_ust_lib_ring_buffer_config *config;
+       struct lttng_ust_ring_buffer_backend *bufb = &buf->backend;
+       struct lttng_ust_ring_buffer_channel *chan;
+       const struct lttng_ust_ring_buffer_config *config;
        unsigned long sb_bindex, consumed_idx, consumed;
-       struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
-       struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+       struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
+       struct lttng_ust_ring_buffer_backend_pages *backend_pages;
 
        chan = shmp(handle, bufb->chan);
        if (!chan)
@@ -1651,13 +1658,13 @@ void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
  * position and the writer position. (inclusive)
  */
 static
-void lib_ring_buffer_print_subbuffer_errors(struct lttng_ust_lib_ring_buffer *buf,
-                                           struct lttng_ust_lib_ring_buffer_channel *chan,
+void lib_ring_buffer_print_subbuffer_errors(struct lttng_ust_ring_buffer *buf,
+                                           struct lttng_ust_ring_buffer_channel *chan,
                                            unsigned long cons_offset,
                                            int cpu,
                                            struct lttng_ust_shm_handle *handle)
 {
-       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+       const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
        unsigned long cons_idx, commit_count, commit_count_sb;
        struct commit_counters_hot *cc_hot;
        struct commit_counters_cold *cc_cold;
@@ -1686,11 +1693,11 @@ void lib_ring_buffer_print_subbuffer_errors(struct lttng_ust_lib_ring_buffer *bu
 }
 
 static
-void lib_ring_buffer_print_buffer_errors(struct lttng_ust_lib_ring_buffer *buf,
-                                        struct lttng_ust_lib_ring_buffer_channel *chan,
+void lib_ring_buffer_print_buffer_errors(struct lttng_ust_ring_buffer *buf,
+                                        struct lttng_ust_ring_buffer_channel *chan,
                                         int cpu, struct lttng_ust_shm_handle *handle)
 {
-       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+       const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
        unsigned long write_offset, cons_offset;
 
        /*
@@ -1716,11 +1723,11 @@ void lib_ring_buffer_print_buffer_errors(struct lttng_ust_lib_ring_buffer *buf,
 }
 
 static
-void lib_ring_buffer_print_errors(struct lttng_ust_lib_ring_buffer_channel *chan,
-                               struct lttng_ust_lib_ring_buffer *buf, int cpu,
+void lib_ring_buffer_print_errors(struct lttng_ust_ring_buffer_channel *chan,
+                               struct lttng_ust_ring_buffer *buf, int cpu,
                                struct lttng_ust_shm_handle *handle)
 {
-       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+       const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
 
        if (!strcmp(chan->backend.name, "relay-metadata-mmap")) {
                DBG("ring buffer %s: %lu records written, "
@@ -1756,18 +1763,18 @@ void lib_ring_buffer_print_errors(struct lttng_ust_lib_ring_buffer_channel *chan
  * active or at buffer finalization (destroy).
  */
 static
-void lib_ring_buffer_switch_old_start(struct lttng_ust_lib_ring_buffer *buf,
-                                     struct lttng_ust_lib_ring_buffer_channel *chan,
+void lib_ring_buffer_switch_old_start(struct lttng_ust_ring_buffer *buf,
+                                     struct lttng_ust_ring_buffer_channel *chan,
                                      struct switch_offsets *offsets,
-                                     uint64_t tsc,
+                                     const struct lttng_ust_ring_buffer_ctx *ctx,
                                      struct lttng_ust_shm_handle *handle)
 {
-       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+       const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
        unsigned long oldidx = subbuf_index(offsets->old, chan);
        unsigned long commit_count;
        struct commit_counters_hot *cc_hot;
 
-       config->cb.buffer_begin(buf, tsc, oldidx, handle);
+       config->cb.buffer_begin(buf, ctx->priv->timestamp, oldidx, handle);
 
        /*
         * Order all writes to buffer before the commit count update that will
@@ -1782,7 +1789,7 @@ void lib_ring_buffer_switch_old_start(struct lttng_ust_lib_ring_buffer *buf,
        commit_count = v_read(config, &cc_hot->cc);
        /* Check if the written buffer has to be delivered */
        lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
-                                     commit_count, oldidx, handle, tsc);
+                                     commit_count, oldidx, handle, ctx);
        lib_ring_buffer_write_commit_counter(config, buf, chan,
                        offsets->old + config->cb.subbuffer_header_size(),
                        commit_count, handle, cc_hot);
@@ -1797,13 +1804,13 @@ void lib_ring_buffer_switch_old_start(struct lttng_ust_lib_ring_buffer *buf,
  * subbuffer.
  */
 static
-void lib_ring_buffer_switch_old_end(struct lttng_ust_lib_ring_buffer *buf,
-                                   struct lttng_ust_lib_ring_buffer_channel *chan,
+void lib_ring_buffer_switch_old_end(struct lttng_ust_ring_buffer *buf,
+                                   struct lttng_ust_ring_buffer_channel *chan,
                                    struct switch_offsets *offsets,
-                                   uint64_t tsc,
+                                   const struct lttng_ust_ring_buffer_ctx *ctx,
                                    struct lttng_ust_shm_handle *handle)
 {
-       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+       const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
        unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
        unsigned long commit_count, padding_size, data_size;
        struct commit_counters_hot *cc_hot;
@@ -1825,7 +1832,7 @@ void lib_ring_buffer_switch_old_end(struct lttng_ust_lib_ring_buffer *buf,
         * postponed until the commit counter is incremented for the
         * current space reservation.
         */
-       *ts_end = tsc;
+       *ts_end = ctx->priv->timestamp;
 
        /*
         * Order all writes to buffer and store to ts_end before the commit
@@ -1838,7 +1845,7 @@ void lib_ring_buffer_switch_old_end(struct lttng_ust_lib_ring_buffer *buf,
        v_add(config, padding_size, &cc_hot->cc);
        commit_count = v_read(config, &cc_hot->cc);
        lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
-                                     commit_count, oldidx, handle, tsc);
+                                     commit_count, oldidx, handle, ctx);
        lib_ring_buffer_write_commit_counter(config, buf, chan,
                        offsets->old + padding_size, commit_count, handle,
                        cc_hot);
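The ordering contract around ts_end is the classic publish pattern: every payload store, including the end timestamp, must be visible before the commit-count update that lets a reader consume the sub-buffer. A reduced sketch using the liburcu primitives this file already relies on (variable layout assumed):

    #include <stdint.h>
    #include <urcu/arch.h>      /* cmm_smp_wmb() */
    #include <urcu/uatomic.h>   /* uatomic_add() */

    static uint64_t ts_end;
    static unsigned long commit_count;

    static void close_subbuffer(uint64_t now, unsigned long padding)
    {
            ts_end = now;       /* record when the sub-buffer ended */
            cmm_smp_wmb();      /* order the stores above before publication */
            uatomic_add(&commit_count, padding);    /* publish to readers */
    }
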
@@ -1852,18 +1859,18 @@ void lib_ring_buffer_switch_old_end(struct lttng_ust_lib_ring_buffer *buf,
  * that this code is executed before the deliver of this sub-buffer.
  */
 static
-void lib_ring_buffer_switch_new_start(struct lttng_ust_lib_ring_buffer *buf,
-                                     struct lttng_ust_lib_ring_buffer_channel *chan,
+void lib_ring_buffer_switch_new_start(struct lttng_ust_ring_buffer *buf,
+                                     struct lttng_ust_ring_buffer_channel *chan,
                                      struct switch_offsets *offsets,
-                                     uint64_t tsc,
+                                     const struct lttng_ust_ring_buffer_ctx *ctx,
                                      struct lttng_ust_shm_handle *handle)
 {
-       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+       const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
        unsigned long beginidx = subbuf_index(offsets->begin, chan);
        unsigned long commit_count;
        struct commit_counters_hot *cc_hot;
 
-       config->cb.buffer_begin(buf, tsc, beginidx, handle);
+       config->cb.buffer_begin(buf, ctx->priv->timestamp, beginidx, handle);
 
        /*
         * Order all writes to buffer before the commit count update that will
@@ -1877,7 +1884,7 @@ void lib_ring_buffer_switch_new_start(struct lttng_ust_lib_ring_buffer *buf,
        commit_count = v_read(config, &cc_hot->cc);
        /* Check if the written buffer has to be delivered */
        lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
-                                     commit_count, beginidx, handle, tsc);
+                                     commit_count, beginidx, handle, ctx);
        lib_ring_buffer_write_commit_counter(config, buf, chan,
                        offsets->begin + config->cb.subbuffer_header_size(),
                        commit_count, handle, cc_hot);
@@ -1892,13 +1899,13 @@ void lib_ring_buffer_switch_new_start(struct lttng_ust_lib_ring_buffer *buf,
  * we are currently doing the space reservation.
  */
 static
-void lib_ring_buffer_switch_new_end(struct lttng_ust_lib_ring_buffer *buf,
-                                   struct lttng_ust_lib_ring_buffer_channel *chan,
+void lib_ring_buffer_switch_new_end(struct lttng_ust_ring_buffer *buf,
+                                   struct lttng_ust_ring_buffer_channel *chan,
                                    struct switch_offsets *offsets,
-                                   uint64_t tsc,
+                                   const struct lttng_ust_ring_buffer_ctx *ctx,
                                    struct lttng_ust_shm_handle *handle)
 {
-       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+       const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
        unsigned long endidx, data_size;
        uint64_t *ts_end;
 
@@ -1917,7 +1924,7 @@ void lib_ring_buffer_switch_new_end(struct lttng_ust_lib_ring_buffer *buf,
         * postponed until the commit counter is incremented for the
         * current space reservation.
         */
-       *ts_end = tsc;
+       *ts_end = ctx->priv->timestamp;
 }
 
 /*
@@ -1927,13 +1934,13 @@ void lib_ring_buffer_switch_new_end(struct lttng_ust_lib_ring_buffer *buf,
  */
 static
 int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
-                                   struct lttng_ust_lib_ring_buffer *buf,
-                                   struct lttng_ust_lib_ring_buffer_channel *chan,
+                                   struct lttng_ust_ring_buffer *buf,
+                                   struct lttng_ust_ring_buffer_channel *chan,
                                    struct switch_offsets *offsets,
-                                   uint64_t *tsc,
+                                   struct lttng_ust_ring_buffer_ctx *ctx,
                                    struct lttng_ust_shm_handle *handle)
 {
-       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+       const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
        unsigned long off, reserve_commit_diff;
 
        offsets->begin = v_read(config, &buf->offset);
@@ -1941,7 +1948,7 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
        offsets->switch_old_start = 0;
        off = subbuf_offset(offsets->begin, chan);
 
-       *tsc = config->cb.ring_buffer_clock_read(chan);
+       ctx->priv->timestamp = config->cb.ring_buffer_clock_read(chan);
 
        /*
         * Ensure we flush the header of an empty subbuffer when doing the
@@ -2028,6 +2035,13 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
        offsets->begin = subbuf_align(offsets->begin, chan);
        /* Note: old points to the next subbuf at offset 0 */
        offsets->end = offsets->begin;
+       /*
+        * Populate the records lost counters prior to performing a
+        * sub-buffer switch.
+        */
+       ctx->priv->records_lost_full = v_read(config, &buf->records_lost_full);
+       ctx->priv->records_lost_wrap = v_read(config, &buf->records_lost_wrap);
+       ctx->priv->records_lost_big = v_read(config, &buf->records_lost_big);
        return 0;
 }
 
@@ -2041,15 +2055,17 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
  * RING_BUFFER_SYNC_GLOBAL ring buffers, this function can be called
  * from any CPU.
  */
-void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
+void lib_ring_buffer_switch_slow(struct lttng_ust_ring_buffer *buf, enum switch_mode mode,
                                 struct lttng_ust_shm_handle *handle)
 {
-       struct lttng_ust_lib_ring_buffer_channel *chan;
-       const struct lttng_ust_lib_ring_buffer_config *config;
+       struct lttng_ust_ring_buffer_channel *chan;
+       const struct lttng_ust_ring_buffer_config *config;
+       struct lttng_ust_ring_buffer_ctx_private ctx_priv;
+       struct lttng_ust_ring_buffer_ctx ctx;
        struct switch_offsets offsets;
        unsigned long oldidx;
-       uint64_t tsc;
 
+       ctx.priv = &ctx_priv;
        chan = shmp(handle, buf->backend.chan);
        if (!chan)
                return;
@@ -2062,18 +2078,18 @@ void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf, enum swi
         */
        do {
                if (lib_ring_buffer_try_switch_slow(mode, buf, chan, &offsets,
-                                                   &tsc, handle))
+                                                   &ctx, handle))
                        return; /* Switch not needed */
        } while (v_cmpxchg(config, &buf->offset, offsets.old, offsets.end)
                 != offsets.old);
 
        /*
-        * Atomically update last_tsc. This update races against concurrent
-        * atomic updates, but the race will always cause supplementary full TSC
-        * records, never the opposite (missing a full TSC record when it would
-        * be needed).
+        * Atomically update last_timestamp. This update races against concurrent
+        * atomic updates, but the race will always cause supplementary full
+        * timestamp records, never the opposite (missing a full timestamp
+        * record when it would be needed).
         */
-       save_last_tsc(config, buf, tsc);
+       save_last_timestamp(config, buf, ctx.priv->timestamp);
 
        /*
         * Push the reader if necessary
@@ -2087,14 +2103,14 @@ void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf, enum swi
         * May need to populate header start on SWITCH_FLUSH.
         */
        if (offsets.switch_old_start) {
-               lib_ring_buffer_switch_old_start(buf, chan, &offsets, tsc, handle);
+               lib_ring_buffer_switch_old_start(buf, chan, &offsets, &ctx, handle);
                offsets.old += config->cb.subbuffer_header_size();
        }
 
        /*
         * Switch old subbuffer.
         */
-       lib_ring_buffer_switch_old_end(buf, chan, &offsets, tsc, handle);
+       lib_ring_buffer_switch_old_end(buf, chan, &offsets, &ctx, handle);
 }
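Note the shape of this conversion: the bare uint64_t tsc local becomes a stack-allocated private context wired into a public one, so the timestamp and the records-lost counters travel together through every callback instead of as loose arguments. A minimal sketch of the wiring (struct shapes assumed from this diff):

    #include <stdint.h>

    struct rb_ctx_private {
            uint64_t timestamp;
            uint64_t records_lost_full, records_lost_wrap, records_lost_big;
    };

    struct rb_ctx {
            struct rb_ctx_private *priv;
    };

    static void switch_slow_sketch(void)
    {
            struct rb_ctx_private ctx_priv;
            struct rb_ctx ctx;

            ctx.priv = &ctx_priv;       /* wire private state first */
            ctx.priv->timestamp = 0;    /* clock-read callback fills this */
    }
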
 
 static
@@ -2122,14 +2138,14 @@ bool handle_blocking_retry(int *timeout_left_ms)
  * -EIO if data cannot be written into the buffer for any other reason.
  */
 static
-int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
-                                    struct lttng_ust_lib_ring_buffer_channel *chan,
+int lib_ring_buffer_try_reserve_slow(struct lttng_ust_ring_buffer *buf,
+                                    struct lttng_ust_ring_buffer_channel *chan,
                                     struct switch_offsets *offsets,
-                                    struct lttng_ust_lib_ring_buffer_ctx *ctx,
+                                    struct lttng_ust_ring_buffer_ctx *ctx,
                                     void *client_ctx)
 {
-       struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
-       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+       struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;
+       const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
        struct lttng_ust_shm_handle *handle = chan->handle;
        unsigned long reserve_commit_diff, offset_cmp;
        int timeout_left_ms = lttng_ust_ringbuffer_get_timeout(chan);
@@ -2142,12 +2158,12 @@ retry:
        offsets->switch_old_end = 0;
        offsets->pre_header_padding = 0;
 
-       ctx_private->tsc = config->cb.ring_buffer_clock_read(chan);
-       if ((int64_t) ctx_private->tsc == -EIO)
+       ctx_private->timestamp = config->cb.ring_buffer_clock_read(chan);
+       if ((int64_t) ctx_private->timestamp == -EIO)
                return -EIO;
 
-       if (last_tsc_overflow(config, buf, ctx_private->tsc))
-               ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+       if (last_timestamp_overflow(config, buf, ctx_private->timestamp))
+               ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TIMESTAMP;
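last_timestamp_overflow() decides whether the event may use the compact header: if any bit above the compressed timestamp width changed since the last record, a truncated delta would be ambiguous, so RING_BUFFER_RFLAG_FULL_TIMESTAMP forces a full 64-bit timestamp into the header. A sketch of that test, with the bit width stated as an assumption:

    #include <stdbool.h>
    #include <stdint.h>

    #define COMPRESSED_TS_BITS 27   /* assumed width, for illustration */

    static bool needs_full_timestamp(uint64_t last, uint64_t now)
    {
            /* Ambiguous iff the high-order parts differ. */
            return (last >> COMPRESSED_TS_BITS) != (now >> COMPRESSED_TS_BITS);
    }
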
 
        if (caa_unlikely(subbuf_offset(offsets->begin, chan) == 0)) {
                offsets->switch_new_start = 1;          /* For offsets->begin */
@@ -2157,7 +2173,7 @@ retry:
                                                &offsets->pre_header_padding,
                                                ctx, client_ctx);
                offsets->size +=
-                       lttng_ust_lib_ring_buffer_align(offsets->begin + offsets->size,
+                       lttng_ust_ring_buffer_align(offsets->begin + offsets->size,
                                             ctx->largest_align)
                        + ctx->data_size;
                if (caa_unlikely(subbuf_offset(offsets->begin, chan) +
@@ -2263,7 +2279,7 @@ retry:
                                                &offsets->pre_header_padding,
                                                ctx, client_ctx);
                offsets->size +=
-                       lttng_ust_lib_ring_buffer_align(offsets->begin + offsets->size,
+                       lttng_ust_ring_buffer_align(offsets->begin + offsets->size,
                                              ctx->largest_align)
                        + ctx->data_size;
                if (caa_unlikely(subbuf_offset(offsets->begin, chan)
@@ -2304,6 +2320,15 @@ retry:
                 */
                offsets->switch_new_end = 1;    /* For offsets->begin */
        }
+       /*
+        * Populate the records lost counters when the space reservation
+        * may cause a sub-buffer switch.
+        */
+       if (offsets->switch_new_end || offsets->switch_old_end) {
+               ctx_private->records_lost_full = v_read(config, &buf->records_lost_full);
+               ctx_private->records_lost_wrap = v_read(config, &buf->records_lost_wrap);
+               ctx_private->records_lost_big = v_read(config, &buf->records_lost_big);
+       }
        return 0;
 }
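Like the switch path earlier, this hunk snapshots the three records-lost counters into the private context, and only when a sub-buffer boundary may be crossed: the packet header is written later, so it should carry the loss counts as of the moment the switch was decided rather than whatever the live counters hold by then, and skipping the reads otherwise keeps them off the common fast path. A sketch of the snapshot step (types assumed):

    #include <stdint.h>

    struct rb_ctx_private {
            uint64_t records_lost_full, records_lost_wrap, records_lost_big;
    };

    static void snapshot_lost_counters(struct rb_ctx_private *priv,
                    uint64_t full, uint64_t wrap, uint64_t big)
    {
            /* Copy once at switch-decision time; later header writes
             * read the snapshot, not the live counters. */
            priv->records_lost_full = full;
            priv->records_lost_wrap = wrap;
            priv->records_lost_big  = big;
    }
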
 
@@ -2315,14 +2340,14 @@ retry:
  * -EIO for other errors, else returns 0.
  * It will take care of sub-buffer switching.
  */
-int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+int lib_ring_buffer_reserve_slow(struct lttng_ust_ring_buffer_ctx *ctx,
                void *client_ctx)
 {
-       struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
-       struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
+       struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;
+       struct lttng_ust_ring_buffer_channel *chan = ctx_private->chan;
        struct lttng_ust_shm_handle *handle = chan->handle;
-       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
-       struct lttng_ust_lib_ring_buffer *buf;
+       const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
+       struct lttng_ust_ring_buffer *buf;
        struct switch_offsets offsets;
        int ret;
 
@@ -2346,12 +2371,12 @@ int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
                          != offsets.old));
 
        /*
-        * Atomically update last_tsc. This update races against concurrent
-        * atomic updates, but the race will always cause supplementary full TSC
-        * records, never the opposite (missing a full TSC record when it would
-        * be needed).
+        * Atomically update last_timestamp. This update races against concurrent
+        * atomic updates, but the race will always cause supplementary full
+        * timestamp records, never the opposite (missing a full timestamp
+        * record when it would be needed).
         */
-       save_last_tsc(config, buf, ctx_private->tsc);
+       save_last_timestamp(config, buf, ctx_private->timestamp);
 
        /*
         * Push the reader if necessary
@@ -2372,17 +2397,17 @@ int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
                lib_ring_buffer_clear_noref(config, &buf->backend,
                                            subbuf_index(offsets.old - 1, chan),
                                            handle);
-               lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx_private->tsc, handle);
+               lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx, handle);
        }
 
        /*
         * Populate new subbuffer.
         */
        if (caa_unlikely(offsets.switch_new_start))
-               lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx_private->tsc, handle);
+               lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx, handle);
 
        if (caa_unlikely(offsets.switch_new_end))
-               lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx_private->tsc, handle);
+               lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx, handle);
 
        ctx_private->slot_size = offsets.size;
        ctx_private->pre_offset = offsets.begin;
@@ -2391,8 +2416,8 @@ int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
 }
 
 static
-void lib_ring_buffer_vmcore_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
-                                         struct lttng_ust_lib_ring_buffer *buf,
+void lib_ring_buffer_vmcore_check_deliver(const struct lttng_ust_ring_buffer_config *config,
+                                         struct lttng_ust_ring_buffer *buf,
                                          unsigned long commit_count,
                                          unsigned long idx,
                                          struct lttng_ust_shm_handle *handle)
@@ -2413,8 +2438,8 @@ void lib_ring_buffer_vmcore_check_deliver(const struct lttng_ust_lib_ring_buffer
  */
 #ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
 static
-void deliver_count_events(const struct lttng_ust_lib_ring_buffer_config *config,
-               struct lttng_ust_lib_ring_buffer *buf,
+void deliver_count_events(const struct lttng_ust_ring_buffer_config *config,
+               struct lttng_ust_ring_buffer *buf,
                unsigned long idx,
                struct lttng_ust_shm_handle *handle)
 {
@@ -2428,22 +2453,22 @@ void deliver_count_events(const struct lttng_ust_lib_ring_buffer_config *config,
 #else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
 static
 void deliver_count_events(
-               const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
-               struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
+               const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+               struct lttng_ust_ring_buffer *buf __attribute__((unused)),
                unsigned long idx __attribute__((unused)),
                struct lttng_ust_shm_handle *handle __attribute__((unused)))
 {
 }
 #endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
 
-void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_config *config,
-                                  struct lttng_ust_lib_ring_buffer *buf,
-                                  struct lttng_ust_lib_ring_buffer_channel *chan,
+void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_ring_buffer_config *config,
+                                  struct lttng_ust_ring_buffer *buf,
+                                  struct lttng_ust_ring_buffer_channel *chan,
                                   unsigned long offset,
                                   unsigned long commit_count,
                                   unsigned long idx,
                                   struct lttng_ust_shm_handle *handle,
-                                  uint64_t tsc __attribute__((unused)))
+                                  const struct lttng_ust_ring_buffer_ctx *ctx)
 {
        unsigned long old_commit_count = commit_count
                                         - chan->backend.subbuf_size;
@@ -2511,7 +2536,7 @@ void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_c
                                                                buf,
                                                                idx,
                                                                handle),
-                                     handle);
+                                     handle, ctx);
 
                /*
                 * Increment the packet counter while we have exclusive
@@ -2556,11 +2581,11 @@ void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_c
 }
 
 /*
- * Force a read (imply TLS fixup for dlopen) of TLS variables.
+ * Force a read (imply TLS allocation for dlopen) of TLS variables.
  */
-void lttng_fixup_ringbuffer_tls(void)
+void lttng_ringbuffer_alloc_tls(void)
 {
-       asm volatile ("" : : "m" (URCU_TLS(lib_ring_buffer_nesting)));
+       __asm__ __volatile__ ("" : : "m" (URCU_TLS(lib_ring_buffer_nesting)));
 }
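The empty asm takes the TLS variable as a "m" (memory) operand, which forces the compiler to materialize its address and therefore forces the dynamic linker to allocate the thread's TLS block at a controlled point, rather than lazily in a later, possibly async-signal, context; the rename from "fixup" to "alloc" reflects that intent. A standalone sketch using the same liburcu helpers, with placeholder names:

    #include <urcu/tls-compat.h>    /* DEFINE_URCU_TLS(), URCU_TLS() */

    DEFINE_URCU_TLS(unsigned int, my_nesting);

    /* Touch the TLS variable so its storage is allocated now. */
    void my_alloc_tls(void)
    {
            __asm__ __volatile__ ("" : : "m" (URCU_TLS(my_nesting)));
    }
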
 
 void lib_ringbuffer_signal_init(void)