Fix: pass private data to context callbacks
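
The public struct lttng_ust_lib_ring_buffer_ctx handed to client callbacks
no longer exposes the ring buffer internals directly: the fields written by
the reserve path (buffer pointer, slot size, offsets, timestamp, reservation
flags, ...) move behind ctx->priv, the shm handle is reached through
chan->handle, and probe-provided fields such as ctx->data_size and
ctx->largest_align stay in the public context. Callback parameters that
become unused as a result are annotated __attribute__((unused)) so the
signatures stay unchanged without triggering unused-parameter warnings.

A rough sketch of the private side, as inferred from the hunks below (exact
types and field order are assumptions, not the authoritative header):

    #include <stdint.h>    /* uint64_t */

    struct lttng_ust_lib_ring_buffer;            /* opaque here */
    struct lttng_ust_lib_ring_buffer_channel;    /* opaque here */

    /* Sketch only: fields inferred from this diff. */
    struct lttng_ust_lib_ring_buffer_ctx_private {
        struct lttng_ust_lib_ring_buffer_channel *chan;  /* was ctx->chan */
        struct lttng_ust_lib_ring_buffer *buf;           /* was ctx->buf */
        unsigned long slot_size;                         /* was ctx->slot_size */
        unsigned long buf_offset;                        /* was ctx->buf_offset */
        unsigned long pre_offset;                        /* was ctx->pre_offset */
        uint64_t tsc;                                    /* was ctx->tsc */
        unsigned int rflags;                             /* was ctx->rflags */
        int reserve_cpu;                                 /* was ctx->reserve_cpu */
    };

Accordingly, the slow reserve path dereferences ctx->priv once and works on
the private struct, e.g. ctx_private->tsc instead of ctx->tsc.
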
diff --git a/libringbuffer/ring_buffer_frontend.c b/libringbuffer/ring_buffer_frontend.c
index 83da122b80cf8d5e50fa8d99827ddf58b73b45b6..1b9ec40d3cc9b001a5bd951457438cf2445c53e3 100644
--- a/libringbuffer/ring_buffer_frontend.c
+++ b/libringbuffer/ring_buffer_frontend.c
@@ -52,7 +52,7 @@
 #include <poll.h>
 #include <ust-helper.h>
 
-#include <lttng/ust-align.h>
+#include <lttng/ust-utils.h>
 #include <lttng/ringbuffer-context.h>
 
 #include "smp.h"
@@ -428,7 +428,8 @@ free_chanbuf:
 }
 
 static
-void lib_ring_buffer_channel_switch_timer(int sig, siginfo_t *si, void *uc)
+void lib_ring_buffer_channel_switch_timer(int sig __attribute__((unused)),
+               siginfo_t *si, void *uc __attribute__((unused)))
 {
        const struct lttng_ust_lib_ring_buffer_config *config;
        struct lttng_ust_shm_handle *handle;
@@ -628,7 +629,8 @@ end:
 }
 
 static
-void lib_ring_buffer_channel_read_timer(int sig, siginfo_t *si, void *uc)
+void lib_ring_buffer_channel_read_timer(int sig __attribute__((unused)),
+               siginfo_t *si, void *uc __attribute__((unused)))
 {
        struct lttng_ust_lib_ring_buffer_channel *chan;
 
@@ -663,7 +665,7 @@ void rb_setmask(sigset_t *mask)
 }
 
 static
-void *sig_thread(void *arg)
+void *sig_thread(void *arg __attribute__((unused)))
 {
        sigset_t mask;
        siginfo_t info;
@@ -895,7 +897,7 @@ void lib_ring_buffer_channel_read_timer_stop(struct lttng_ust_lib_ring_buffer_ch
 }
 
 static void channel_unregister_notifiers(struct lttng_ust_lib_ring_buffer_channel *chan,
-                          struct lttng_ust_shm_handle *handle)
+                          struct lttng_ust_shm_handle *handle __attribute__((unused)))
 {
        lib_ring_buffer_channel_switch_timer_stop(chan);
        lib_ring_buffer_channel_read_timer_stop(chan);
@@ -963,7 +965,7 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buff
                   size_t priv_data_size,
                   void *priv_data_init,
                   void *priv,
-                  void *buf_addr, size_t subbuf_size,
+                  void *buf_addr __attribute__((unused)), size_t subbuf_size,
                   size_t num_subbuf, unsigned int switch_timer_interval,
                   unsigned int read_timer_interval,
                   const int *stream_fds, int nr_stream_fds,
@@ -1197,9 +1199,10 @@ struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
        return shmp(handle, chan->backend.buf[cpu].shmp);
 }
 
-int ring_buffer_channel_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
-                       struct lttng_ust_lib_ring_buffer_channel *chan,
-                       struct lttng_ust_shm_handle *handle)
+int ring_buffer_channel_close_wait_fd(
+               const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+               struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
+               struct lttng_ust_shm_handle *handle)
 {
        struct shm_ref *ref;
 
@@ -1207,9 +1210,10 @@ int ring_buffer_channel_close_wait_fd(const struct lttng_ust_lib_ring_buffer_con
        return shm_close_wait_fd(handle, ref);
 }
 
-int ring_buffer_channel_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
-                       struct lttng_ust_lib_ring_buffer_channel *chan,
-                       struct lttng_ust_shm_handle *handle)
+int ring_buffer_channel_close_wakeup_fd(
+               const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+               struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
+               struct lttng_ust_shm_handle *handle)
 {
        struct shm_ref *ref;
 
@@ -1256,7 +1260,7 @@ int ring_buffer_stream_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_co
 }
 
 int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
-                             struct lttng_ust_shm_handle *handle)
+                             struct lttng_ust_shm_handle *handle __attribute__((unused)))
 {
        if (uatomic_cmpxchg(&buf->active_readers, 0, 1) != 0)
                return -EBUSY;
@@ -2124,8 +2128,9 @@ int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
                                     struct lttng_ust_lib_ring_buffer_ctx *ctx,
                                     void *client_ctx)
 {
+       struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
        const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
-       struct lttng_ust_shm_handle *handle = ctx->handle;
+       struct lttng_ust_shm_handle *handle = chan->handle;
        unsigned long reserve_commit_diff, offset_cmp;
        int timeout_left_ms = lttng_ust_ringbuffer_get_timeout(chan);
 
@@ -2137,14 +2142,14 @@ retry:
        offsets->switch_old_end = 0;
        offsets->pre_header_padding = 0;
 
-       ctx->tsc = config->cb.ring_buffer_clock_read(chan);
-       if ((int64_t) ctx->tsc == -EIO)
+       ctx_private->tsc = config->cb.ring_buffer_clock_read(chan);
+       if ((int64_t) ctx_private->tsc == -EIO)
                return -EIO;
 
-       if (last_tsc_overflow(config, buf, ctx->tsc))
-               ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+       if (last_tsc_overflow(config, buf, ctx_private->tsc))
+               ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
 
-       if (caa_unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
+       if (caa_unlikely(subbuf_offset(offsets->begin, chan) == 0)) {
                offsets->switch_new_start = 1;          /* For offsets->begin */
        } else {
                offsets->size = config->cb.record_header_size(config, chan,
@@ -2152,8 +2157,8 @@ retry:
                                                &offsets->pre_header_padding,
                                                ctx, client_ctx);
                offsets->size +=
-                       lib_ring_buffer_align(offsets->begin + offsets->size,
-                                             ctx->largest_align)
+                       lttng_ust_lib_ring_buffer_align(offsets->begin + offsets->size,
+                                            ctx->largest_align)
                        + ctx->data_size;
                if (caa_unlikely(subbuf_offset(offsets->begin, chan) +
                             offsets->size > chan->backend.subbuf_size)) {
@@ -2258,7 +2263,7 @@ retry:
                                                &offsets->pre_header_padding,
                                                ctx, client_ctx);
                offsets->size +=
-                       lib_ring_buffer_align(offsets->begin + offsets->size,
+                       lttng_ust_lib_ring_buffer_align(offsets->begin + offsets->size,
                                              ctx->largest_align)
                        + ctx->data_size;
                if (caa_unlikely(subbuf_offset(offsets->begin, chan)
@@ -2313,20 +2318,21 @@ retry:
 int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
                void *client_ctx)
 {
-       struct lttng_ust_lib_ring_buffer_channel *chan = ctx->chan;
-       struct lttng_ust_shm_handle *handle = ctx->handle;
+       struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+       struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
+       struct lttng_ust_shm_handle *handle = chan->handle;
        const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        struct lttng_ust_lib_ring_buffer *buf;
        struct switch_offsets offsets;
        int ret;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-               buf = shmp(handle, chan->backend.buf[ctx->reserve_cpu].shmp);
+               buf = shmp(handle, chan->backend.buf[ctx_private->reserve_cpu].shmp);
        else
                buf = shmp(handle, chan->backend.buf[0].shmp);
        if (!buf)
                return -EIO;
-       ctx->buf = buf;
+       ctx_private->buf = buf;
 
        offsets.size = 0;
 
@@ -2345,7 +2351,7 @@ int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
         * records, never the opposite (missing a full TSC record when it would
         * be needed).
         */
-       save_last_tsc(config, buf, ctx->tsc);
+       save_last_tsc(config, buf, ctx_private->tsc);
 
        /*
         * Push the reader if necessary
@@ -2366,21 +2372,21 @@ int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
                lib_ring_buffer_clear_noref(config, &buf->backend,
                                            subbuf_index(offsets.old - 1, chan),
                                            handle);
-               lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->tsc, handle);
+               lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx_private->tsc, handle);
        }
 
        /*
         * Populate new subbuffer.
         */
        if (caa_unlikely(offsets.switch_new_start))
-               lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc, handle);
+               lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx_private->tsc, handle);
 
        if (caa_unlikely(offsets.switch_new_end))
-               lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc, handle);
+               lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx_private->tsc, handle);
 
-       ctx->slot_size = offsets.size;
-       ctx->pre_offset = offsets.begin;
-       ctx->buf_offset = offsets.begin + offsets.pre_header_padding;
+       ctx_private->slot_size = offsets.size;
+       ctx_private->pre_offset = offsets.begin;
+       ctx_private->buf_offset = offsets.begin + offsets.pre_header_padding;
        return 0;
 }
 
@@ -2421,10 +2427,11 @@ void deliver_count_events(const struct lttng_ust_lib_ring_buffer_config *config,
 }
 #else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
 static
-void deliver_count_events(const struct lttng_ust_lib_ring_buffer_config *config,
-               struct lttng_ust_lib_ring_buffer *buf,
-               unsigned long idx,
-               struct lttng_ust_shm_handle *handle)
+void deliver_count_events(
+               const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+               struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
+               unsigned long idx __attribute__((unused)),
+               struct lttng_ust_shm_handle *handle __attribute__((unused)))
 {
 }
 #endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
@@ -2436,7 +2443,7 @@ void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_c
                                   unsigned long commit_count,
                                   unsigned long idx,
                                   struct lttng_ust_shm_handle *handle,
-                                  uint64_t tsc)
+                                  uint64_t tsc __attribute__((unused)))
 {
        unsigned long old_commit_count = commit_count
                                         - chan->backend.subbuf_size;
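
Side note on the alignment helper visible in the hunks above:
lib_ring_buffer_align() is now called as lttng_ust_lib_ring_buffer_align(),
and the file includes <lttng/ust-utils.h> instead of <lttng/ust-align.h>.
The call sites only rely on the usual offset-alignment semantics: the value
added to offsets->size is the padding needed so the ctx->data_size payload
starts on a ctx->largest_align boundary. A minimal equivalent for
illustration (example_offset_align is a hypothetical name, not an lttng-ust
symbol):

    /*
     * Padding to add to "offset" so it becomes a multiple of "alignment"
     * (alignment must be a power of two).
     */
    #define example_offset_align(offset, alignment) \
        (((alignment) - (offset)) & ((alignment) - 1))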