Fix: ACCESS_ONCE() removed in kernel 4.15
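
Linux 4.15 removed the ACCESS_ONCE() macro; its replacements, READ_ONCE() and
WRITE_ONCE(), have been available since 3.19. Because lttng-modules builds
against a wide range of kernel versions, the ring-buffer code switches to the
new accessors and pulls them in through wrapper/compiler.h, which can map them
back onto ACCESS_ONCE() on kernels that predate them. A minimal sketch of such
a compatibility shim, assuming the usual fallback approach (the actual contents
of wrapper/compiler.h are not shown in this diff):

        /* wrapper/compiler.h -- illustrative sketch, not the verbatim file */
        #include <linux/compiler.h>

        /*
         * READ_ONCE()/WRITE_ONCE() appeared in Linux 3.19; ACCESS_ONCE() was
         * removed in 4.15. Fall back to ACCESS_ONCE() where the new accessors
         * are not provided by the kernel.
         */
        #ifndef READ_ONCE
        #define READ_ONCE(x)            ACCESS_ONCE(x)
        #endif

        #ifndef WRITE_ONCE
        #define WRITE_ONCE(x, val)      ({ ACCESS_ONCE(x) = (val); })
        #endif
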
diff --git a/lib/ringbuffer/backend_internal.h b/lib/ringbuffer/backend_internal.h
index fc5bec535c591b3d9487e555c0dbbbf2055f5dc9..dc69ecf39ff41591f3d65908252c4276751c3c6a 100644
@@ -23,6 +23,7 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
+#include <wrapper/compiler.h>
 #include <wrapper/ringbuffer/config.h>
 #include <wrapper/ringbuffer/backend_types.h>
 #include <wrapper/ringbuffer/frontend_types.h>
@@ -171,7 +172,7 @@ void subbuffer_id_set_noref_offset(const struct lib_ring_buffer_config *config,
                tmp |= offset << SB_ID_OFFSET_SHIFT;
                tmp |= SB_ID_NOREF_MASK;
                /* Volatile store, read concurrently by readers. */
-               ACCESS_ONCE(*id) = tmp;
+               WRITE_ONCE(*id, tmp);
        }
 }
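
WRITE_ONCE() is not just a spelling change here. The store publishes the
subbuffer index, its offset and the noref flag packed into a single unsigned
long, and the volatile store keeps the compiler from tearing, fusing or
reordering it, so readers polling the same word with READ_ONCE() always observe
one coherent snapshot of index and flag together. The pairing, in outline (the
reader side appears further down in this file):

        WRITE_ONCE(*id, tmp);                   /* writer: one untorn word store */
        id = READ_ONCE(bufb->buf_wsb[idx].id);  /* reader: one untorn word load  */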
 
@@ -201,6 +202,37 @@ int subbuffer_id_check_index(const struct lib_ring_buffer_config *config,
                return 0;
 }
 
+static inline
+void lib_ring_buffer_backend_get_pages(const struct lib_ring_buffer_config *config,
+                       struct lib_ring_buffer_ctx *ctx,
+                       struct lib_ring_buffer_backend_pages **backend_pages)
+{
+       struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
+       struct channel_backend *chanb = &ctx->chan->backend;
+       size_t sbidx, offset = ctx->buf_offset;
+       unsigned long sb_bindex, id;
+       struct lib_ring_buffer_backend_pages *rpages;
+
+       offset &= chanb->buf_size - 1;
+       sbidx = offset >> chanb->subbuf_size_order;
+       id = bufb->buf_wsb[sbidx].id;
+       sb_bindex = subbuffer_id_get_index(config, id);
+       rpages = bufb->array[sb_bindex];
+       CHAN_WARN_ON(ctx->chan,
+                    config->mode == RING_BUFFER_OVERWRITE
+                    && subbuffer_id_is_noref(config, id));
+       *backend_pages = rpages;
+}
+
+/* Get backend pages from cache. */
+static inline
+struct lib_ring_buffer_backend_pages *
+       lib_ring_buffer_get_backend_pages_from_ctx(const struct lib_ring_buffer_config *config,
+               struct lib_ring_buffer_ctx *ctx)
+{
+       return ctx->backend_pages;
+}
+
 /*
  * The ring buffer can count events recorded and overwritten per buffer,
  * but it is disabled by default due to its performance overhead.
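
These two helpers move the subbuffer-page lookup off the write fast path.
lib_ring_buffer_backend_get_pages() does the full translation from the write
offset to the backing pages (mask by the buffer size, shift down to the
subbuffer index, decode the packed id, index the pages array), with a sanity
check that an overwrite-mode writer never lands on a subbuffer still flagged
noref. lib_ring_buffer_get_backend_pages_from_ctx() then simply returns the
pointer cached in the context. A plausible usage pattern, assuming the reserve
path fills ctx->backend_pages once so every later field write is a cheap
cached lookup (call sites illustrative, not verbatim):

        struct lib_ring_buffer_backend_pages *pages;

        /* After space reservation: do the full lookup once, cache the result. */
        lib_ring_buffer_backend_get_pages(config, ctx, &ctx->backend_pages);

        /* On each field write: no index arithmetic, just the cached pointer. */
        pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
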
@@ -348,7 +380,7 @@ void lib_ring_buffer_clear_noref(const struct lib_ring_buffer_config *config,
         * Performing a volatile access to read the sb_pages, because we want to
         * read a coherent version of the pointer and the associated noref flag.
         */
-       id = ACCESS_ONCE(bufb->buf_wsb[idx].id);
+       id = READ_ONCE(bufb->buf_wsb[idx].id);
        for (;;) {
                /* This check is called on the fast path for each record. */
                if (likely(!subbuffer_id_is_noref(config, id))) {
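
This READ_ONCE() is the reader half of the WRITE_ONCE() in
subbuffer_id_set_noref_offset() above: since the index and the noref flag
share one word, a single untorn load observes a flag that belongs with the
index next to it. The loaded value is still only a hint, though; the loop
re-validates it with cmpxchg() and retries on concurrent modification. The
general shape of that pattern (transform() is a hypothetical placeholder, not
code from this file):

        id = READ_ONCE(word);
        for (;;) {
                unsigned long old = id, new = transform(old);

                id = cmpxchg(&word, old, new);
                if (id == old)
                        break;  /* word still held old: update applied */
                /* otherwise id now holds the fresh value: retry with it */
        }
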
@@ -417,7 +449,7 @@ int update_read_sb_index(const struct lib_ring_buffer_config *config,
        if (config->mode == RING_BUFFER_OVERWRITE) {
                /*
                 * Exchange the target writer subbuffer with our own unused
-                * subbuffer. No need to use ACCESS_ONCE() here to read the
+                * subbuffer. No need to use READ_ONCE() here to read the
                 * old_wpage, because the value read will be confirmed by the
                 * following cmpxchg().
                 */
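
The updated comment states the flip side of the same rule: a plain racy load
is fine immediately before a cmpxchg(), because the cmpxchg() only succeeds if
the location still holds exactly the value that was read, so a stale read
merely makes the exchange fail. Sketched for the reader/writer subbuffer swap
this function performs (names illustrative, not the verbatim code):

        old_id = bufb->buf_wsb[consumed_idx].id;  /* plain load: may race */
        if (cmpxchg(&bufb->buf_wsb[consumed_idx].id,
                    old_id, bufb->buf_rsb.id) != old_id)
                return -EAGAIN;  /* a writer raced us: the read was stale */
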
@@ -447,6 +479,28 @@ int update_read_sb_index(const struct lib_ring_buffer_config *config,
        return 0;
 }
 
+static inline __attribute__((always_inline))
+void lttng_inline_memcpy(void *dest, const void *src,
+               unsigned long len)
+{
+       switch (len) {
+       case 1:
+               *(uint8_t *) dest = *(const uint8_t *) src;
+               break;
+       case 2:
+               *(uint16_t *) dest = *(const uint16_t *) src;
+               break;
+       case 4:
+               *(uint32_t *) dest = *(const uint32_t *) src;
+               break;
+       case 8:
+               *(uint64_t *) dest = *(const uint64_t *) src;
+               break;
+       default:
+               inline_memcpy(dest, src, len);
+       }
+}
+
 /*
  * Use the architecture-specific memcpy implementation for constant-sized
  * inputs, but rely on an inline memcpy when the length is not statically known.
@@ -458,7 +512,7 @@ do {                                                                \
        if (__builtin_constant_p(len))                          \
                memcpy(dest, src, __len);                       \
        else                                                    \
-               inline_memcpy(dest, src, __len);                \
+               lttng_inline_memcpy(dest, src, __len);          \
 } while (0)
 
 /*
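
lttng_inline_memcpy() closes a gap in this dispatch: __builtin_constant_p(len)
only fires when the length is a compile-time constant at the expansion site,
so runtime-variable lengths used to fall through to the generic
inline_memcpy() even though, in tracing workloads, they are almost always 1,
2, 4 or 8 bytes. The new switch turns those dominant sizes into single
word-sized loads and stores and keeps the generic copy as the fallback. A
hypothetical pair of call sites for the copy macro above
(lib_ring_buffer_do_copy() in this file; field names illustrative):

        /* Constant length: __builtin_constant_p(sizeof(uint64_t)) is true,
         * so memcpy() is chosen and expanded inline by the compiler. */
        lib_ring_buffer_do_copy(config, dest, &payload, sizeof(uint64_t));

        /* Runtime length: goes through lttng_inline_memcpy(), whose switch
         * still catches the common 1/2/4/8-byte cases without a call. */
        lib_ring_buffer_do_copy(config, dest, src, dynamic_len);

Note that the fixed-size branches copy through casted uint*_t pointers, which
assumes the architecture (or the ring buffer's alignment policy) tolerates the
resulting access alignment.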