X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=lib%2Fringbuffer%2Fbackend_internal.h;h=5ad6fac24f6124b6b88a13d50911c22bfb923740;hb=b7cdc18250880cc44edeef4a4b42c8ac7a135a6d;hp=04a7ae17db7ea7fcba964f1d64092c5de7409b22;hpb=5b3cf4f924befda843a7736daf84f8ecae5e86a4;p=lttng-modules.git

diff --git a/lib/ringbuffer/backend_internal.h b/lib/ringbuffer/backend_internal.h
index 04a7ae17..5ad6fac2 100644
--- a/lib/ringbuffer/backend_internal.h
+++ b/lib/ringbuffer/backend_internal.h
@@ -1,31 +1,19 @@
-#ifndef _LIB_RING_BUFFER_BACKEND_INTERNAL_H
-#define _LIB_RING_BUFFER_BACKEND_INTERNAL_H
-
-/*
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
  * lib/ringbuffer/backend_internal.h
  *
  * Ring buffer backend (internal helpers).
  *
  * Copyright (C) 2008-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; only
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
-#include "../../wrapper/ringbuffer/config.h"
-#include "../../wrapper/ringbuffer/backend_types.h"
-#include "../../wrapper/ringbuffer/frontend_types.h"
+#ifndef _LIB_RING_BUFFER_BACKEND_INTERNAL_H
+#define _LIB_RING_BUFFER_BACKEND_INTERNAL_H
+
+#include <wrapper/ringbuffer/config.h>
+#include <wrapper/ringbuffer/backend_types.h>
+#include <wrapper/ringbuffer/frontend_types.h>
+#include
 
 #include
 #include
@@ -171,7 +159,7 @@ void subbuffer_id_set_noref_offset(const struct lib_ring_buffer_config *config,
 		tmp |= offset << SB_ID_OFFSET_SHIFT;
 		tmp |= SB_ID_NOREF_MASK;
 		/* Volatile store, read concurrently by readers. */
-		ACCESS_ONCE(*id) = tmp;
+		WRITE_ONCE(*id, tmp);
 	}
 }
 
@@ -201,6 +189,42 @@ int subbuffer_id_check_index(const struct lib_ring_buffer_config *config,
 	return 0;
 }
 
+static inline
+void lib_ring_buffer_backend_get_pages(const struct lib_ring_buffer_config *config,
+		struct lib_ring_buffer_ctx *ctx,
+		struct lib_ring_buffer_backend_pages **backend_pages)
+{
+	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
+	struct channel_backend *chanb = &ctx->chan->backend;
+	size_t sbidx, offset = ctx->buf_offset;
+	unsigned long sb_bindex, id;
+	struct lib_ring_buffer_backend_pages *rpages;
+
+	offset &= chanb->buf_size - 1;
+	sbidx = offset >> chanb->subbuf_size_order;
+	id = bufb->buf_wsb[sbidx].id;
+	sb_bindex = subbuffer_id_get_index(config, id);
+	rpages = bufb->array[sb_bindex];
+	CHAN_WARN_ON(ctx->chan,
+		config->mode == RING_BUFFER_OVERWRITE
+		&& subbuffer_id_is_noref(config, id));
+	*backend_pages = rpages;
+}
+
+/* Get backend pages from cache. */
+static inline
+struct lib_ring_buffer_backend_pages *
+	lib_ring_buffer_get_backend_pages_from_ctx(const struct lib_ring_buffer_config *config,
+		struct lib_ring_buffer_ctx *ctx)
+{
+	return ctx->backend_pages;
+}
+
+/*
+ * The ring buffer can count events recorded and overwritten per buffer,
+ * but it is disabled by default due to its performance overhead.
+ */ +#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS static inline void subbuffer_count_record(const struct lib_ring_buffer_config *config, struct lib_ring_buffer_backend *bufb, @@ -211,6 +235,14 @@ void subbuffer_count_record(const struct lib_ring_buffer_config *config, sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id); v_inc(config, &bufb->array[sb_bindex]->records_commit); } +#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */ +static inline +void subbuffer_count_record(const struct lib_ring_buffer_config *config, + struct lib_ring_buffer_backend *bufb, + unsigned long idx) +{ +} +#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */ /* * Reader has exclusive subbuffer access for record consumption. No need to @@ -335,7 +367,7 @@ void lib_ring_buffer_clear_noref(const struct lib_ring_buffer_config *config, * Performing a volatile access to read the sb_pages, because we want to * read a coherent version of the pointer and the associated noref flag. */ - id = ACCESS_ONCE(bufb->buf_wsb[idx].id); + id = READ_ONCE(bufb->buf_wsb[idx].id); for (;;) { /* This check is called on the fast path for each record. */ if (likely(!subbuffer_id_is_noref(config, id))) { @@ -404,7 +436,7 @@ int update_read_sb_index(const struct lib_ring_buffer_config *config, if (config->mode == RING_BUFFER_OVERWRITE) { /* * Exchange the target writer subbuffer with our own unused - * subbuffer. No need to use ACCESS_ONCE() here to read the + * subbuffer. No need to use READ_ONCE() here to read the * old_wpage, because the value read will be confirmed by the * following cmpxchg(). */ @@ -434,6 +466,28 @@ int update_read_sb_index(const struct lib_ring_buffer_config *config, return 0; } +static inline __attribute__((always_inline)) +void lttng_inline_memcpy(void *dest, const void *src, + unsigned long len) +{ + switch (len) { + case 1: + *(uint8_t *) dest = *(const uint8_t *) src; + break; + case 2: + *(uint16_t *) dest = *(const uint16_t *) src; + break; + case 4: + *(uint32_t *) dest = *(const uint32_t *) src; + break; + case 8: + *(uint64_t *) dest = *(const uint64_t *) src; + break; + default: + inline_memcpy(dest, src, len); + } +} + /* * Use the architecture-specific memcpy implementation for constant-sized * inputs, but rely on an inline memcpy for length statically unknown. @@ -445,12 +499,14 @@ do { \ if (__builtin_constant_p(len)) \ memcpy(dest, src, __len); \ else \ - inline_memcpy(dest, src, __len); \ + lttng_inline_memcpy(dest, src, __len); \ } while (0) /* * We use __copy_from_user_inatomic to copy userspace data since we already * did the access_ok for the whole range. + * + * Return 0 if OK, nonzero on error. */ static inline unsigned long lib_ring_buffer_do_copy_from_user_inatomic(void *dest,
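
Not part of the patch above: a minimal user-space sketch of the size-specialization technique that lttng_inline_memcpy() introduces. Constant lengths of 1, 2, 4 or 8 bytes collapse into a single fixed-width assignment, while any other length falls back to a generic copy. The demo_inline_memcpy() name is hypothetical, and plain memcpy() stands in for the kernel's architecture-specific inline_memcpy().

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Same dispatch as lttng_inline_memcpy(): small constant lengths become
 * one fixed-width assignment; other lengths fall back to a generic copy.
 */
static inline __attribute__((always_inline))
void demo_inline_memcpy(void *dest, const void *src, unsigned long len)
{
	switch (len) {
	case 1:
		*(uint8_t *) dest = *(const uint8_t *) src;
		break;
	case 2:
		*(uint16_t *) dest = *(const uint16_t *) src;
		break;
	case 4:
		*(uint32_t *) dest = *(const uint32_t *) src;
		break;
	case 8:
		*(uint64_t *) dest = *(const uint64_t *) src;
		break;
	default:
		memcpy(dest, src, len);
	}
}

int main(void)
{
	uint64_t src = 0x1122334455667788ULL;
	uint64_t dst = 0;

	/* Constant 8-byte length: the switch reduces to a single store. */
	demo_inline_memcpy(&dst, &src, sizeof(src));
	printf("copied 0x%" PRIx64 "\n", dst);
	return 0;
}

Because the length is a compile-time constant at most call sites on the record hot path, the switch is resolved at compile time and the copy becomes a single load/store rather than a memcpy() call.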