X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=lib%2Fringbuffer%2Fbackend_internal.h;h=2d6a3453103af434ed1d94ac66026eddd9cf342f;hb=9f36eaed6f91d5897924b551b44d1edd8cee00e2;hp=d6c1c8239b2ec35af257d9b7376ad45c52a6df89;hpb=f3bc08c50e1b302bceea699027d889fd6d9af525;p=lttng-modules.git

diff --git a/lib/ringbuffer/backend_internal.h b/lib/ringbuffer/backend_internal.h
index d6c1c823..2d6a3453 100644
--- a/lib/ringbuffer/backend_internal.h
+++ b/lib/ringbuffer/backend_internal.h
@@ -1,20 +1,21 @@
-#ifndef _LINUX_RING_BUFFER_BACKEND_INTERNAL_H
-#define _LINUX_RING_BUFFER_BACKEND_INTERNAL_H
-
-/*
- * linux/ringbuffer/backend_internal.h
+/* SPDX-License-Identifier: (GPL-2.0 OR LGPL-2.1)
  *
- * Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * lib/ringbuffer/backend_internal.h
  *
  * Ring buffer backend (internal helpers).
  *
- * Dual LGPL v2.1/GPL v2 license.
+ * Copyright (C) 2008-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  */
 
-#include "../../wrapper/ringbuffer/config.h"
-#include "../../wrapper/ringbuffer/backend_types.h"
-#include "../../wrapper/ringbuffer/frontend_types.h"
+#ifndef _LIB_RING_BUFFER_BACKEND_INTERNAL_H
+#define _LIB_RING_BUFFER_BACKEND_INTERNAL_H
+
+#include <wrapper/compiler.h>
+#include <wrapper/ringbuffer/config.h>
+#include <wrapper/ringbuffer/backend_types.h>
+#include <wrapper/ringbuffer/frontend_types.h>
 #include <linux/string.h>
+#include <linux/uaccess.h>
 
 /* Ring buffer backend API presented to the frontend */
 
@@ -39,7 +40,19 @@ void lib_ring_buffer_backend_exit(void);
 
 extern void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb,
 				   size_t offset, const void *src, size_t len,
-				   ssize_t pagecpy);
+				   size_t pagecpy);
+extern void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
+				    size_t offset, int c, size_t len,
+				    size_t pagecpy);
+extern void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
+				    size_t offset, const char *src, size_t len,
+				    size_t pagecpy, int pad);
+extern void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
+				    size_t offset, const void *src,
+				    size_t len, size_t pagecpy);
+extern void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
+				    size_t offset, const char __user *src, size_t len,
+				    size_t pagecpy, int pad);
 
 /*
  * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
@@ -146,7 +159,7 @@ void subbuffer_id_set_noref_offset(const struct lib_ring_buffer_config *config,
 		tmp |= offset << SB_ID_OFFSET_SHIFT;
 		tmp |= SB_ID_NOREF_MASK;
 		/* Volatile store, read concurrently by readers. */
-		ACCESS_ONCE(*id) = tmp;
+		WRITE_ONCE(*id, tmp);
 	}
 }
 
@@ -176,6 +189,42 @@ int subbuffer_id_check_index(const struct lib_ring_buffer_config *config,
 	return 0;
 }
 
+static inline
+void lib_ring_buffer_backend_get_pages(const struct lib_ring_buffer_config *config,
+			struct lib_ring_buffer_ctx *ctx,
+			struct lib_ring_buffer_backend_pages **backend_pages)
+{
+	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
+	struct channel_backend *chanb = &ctx->chan->backend;
+	size_t sbidx, offset = ctx->buf_offset;
+	unsigned long sb_bindex, id;
+	struct lib_ring_buffer_backend_pages *rpages;
+
+	offset &= chanb->buf_size - 1;
+	sbidx = offset >> chanb->subbuf_size_order;
+	id = bufb->buf_wsb[sbidx].id;
+	sb_bindex = subbuffer_id_get_index(config, id);
+	rpages = bufb->array[sb_bindex];
+	CHAN_WARN_ON(ctx->chan,
+		     config->mode == RING_BUFFER_OVERWRITE
+		     && subbuffer_id_is_noref(config, id));
+	*backend_pages = rpages;
+}
+
+/* Get backend pages from cache. */
+static inline
+struct lib_ring_buffer_backend_pages *
+	lib_ring_buffer_get_backend_pages_from_ctx(const struct lib_ring_buffer_config *config,
+		struct lib_ring_buffer_ctx *ctx)
+{
+	return ctx->backend_pages;
+}
+
+/*
+ * The ring buffer can count events recorded and overwritten per buffer,
+ * but it is disabled by default due to its performance overhead.
+ */
+#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
 static inline
 void subbuffer_count_record(const struct lib_ring_buffer_config *config,
 			    struct lib_ring_buffer_backend *bufb,
@@ -186,6 +235,14 @@ void subbuffer_count_record(const struct lib_ring_buffer_config *config,
 	sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
 	v_inc(config, &bufb->array[sb_bindex]->records_commit);
 }
+#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
+static inline
+void subbuffer_count_record(const struct lib_ring_buffer_config *config,
+			    struct lib_ring_buffer_backend *bufb,
+			    unsigned long idx)
+{
+}
+#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
 
 /*
  * Reader has exclusive subbuffer access for record consumption. No need to
@@ -219,9 +276,10 @@ unsigned long subbuffer_get_records_count(
 
 /*
  * Must be executed at subbuffer delivery when the writer has _exclusive_
- * subbuffer access. See ring_buffer_check_deliver() for details.
- * ring_buffer_get_records_count() must be called to get the records count
- * before this function, because it resets the records_commit count.
+ * subbuffer access. See lib_ring_buffer_check_deliver() for details.
+ * lib_ring_buffer_get_records_count() must be called to get the records
+ * count before this function, because it resets the records_commit
+ * count.
  */
 static inline
 unsigned long subbuffer_count_records_overrun(
@@ -283,6 +341,14 @@ unsigned long subbuffer_get_data_size(
 	return pages->data_size;
 }
 
+static inline
+void subbuffer_inc_packet_count(const struct lib_ring_buffer_config *config,
+				struct lib_ring_buffer_backend *bufb,
+				unsigned long idx)
+{
+	bufb->buf_cnt[idx].seq_cnt++;
+}
+
 /**
  * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
  *                               writer.
@@ -301,7 +367,7 @@ void lib_ring_buffer_clear_noref(const struct lib_ring_buffer_config *config,
 	 * Performing a volatile access to read the sb_pages, because we want to
 	 * read a coherent version of the pointer and the associated noref flag.
 	 */
-	id = ACCESS_ONCE(bufb->buf_wsb[idx].id);
+	id = READ_ONCE(bufb->buf_wsb[idx].id);
 	for (;;) {
 		/* This check is called on the fast path for each record. */
 		if (likely(!subbuffer_id_is_noref(config, id))) {
@@ -370,7 +436,7 @@ int update_read_sb_index(const struct lib_ring_buffer_config *config,
 	if (config->mode == RING_BUFFER_OVERWRITE) {
 		/*
 		 * Exchange the target writer subbuffer with our own unused
-		 * subbuffer. No need to use ACCESS_ONCE() here to read the
+		 * subbuffer. No need to use READ_ONCE() here to read the
 		 * old_wpage, because the value read will be confirmed by the
 		 * following cmpxchg().
 		 */
@@ -400,6 +466,28 @@ int update_read_sb_index(const struct lib_ring_buffer_config *config,
 	return 0;
 }
 
+static inline __attribute__((always_inline))
+void lttng_inline_memcpy(void *dest, const void *src,
+			 unsigned long len)
+{
+	switch (len) {
+	case 1:
+		*(uint8_t *) dest = *(const uint8_t *) src;
+		break;
+	case 2:
+		*(uint16_t *) dest = *(const uint16_t *) src;
+		break;
+	case 4:
+		*(uint32_t *) dest = *(const uint32_t *) src;
+		break;
+	case 8:
+		*(uint64_t *) dest = *(const uint64_t *) src;
+		break;
+	default:
+		inline_memcpy(dest, src, len);
+	}
+}
+
 /*
  * Use the architecture-specific memcpy implementation for constant-sized
  * inputs, but rely on an inline memcpy for length statically unknown.
@@ -411,7 +499,34 @@
 do {								\
 	if (__builtin_constant_p(len))				\
 		memcpy(dest, src, __len);			\
 	else							\
-		inline_memcpy(dest, src, __len);		\
+		lttng_inline_memcpy(dest, src, __len);		\
 } while (0)
 
-#endif /* _LINUX_RING_BUFFER_BACKEND_INTERNAL_H */
+/*
+ * We use __copy_from_user_inatomic to copy userspace data since we already
+ * did the access_ok for the whole range.
+ *
+ * Return 0 if OK, nonzero on error.
+ */
+static inline
+unsigned long lib_ring_buffer_do_copy_from_user_inatomic(void *dest,
+						const void __user *src,
+						unsigned long len)
+{
+	return __copy_from_user_inatomic(dest, src, len);
+}
+
+/*
+ * write len bytes to dest with c
+ */
+static inline
+void lib_ring_buffer_do_memset(char *dest, int c,
+			       unsigned long len)
+{
+	unsigned long i;
+
+	for (i = 0; i < len; i++)
+		dest[i] = c;
+}
+
+#endif /* _LIB_RING_BUFFER_BACKEND_INTERNAL_H */
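
The WRITE_ONCE()/READ_ONCE() conversions in this diff all guard the subbuffer id word: overwrite mode packs a backend-array index, a "noref" flag and an offset counter into one unsigned long precisely so the whole triple can be published with a single volatile store and exchanged atomically. Below is a minimal userspace sketch of that packing; the SB_ID_* names mirror constants this header defines, but the shift and mask values are assumptions chosen for illustration, not the real layout.

/*
 * Userspace sketch of the overwrite-mode subbuffer id packing.
 * Field widths are hypothetical and assume a 64-bit unsigned long;
 * the real SB_ID_OFFSET_SHIFT, SB_ID_OFFSET_MASK, SB_ID_NOREF_MASK
 * and SB_ID_INDEX_MASK are derived from the word size in
 * backend_internal.h.
 */
#include <assert.h>
#include <stdio.h>

#define SB_ID_NOREF_SHIFT	63UL	/* assumption: top bit */
#define SB_ID_NOREF_MASK	(1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_OFFSET_SHIFT	48UL	/* assumption */
#define SB_ID_OFFSET_MASK	(0x7FFFUL << SB_ID_OFFSET_SHIFT)
#define SB_ID_INDEX_MASK	((1UL << SB_ID_OFFSET_SHIFT) - 1)

static unsigned long id_make(unsigned long index, unsigned long offset,
			     int noref)
{
	unsigned long id = index & SB_ID_INDEX_MASK;

	id |= (offset << SB_ID_OFFSET_SHIFT) & SB_ID_OFFSET_MASK;
	if (noref)
		id |= SB_ID_NOREF_MASK;	/* writer holds no reference */
	return id;
}

int main(void)
{
	unsigned long id = id_make(5, 3, 1);

	assert((id & SB_ID_INDEX_MASK) == 5);
	assert(((id & SB_ID_OFFSET_MASK) >> SB_ID_OFFSET_SHIFT) == 3);
	assert(id & SB_ID_NOREF_MASK);
	printf("id = 0x%lx\n", id);
	return 0;
}

Because the flag, index and offset travel in one word, a reader can never observe a noref flag that disagrees with the offset it was published with, which is what subbuffer_id_set_noref_offset() relies on.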
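
lttng_inline_memcpy(), added by the @@ -400 hunk, exists because most record fields are 1, 2, 4 or 8 bytes, and a real memcpy() call is disproportionately expensive on the record-write fast path: the switch lets the compiler collapse each small case into a single load/store pair once the length is known after inlining. The lib_ring_buffer_do_copy() macro complements it by routing compile-time-constant lengths to memcpy(), which the compiler expands inline, and statically unknown lengths to this helper. A self-contained userspace rendition follows; inline_memcpy is a kernel-side fallback, so plain memcpy stands in for it here (an assumption of this sketch).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the kernel's inline_memcpy fallback (assumption). */
#define inline_memcpy memcpy

static inline __attribute__((always_inline))
void lttng_inline_memcpy(void *dest, const void *src, unsigned long len)
{
	switch (len) {
	case 1:
		*(uint8_t *) dest = *(const uint8_t *) src;
		break;
	case 2:
		*(uint16_t *) dest = *(const uint16_t *) src;
		break;
	case 4:
		*(uint32_t *) dest = *(const uint32_t *) src;
		break;
	case 8:
		*(uint64_t *) dest = *(const uint64_t *) src;
		break;
	default:
		inline_memcpy(dest, src, len);
	}
}

int main(void)
{
	uint64_t src = 0x1122334455667788ULL, dst = 0;

	/* len == 8 resolves to a single 64-bit store after inlining. */
	lttng_inline_memcpy(&dst, &src, sizeof(dst));
	printf("0x%llx\n", (unsigned long long) dst);
	return 0;
}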
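
The comment corrected in the @@ -370 hunk describes the reader/writer handshake of overwrite mode: update_read_sb_index() trades the reader's own unused subbuffer id for the writer's full one with cmpxchg(), so the initial plain read needs no READ_ONCE() — if the writer raced and changed the table entry in between, the cmpxchg() simply fails and the reader retries later. Below is a userspace model of that exchange using C11 atomics; the function and variable names and the setup are illustrative, not the real API.

#include <stdatomic.h>
#include <stdio.h>

/* Model of one writer-side subbuffer table entry (illustrative). */
static _Atomic unsigned long buf_wsb_id;

static int reader_take_subbuffer(unsigned long *reader_spare_id)
{
	unsigned long old_id = atomic_load_explicit(&buf_wsb_id,
						    memory_order_relaxed);

	/*
	 * A relaxed load is enough: if the writer concurrently changes
	 * the entry, the compare-exchange below fails, confirming or
	 * refuting the value we read.
	 */
	if (!atomic_compare_exchange_strong(&buf_wsb_id, &old_id,
					    *reader_spare_id))
		return -1;		/* writer moved on, try again */
	*reader_spare_id = old_id;	/* reader now owns the full subbuffer */
	return 0;
}

int main(void)
{
	unsigned long spare = 1;

	atomic_store(&buf_wsb_id, 0);
	if (!reader_take_subbuffer(&spare))
		printf("reader now owns id %lu, writer has id 1\n", spare);
	return 0;
}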