diff --git a/libringbuffer/backend_internal.h b/libringbuffer/backend_internal.h
index c4fb7604..a325875c 100644
--- a/libringbuffer/backend_internal.h
+++ b/libringbuffer/backend_internal.h
@@ -1,20 +1,21 @@
-#ifndef _LINUX_RING_BUFFER_BACKEND_INTERNAL_H
-#define _LINUX_RING_BUFFER_BACKEND_INTERNAL_H
-
 /*
- * linux/ringbuffer/backend_internal.h
+ * SPDX-License-Identifier: LGPL-2.1-only
  *
- * Copyright (C) 2008-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  *
  * Ring buffer backend (internal helpers).
- *
- * Dual LGPL v2.1/GPL v2 license.
  */
 
+#ifndef _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
+#define _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
+
+#include <stddef.h>
+#include <stdint.h>
 #include <unistd.h>
 #include <urcu/compiler.h>
 
-#include <lttng/ringbuffer-config.h>
+#include <lttng/ringbuffer-context.h>
+#include "ringbuffer-config.h"
 #include "backend_types.h"
 #include "frontend_types.h"
 #include "shm.h"
@@ -24,29 +25,47 @@
 /* Ring buffer and channel backend create/free */
 int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
-				   struct channel_backend *chan, int cpu,
+				   struct channel_backend *chan,
+				   int cpu,
 				   struct lttng_ust_shm_handle *handle,
-				   struct shm_object *shmobj);
-void channel_backend_unregister_notifiers(struct channel_backend *chanb);
-void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb);
+				   struct shm_object *shmobj)
+	__attribute__((visibility("hidden")));
+
+void channel_backend_unregister_notifiers(struct channel_backend *chanb)
+	__attribute__((visibility("hidden")));
+
+void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb)
+	__attribute__((visibility("hidden")));
+
 int channel_backend_init(struct channel_backend *chanb,
 			 const char *name,
 			 const struct lttng_ust_lib_ring_buffer_config *config,
 			 size_t subbuf_size,
-			 size_t num_subbuf, struct lttng_ust_shm_handle *handle);
+			 size_t num_subbuf, struct lttng_ust_shm_handle *handle,
+			 const int *stream_fds)
+	__attribute__((visibility("hidden")));
+
 void channel_backend_free(struct channel_backend *chanb,
-			  struct lttng_ust_shm_handle *handle);
+			  struct lttng_ust_shm_handle *handle)
+	__attribute__((visibility("hidden")));
 
 void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
-				   struct lttng_ust_shm_handle *handle);
-void channel_backend_reset(struct channel_backend *chanb);
+				   struct lttng_ust_shm_handle *handle)
+	__attribute__((visibility("hidden")));
+
+void channel_backend_reset(struct channel_backend *chanb)
+	__attribute__((visibility("hidden")));
+
+int lib_ring_buffer_backend_init(void)
+	__attribute__((visibility("hidden")));
 
-int lib_ring_buffer_backend_init(void);
-void lib_ring_buffer_backend_exit(void);
+void lib_ring_buffer_backend_exit(void)
+	__attribute__((visibility("hidden")));
 
 extern void _lib_ring_buffer_write(struct lttng_ust_lib_ring_buffer_backend *bufb,
 				   size_t offset, const void *src, size_t len,
-				   ssize_t pagecpy)
+				   ssize_t pagecpy)
+	__attribute__((visibility("hidden")));
 
 /*
  * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
@@ -103,8 +122,9 @@ unsigned long subbuffer_id(const struct lttng_ust_lib_ring_buffer_config *config
  * bits are identical, else 0.
*/ static inline -int subbuffer_id_compare_offset(const struct lttng_ust_lib_ring_buffer_config *config, - unsigned long id, unsigned long offset) +int subbuffer_id_compare_offset( + const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)), + unsigned long id, unsigned long offset) { return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT); } @@ -183,16 +203,82 @@ int subbuffer_id_check_index(const struct lttng_ust_lib_ring_buffer_config *conf return 0; } +static inline +int lib_ring_buffer_backend_get_pages(const struct lttng_ust_lib_ring_buffer_config *config, + struct lttng_ust_lib_ring_buffer_ctx *ctx, + struct lttng_ust_lib_ring_buffer_backend_pages **backend_pages) +{ + struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv; + struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx_private->buf->backend; + struct channel_backend *chanb = &ctx_private->chan->backend; + struct lttng_ust_shm_handle *handle = ctx_private->chan->handle; + size_t sbidx; + size_t offset = ctx_private->buf_offset; + struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb; + struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages; + unsigned long sb_bindex, id; + struct lttng_ust_lib_ring_buffer_backend_pages *_backend_pages; + + offset &= chanb->buf_size - 1; + sbidx = offset >> chanb->subbuf_size_order; + wsb = shmp_index(handle, bufb->buf_wsb, sbidx); + if (caa_unlikely(!wsb)) + return -1; + id = wsb->id; + sb_bindex = subbuffer_id_get_index(config, id); + rpages = shmp_index(handle, bufb->array, sb_bindex); + if (caa_unlikely(!rpages)) + return -1; + CHAN_WARN_ON(ctx_private->chan, + config->mode == RING_BUFFER_OVERWRITE + && subbuffer_id_is_noref(config, id)); + _backend_pages = shmp(handle, rpages->shmp); + if (caa_unlikely(!_backend_pages)) + return -1; + *backend_pages = _backend_pages; + return 0; +} + +/* Get backend pages from cache. */ +static inline +struct lttng_ust_lib_ring_buffer_backend_pages * + lib_ring_buffer_get_backend_pages_from_ctx( + const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)), + struct lttng_ust_lib_ring_buffer_ctx *ctx) +{ + return ctx->priv->backend_pages; +} + +/* + * The ring buffer can count events recorded and overwritten per buffer, + * but it is disabled by default due to its performance overhead. 
+ */ +#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS static inline void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config, + const struct lttng_ust_lib_ring_buffer_ctx *ctx, struct lttng_ust_lib_ring_buffer_backend *bufb, unsigned long idx, struct lttng_ust_shm_handle *handle) { - unsigned long sb_bindex; + struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages; - sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id); - v_inc(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_commit); + backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx); + if (caa_unlikely(!backend_pages)) { + if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages)) + return; + } + v_inc(config, &backend_pages->records_commit); } +#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */ +static inline +void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)), + const struct lttng_ust_lib_ring_buffer_ctx *ctx __attribute__((unused)), + struct lttng_ust_lib_ring_buffer_backend *bufb __attribute__((unused)), + unsigned long idx __attribute__((unused)), + struct lttng_ust_shm_handle *handle __attribute__((unused))) +{ +} +#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */ /* * Reader has exclusive subbuffer access for record consumption. No need to @@ -204,12 +290,23 @@ void subbuffer_consume_record(const struct lttng_ust_lib_ring_buffer_config *con struct lttng_ust_shm_handle *handle) { unsigned long sb_bindex; + struct lttng_ust_lib_ring_buffer_channel *chan; + struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp; + struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages; sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id); - CHAN_WARN_ON(shmp(handle, bufb->chan), - !v_read(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_unread)); + chan = shmp(handle, bufb->chan); + if (!chan) + return; + pages_shmp = shmp_index(handle, bufb->array, sb_bindex); + if (!pages_shmp) + return; + backend_pages = shmp(handle, pages_shmp->shmp); + if (!backend_pages) + return; + CHAN_WARN_ON(chan, !v_read(config, &backend_pages->records_unread)); /* Non-atomic decrement protected by exclusive subbuffer access */ - _v_dec(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_unread); + _v_dec(config, &backend_pages->records_unread); v_inc(config, &bufb->records_read); } @@ -221,16 +318,29 @@ unsigned long subbuffer_get_records_count( struct lttng_ust_shm_handle *handle) { unsigned long sb_bindex; + struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb; + struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages; + struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages; - sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id); - return v_read(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_commit); + wsb = shmp_index(handle, bufb->buf_wsb, idx); + if (!wsb) + return 0; + sb_bindex = subbuffer_id_get_index(config, wsb->id); + rpages = shmp_index(handle, bufb->array, sb_bindex); + if (!rpages) + return 0; + backend_pages = shmp(handle, rpages->shmp); + if (!backend_pages) + return 0; + return v_read(config, &backend_pages->records_commit); } /* * Must be executed at subbuffer delivery when the writer has _exclusive_ - * subbuffer access. See ring_buffer_check_deliver() for details. 
- * ring_buffer_get_records_count() must be called to get the records count - * before this function, because it resets the records_commit count. + * subbuffer access. See lib_ring_buffer_check_deliver() for details. + * lib_ring_buffer_get_records_count() must be called to get the records + * count before this function, because it resets the records_commit + * count. */ static inline unsigned long subbuffer_count_records_overrun( @@ -239,15 +349,25 @@ unsigned long subbuffer_count_records_overrun( unsigned long idx, struct lttng_ust_shm_handle *handle) { - struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages; unsigned long overruns, sb_bindex; + struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb; + struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages; + struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages; - sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id); - pages = shmp_index(handle, bufb->array, sb_bindex); - overruns = v_read(config, &shmp(handle, pages->shmp)->records_unread); - v_set(config, &shmp(handle, pages->shmp)->records_unread, - v_read(config, &shmp(handle, pages->shmp)->records_commit)); - v_set(config, &shmp(handle, pages->shmp)->records_commit, 0); + wsb = shmp_index(handle, bufb->buf_wsb, idx); + if (!wsb) + return 0; + sb_bindex = subbuffer_id_get_index(config, wsb->id); + rpages = shmp_index(handle, bufb->array, sb_bindex); + if (!rpages) + return 0; + backend_pages = shmp(handle, rpages->shmp); + if (!backend_pages) + return 0; + overruns = v_read(config, &backend_pages->records_unread); + v_set(config, &backend_pages->records_unread, + v_read(config, &backend_pages->records_commit)); + v_set(config, &backend_pages->records_commit, 0); return overruns; } @@ -259,12 +379,22 @@ void subbuffer_set_data_size(const struct lttng_ust_lib_ring_buffer_config *conf unsigned long data_size, struct lttng_ust_shm_handle *handle) { - struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages; unsigned long sb_bindex; + struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb; + struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages; + struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages; - sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id); - pages = shmp_index(handle, bufb->array, sb_bindex); - shmp(handle, pages->shmp)->data_size = data_size; + wsb = shmp_index(handle, bufb->buf_wsb, idx); + if (!wsb) + return; + sb_bindex = subbuffer_id_get_index(config, wsb->id); + rpages = shmp_index(handle, bufb->array, sb_bindex); + if (!rpages) + return; + backend_pages = shmp(handle, rpages->shmp); + if (!backend_pages) + return; + backend_pages->data_size = data_size; } static inline @@ -273,12 +403,18 @@ unsigned long subbuffer_get_read_data_size( struct lttng_ust_lib_ring_buffer_backend *bufb, struct lttng_ust_shm_handle *handle) { - struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages; unsigned long sb_bindex; + struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp; + struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages; sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id); - pages = shmp_index(handle, bufb->array, sb_bindex); - return shmp(handle, pages->shmp)->data_size; + pages_shmp = shmp_index(handle, bufb->array, sb_bindex); + if (!pages_shmp) + return 0; + backend_pages = shmp(handle, pages_shmp->shmp); + if (!backend_pages) + return 0; + return backend_pages->data_size; } static inline @@ -288,12 +424,36 @@ unsigned long 
subbuffer_get_data_size( unsigned long idx, struct lttng_ust_shm_handle *handle) { - struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages; unsigned long sb_bindex; + struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb; + struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages; + struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages; - sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id); - pages = shmp_index(handle, bufb->array, sb_bindex); - return shmp(handle, pages->shmp)->data_size; + wsb = shmp_index(handle, bufb->buf_wsb, idx); + if (!wsb) + return 0; + sb_bindex = subbuffer_id_get_index(config, wsb->id); + rpages = shmp_index(handle, bufb->array, sb_bindex); + if (!rpages) + return 0; + backend_pages = shmp(handle, rpages->shmp); + if (!backend_pages) + return 0; + return backend_pages->data_size; +} + +static inline +void subbuffer_inc_packet_count( + const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)), + struct lttng_ust_lib_ring_buffer_backend *bufb, + unsigned long idx, struct lttng_ust_shm_handle *handle) +{ + struct lttng_ust_lib_ring_buffer_backend_counts *counts; + + counts = shmp_index(handle, bufb->buf_cnt, idx); + if (!counts) + return; + counts->seq_cnt++; } /** @@ -307,6 +467,7 @@ void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config * struct lttng_ust_shm_handle *handle) { unsigned long id, new_id; + struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb; if (config->mode != RING_BUFFER_OVERWRITE) return; @@ -315,7 +476,10 @@ void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config * * Performing a volatile access to read the sb_pages, because we want to * read a coherent version of the pointer and the associated noref flag. */ - id = CMM_ACCESS_ONCE(shmp_index(handle, bufb->buf_wsb, idx)->id); + wsb = shmp_index(handle, bufb->buf_wsb, idx); + if (!wsb) + return; + id = CMM_ACCESS_ONCE(wsb->id); for (;;) { /* This check is called on the fast path for each record. */ if (caa_likely(!subbuffer_id_is_noref(config, id))) { @@ -329,7 +493,7 @@ void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config * } new_id = id; subbuffer_id_clear_noref(config, &new_id); - new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, idx)->id, id, new_id); + new_id = uatomic_cmpxchg(&wsb->id, id, new_id); if (caa_likely(new_id == id)) break; id = new_id; @@ -346,9 +510,15 @@ void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_con unsigned long idx, unsigned long offset, struct lttng_ust_shm_handle *handle) { + struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb; + struct lttng_ust_lib_ring_buffer_channel *chan; + if (config->mode != RING_BUFFER_OVERWRITE) return; + wsb = shmp_index(handle, bufb->buf_wsb, idx); + if (!wsb) + return; /* * Because ring_buffer_set_noref() is only called by a single thread * (the one which updated the cc_sb value), there are no concurrent @@ -360,14 +530,16 @@ void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_con * subbuffer_set_noref() uses a volatile store to deal with concurrent * readers of the noref flag. */ - CHAN_WARN_ON(shmp(handle, bufb->chan), - subbuffer_id_is_noref(config, shmp_index(handle, bufb->buf_wsb, idx)->id)); + chan = shmp(handle, bufb->chan); + if (!chan) + return; + CHAN_WARN_ON(chan, subbuffer_id_is_noref(config, wsb->id)); /* * Memory barrier that ensures counter stores are ordered before set * noref and offset. 
*/ cmm_smp_mb(); - subbuffer_id_set_noref_offset(config, &shmp_index(handle, bufb->buf_wsb, idx)->id, offset); + subbuffer_id_set_noref_offset(config, &wsb->id, offset); } /** @@ -376,21 +548,28 @@ void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_con static inline int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config, struct lttng_ust_lib_ring_buffer_backend *bufb, - struct channel_backend *chanb, + struct channel_backend *chanb __attribute__((unused)), unsigned long consumed_idx, unsigned long consumed_count, struct lttng_ust_shm_handle *handle) { + struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb; unsigned long old_id, new_id; + wsb = shmp_index(handle, bufb->buf_wsb, consumed_idx); + if (caa_unlikely(!wsb)) + return -EPERM; + if (config->mode == RING_BUFFER_OVERWRITE) { + struct lttng_ust_lib_ring_buffer_channel *chan; + /* * Exchange the target writer subbuffer with our own unused * subbuffer. No need to use CMM_ACCESS_ONCE() here to read the * old_wpage, because the value read will be confirmed by the * following cmpxchg(). */ - old_id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id; + old_id = wsb->id; if (caa_unlikely(!subbuffer_id_is_noref(config, old_id))) return -EAGAIN; /* @@ -400,18 +579,19 @@ int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config, if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id, consumed_count))) return -EAGAIN; - CHAN_WARN_ON(shmp(handle, bufb->chan), - !subbuffer_id_is_noref(config, bufb->buf_rsb.id)); + chan = shmp(handle, bufb->chan); + if (caa_unlikely(!chan)) + return -EPERM; + CHAN_WARN_ON(chan, !subbuffer_id_is_noref(config, bufb->buf_rsb.id)); subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id, consumed_count); - new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, consumed_idx)->id, old_id, - bufb->buf_rsb.id); + new_id = uatomic_cmpxchg(&wsb->id, old_id, bufb->buf_rsb.id); if (caa_unlikely(old_id != new_id)) return -EAGAIN; bufb->buf_rsb.id = new_id; } else { /* No page exchange, use the writer page directly */ - bufb->buf_rsb.id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id; + bufb->buf_rsb.id = wsb->id; } return 0; } @@ -420,6 +600,32 @@ int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config, #define inline_memcpy(dest, src, n) memcpy(dest, src, n) #endif +static inline +void lttng_inline_memcpy(void *dest, const void *src, + unsigned long len) + __attribute__((always_inline)); +static inline +void lttng_inline_memcpy(void *dest, const void *src, + unsigned long len) +{ + switch (len) { + case 1: + *(uint8_t *) dest = *(const uint8_t *) src; + break; + case 2: + *(uint16_t *) dest = *(const uint16_t *) src; + break; + case 4: + *(uint32_t *) dest = *(const uint32_t *) src; + break; + case 8: + *(uint64_t *) dest = *(const uint64_t *) src; + break; + default: + inline_memcpy(dest, src, len); + } +} + /* * Use the architecture-specific memcpy implementation for constant-sized * inputs, but rely on an inline memcpy for length statically unknown. 
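
The size-switch pattern that the new lttng_inline_memcpy() helper introduces above can be exercised on its own. The following standalone sketch (hypothetical demo_* names, not part of this patch) shows the idea: copies whose length is one of the common fixed record sizes reduce to a single load/store pair, while any other length falls back to memcpy(). Note that the casts assume suitably aligned pointers.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Minimal sketch of the fixed-size copy specialization (illustration only). */
static inline void demo_inline_copy(void *dest, const void *src, unsigned long len)
{
	switch (len) {
	case 1:		/* single byte: one load/store */
		*(uint8_t *) dest = *(const uint8_t *) src;
		break;
	case 2:
		*(uint16_t *) dest = *(const uint16_t *) src;
		break;
	case 4:
		*(uint32_t *) dest = *(const uint32_t *) src;
		break;
	case 8:
		*(uint64_t *) dest = *(const uint64_t *) src;
		break;
	default:	/* length not one of the fast cases: call memcpy */
		memcpy(dest, src, len);
	}
}

int main(void)
{
	uint64_t src = 0x1122334455667788ULL, dst = 0;

	demo_inline_copy(&dst, &src, sizeof(src));	/* hits the 8-byte case */
	printf("copied: 0x%llx\n", (unsigned long long) dst);
	return 0;
}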
@@ -431,12 +637,24 @@ do { \ if (__builtin_constant_p(len)) \ memcpy(dest, src, __len); \ else \ - inline_memcpy(dest, src, __len); \ + lttng_inline_memcpy(dest, src, __len); \ } while (0) +/* + * write len bytes to dest with c + */ +static inline +void lib_ring_buffer_do_memset(char *dest, char c, unsigned long len) +{ + unsigned long i; + + for (i = 0; i < len; i++) + dest[i] = c; +} + /* arch-agnostic implementation */ -static inline int fls(unsigned int x) +static inline int lttng_ust_fls(unsigned int x) { int r = 32; @@ -459,7 +677,7 @@ static inline int fls(unsigned int x) r -= 2; } if (!(x & 0x80000000U)) { - x <<= 1; + /* No need to bit shift on last operation */ r -= 1; } return r; @@ -469,10 +687,10 @@ static inline int get_count_order(unsigned int count) { int order; - order = fls(count) - 1; + order = lttng_ust_fls(count) - 1; if (count & (count - 1)) order++; return order; } -#endif /* _LINUX_RING_BUFFER_BACKEND_INTERNAL_H */ +#endif /* _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H */
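
For reference, the renamed lttng_ust_fls()/get_count_order() pair at the end of the header computes ceil(log2(count)); the backend uses such orders to express power-of-two sub-buffer sizes and counts. A small self-contained check of that logic (hypothetical demo_* names, not part of this patch):

#include <assert.h>
#include <stdio.h>

/* find-last-set: 1-based index of the highest set bit, 0 if none */
static int demo_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U))
		r -= 1;	/* no shift needed on the last step */
	return r;
}

/* smallest order such that (1U << order) >= count, i.e. ceil(log2(count)) */
static int demo_get_count_order(unsigned int count)
{
	int order = demo_fls(count) - 1;

	if (count & (count - 1))	/* not a power of two: round up */
		order++;
	return order;
}

int main(void)
{
	assert(demo_get_count_order(4096) == 12);	/* exact power of two */
	assert(demo_get_count_order(4097) == 13);	/* rounds up */
	assert(demo_get_count_order(1) == 0);
	printf("4096 -> %d, 4097 -> %d\n",
		demo_get_count_order(4096), demo_get_count_order(4097));
	return 0;
}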