X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=lib%2Fringbuffer%2Ffrontend_internal.h;h=d02a73d32c2a46bc8a26469e1fddb0fb6eb762f1;hb=9f36eaed6f91d5897924b551b44d1edd8cee00e2;hp=281aafd7e4f2bb5227771e17cb7c779af723a460;hpb=f3bc08c50e1b302bceea699027d889fd6d9af525;p=lttng-modules.git

diff --git a/lib/ringbuffer/frontend_internal.h b/lib/ringbuffer/frontend_internal.h
index 281aafd7..d02a73d3 100644
--- a/lib/ringbuffer/frontend_internal.h
+++ b/lib/ringbuffer/frontend_internal.h
@@ -1,25 +1,21 @@
-#ifndef _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H
-#define _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H
-
-/*
- * linux/ringbuffer/frontend_internal.h
+/* SPDX-License-Identifier: (GPL-2.0 OR LGPL-2.1)
  *
- * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * linux/ringbuffer/frontend_internal.h
  *
  * Ring Buffer Library Synchronization Header (internal helpers).
  *
- * Author:
- *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  *
  * See ring_buffer_frontend.c for more information on wait-free algorithms.
- *
- * Dual LGPL v2.1/GPL v2 license.
  */
 
-#include "../../wrapper/ringbuffer/config.h"
-#include "../../wrapper/ringbuffer/backend_types.h"
-#include "../../wrapper/ringbuffer/frontend_types.h"
-#include "../../wrapper/prio_heap.h"	/* For per-CPU read-side iterator */
+#ifndef _LIB_RING_BUFFER_FRONTEND_INTERNAL_H
+#define _LIB_RING_BUFFER_FRONTEND_INTERNAL_H
+
+#include <wrapper/ringbuffer/config.h>
+#include <wrapper/ringbuffer/backend_types.h>
+#include <wrapper/ringbuffer/frontend_types.h>
+#include <lib/prio_heap/lttng_prio_heap.h>	/* For per-CPU read-side iterator */
 
 /* Buffer offset macros */
 
@@ -138,12 +134,27 @@ int last_tsc_overflow(const struct lib_ring_buffer_config *config,
 #endif
 
 extern
-int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx);
+int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx,
+		void *client_ctx);
 
 extern
 void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
 				 enum switch_mode mode);
 
+extern
+void lib_ring_buffer_check_deliver_slow(const struct lib_ring_buffer_config *config,
+				   struct lib_ring_buffer *buf,
+				   struct channel *chan,
+				   unsigned long offset,
+				   unsigned long commit_count,
+				   unsigned long idx,
+				   u64 tsc);
+
+extern
+void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf);
+extern
+void lib_ring_buffer_switch_remote_empty(struct lib_ring_buffer *buf);
+
 /* Buffer write helpers */
 
 static inline
@@ -174,58 +185,6 @@ void lib_ring_buffer_reserve_push_reader(struct lib_ring_buffer *buf,
 					      consumed_new) != consumed_old));
 }
 
-static inline
-void lib_ring_buffer_vmcore_check_deliver(const struct lib_ring_buffer_config *config,
-					  struct lib_ring_buffer *buf,
-					  unsigned long commit_count,
-					  unsigned long idx)
-{
-	if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
-		v_set(config, &buf->commit_hot[idx].seq, commit_count);
-}
-
-static inline
-int lib_ring_buffer_poll_deliver(const struct lib_ring_buffer_config *config,
-				 struct lib_ring_buffer *buf,
-				 struct channel *chan)
-{
-	unsigned long consumed_old, consumed_idx, commit_count, write_offset;
-
-	consumed_old = atomic_long_read(&buf->consumed);
-	consumed_idx = subbuf_index(consumed_old, chan);
-	commit_count = v_read(config, &buf->commit_cold[consumed_idx].cc_sb);
-	/*
-	 * No memory barrier here, since we are only interested
-	 * in a statistically correct polling result. The next poll will
-	 * get the data if we are racing. The mb() that ensures correct
-	 * memory order is in get_subbuf.
-	 */
-	write_offset = v_read(config, &buf->offset);
-
-	/*
-	 * Check that the subbuffer we are trying to consume has been
-	 * already fully committed.
- */ - - if (((commit_count - chan->backend.subbuf_size) - & chan->commit_count_mask) - - (buf_trunc(consumed_old, chan) - >> chan->backend.num_subbuf_order) - != 0) - return 0; - - /* - * Check that we are not about to read the same subbuffer in - * which the writer head is. - */ - if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_old, chan) - == 0) - return 0; - - return 1; - -} - static inline int lib_ring_buffer_pending_data(const struct lib_ring_buffer_config *config, struct lib_ring_buffer *buf, @@ -275,104 +234,30 @@ int lib_ring_buffer_reserve_committed(const struct lib_ring_buffer_config *confi - (commit_count & chan->commit_count_mask) == 0); } +/* + * Receive end of subbuffer TSC as parameter. It has been read in the + * space reservation loop of either reserve or switch, which ensures it + * progresses monotonically with event records in the buffer. Therefore, + * it ensures that the end timestamp of a subbuffer is <= begin + * timestamp of the following subbuffers. + */ static inline void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config, struct lib_ring_buffer *buf, struct channel *chan, unsigned long offset, unsigned long commit_count, - unsigned long idx) + unsigned long idx, + u64 tsc) { unsigned long old_commit_count = commit_count - chan->backend.subbuf_size; - u64 tsc; /* Check if all commits have been done */ if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order) - - (old_commit_count & chan->commit_count_mask) == 0)) { - /* - * If we succeeded at updating cc_sb below, we are the subbuffer - * writer delivering the subbuffer. Deals with concurrent - * updates of the "cc" value without adding a add_return atomic - * operation to the fast path. - * - * We are doing the delivery in two steps: - * - First, we cmpxchg() cc_sb to the new value - * old_commit_count + 1. This ensures that we are the only - * subbuffer user successfully filling the subbuffer, but we - * do _not_ set the cc_sb value to "commit_count" yet. - * Therefore, other writers that would wrap around the ring - * buffer and try to start writing to our subbuffer would - * have to drop records, because it would appear as - * non-filled. - * We therefore have exclusive access to the subbuffer control - * structures. This mutual exclusion with other writers is - * crucially important to perform record overruns count in - * flight recorder mode locklessly. - * - When we are ready to release the subbuffer (either for - * reading or for overrun by other writers), we simply set the - * cc_sb value to "commit_count" and perform delivery. - * - * The subbuffer size is least 2 bytes (minimum size: 1 page). - * This guarantees that old_commit_count + 1 != commit_count. - */ - if (likely(v_cmpxchg(config, &buf->commit_cold[idx].cc_sb, - old_commit_count, old_commit_count + 1) - == old_commit_count)) { - /* - * Start of exclusive subbuffer access. We are - * guaranteed to be the last writer in this subbuffer - * and any other writer trying to access this subbuffer - * in this state is required to drop records. - */ - tsc = config->cb.ring_buffer_clock_read(chan); - v_add(config, - subbuffer_get_records_count(config, - &buf->backend, idx), - &buf->records_count); - v_add(config, - subbuffer_count_records_overrun(config, - &buf->backend, - idx), - &buf->records_overrun); - config->cb.buffer_end(buf, tsc, idx, - lib_ring_buffer_get_data_size(config, - buf, - idx)); - - /* - * Set noref flag and offset for this subbuffer id. 
-			 * Contains a memory barrier that ensures counter stores
-			 * are ordered before set noref and offset.
-			 */
-			lib_ring_buffer_set_noref_offset(config, &buf->backend, idx,
-							 buf_trunc_val(offset, chan));
-
-			/*
-			 * Order set_noref and record counter updates before the
-			 * end of subbuffer exclusive access. Orders with
-			 * respect to writers coming into the subbuffer after
-			 * wrap around, and also order wrt concurrent readers.
-			 */
-			smp_mb();
-			/* End of exclusive subbuffer access */
-			v_set(config, &buf->commit_cold[idx].cc_sb,
-			      commit_count);
-			lib_ring_buffer_vmcore_check_deliver(config, buf,
-							     commit_count, idx);
-
-			/*
-			 * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
-			 */
-			if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
-			    && atomic_long_read(&buf->active_readers)
-			    && lib_ring_buffer_poll_deliver(config, buf, chan)) {
-				wake_up_interruptible(&buf->read_wait);
-				wake_up_interruptible(&chan->read_wait);
-			}
-
-		}
-	}
+		     - (old_commit_count & chan->commit_count_mask) == 0))
+		lib_ring_buffer_check_deliver_slow(config, buf, chan, offset,
+						   commit_count, idx, tsc);
 }
 
 /*
@@ -387,31 +272,27 @@ static inline
 void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *config,
 					   struct lib_ring_buffer *buf,
 					   struct channel *chan,
-					   unsigned long idx,
 					   unsigned long buf_offset,
 					   unsigned long commit_count,
-					   size_t slot_size)
+					   struct commit_counters_hot *cc_hot)
 {
-	unsigned long offset, commit_seq_old;
+	unsigned long commit_seq_old;
 
 	if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
 		return;
 
-	offset = buf_offset + slot_size;
-
 	/*
 	 * subbuf_offset includes commit_count_mask. We can simply
 	 * compare the offsets within the subbuffer without caring about
 	 * buffer full/empty mismatch because offset is never zero here
 	 * (subbuffer header and record headers have non-zero length).
	 */
-	if (unlikely(subbuf_offset(offset - commit_count, chan)))
+	if (unlikely(subbuf_offset(buf_offset - commit_count, chan)))
 		return;
 
-	commit_seq_old = v_read(config, &buf->commit_hot[idx].seq);
-	while ((long) (commit_seq_old - commit_count) < 0)
-		commit_seq_old = v_cmpxchg(config, &buf->commit_hot[idx].seq,
-					   commit_seq_old, commit_count);
+	commit_seq_old = v_read(config, &cc_hot->seq);
+	if (likely((long) (commit_seq_old - commit_count) < 0))
+		v_set(config, &cc_hot->seq, commit_count);
 }
 
 extern int lib_ring_buffer_create(struct lib_ring_buffer *buf,
@@ -421,4 +302,4 @@ extern void lib_ring_buffer_free(struct lib_ring_buffer *buf);
 /* Keep track of trap nesting inside ring buffer code */
 DECLARE_PER_CPU(unsigned int, lib_ring_buffer_nesting);
 
-#endif /* _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H */
+#endif /* _LIB_RING_BUFFER_FRONTEND_INTERNAL_H */
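
The large comment removed above, together with the delivery code itself, moves into lib_ring_buffer_check_deliver_slow(). It describes a two-step delivery protocol: the writer first claims exclusive ownership of the sub-buffer by moving the cold commit counter from old_commit_count to old_commit_count + 1 with a compare-and-exchange, performs its accounting, then publishes commit_count to release the sub-buffer. Below is a minimal standalone sketch of that pattern using C11 atomics; the struct and helper names are hypothetical and merely stand in for the lttng-modules v_cmpxchg()/v_set() operations on commit_cold[idx].cc_sb.

/*
 * Sketch only: two-step sub-buffer delivery. Names are hypothetical;
 * the real code operates on buf->commit_cold[idx].cc_sb with
 * v_cmpxchg()/v_set() and lives in lib_ring_buffer_check_deliver_slow().
 */
#include <stdatomic.h>
#include <stdbool.h>

struct subbuf_cold {
	atomic_ulong cc_sb;	/* cold commit counter for one sub-buffer */
};

static bool try_deliver(struct subbuf_cold *sb, unsigned long old_commit_count,
			unsigned long commit_count)
{
	unsigned long expected = old_commit_count;

	/*
	 * Step 1: claim exclusive access by moving cc_sb from
	 * old_commit_count to old_commit_count + 1. Concurrent writers
	 * wrapping around see a "non-filled" sub-buffer and drop records.
	 */
	if (!atomic_compare_exchange_strong(&sb->cc_sb, &expected,
					    old_commit_count + 1))
		return false;	/* another writer owns the delivery */

	/*
	 * Exclusive section: update records_count/records_overrun,
	 * call buffer_end(), set the noref flag... (elided in this sketch).
	 */

	/*
	 * Step 2: publish the final commit count, releasing the sub-buffer
	 * to readers and to overwriting writers. The release store plays
	 * the role of the smp_mb() + v_set() pair in the original code.
	 */
	atomic_store_explicit(&sb->cc_sb, commit_count, memory_order_release);
	return true;
}

Because a sub-buffer is at least 2 bytes (one page in practice), old_commit_count + 1 can never equal commit_count, so the intermediate "claimed" value is never mistaken for a fully delivered sub-buffer.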
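After this change, the inline fast path of lib_ring_buffer_check_deliver() only decides whether all commits for the sub-buffer containing offset have been done, and defers the delivery work to lib_ring_buffer_check_deliver_slow(). The following worked example of that arithmetic uses assumed channel geometry (four 4 KiB sub-buffers) and an illustrative commit_count_mask; the numbers are hypothetical and not part of the patch.

/*
 * Worked example (illustrative numbers only): how the fast-path check
 * detects a fully committed sub-buffer.
 */
#include <stdio.h>

int main(void)
{
	unsigned long subbuf_size = 4096;		/* 4 KiB per sub-buffer */
	unsigned long num_subbuf_order = 2;		/* 4 sub-buffers */
	unsigned long buf_size = subbuf_size << num_subbuf_order;
	unsigned long commit_count_mask = ~0UL >> num_subbuf_order;

	/*
	 * Second pass through the buffer: a record completing sub-buffer 1
	 * leaves the write offset within that sub-buffer's second-pass range.
	 */
	unsigned long offset = buf_size + subbuf_size + 100;
	/* Each full pass adds subbuf_size to that sub-buffer's commit count. */
	unsigned long commit_count = 2 * subbuf_size;
	unsigned long old_commit_count = commit_count - subbuf_size;

	unsigned long buf_trunc = offset & ~(buf_size - 1);	/* 16384 */
	int fully_committed = ((buf_trunc >> num_subbuf_order)
			       - (old_commit_count & commit_count_mask)) == 0;

	printf("fully committed: %d\n", fully_committed);	/* prints 1 */
	return 0;
}

buf_trunc(offset) >> num_subbuf_order grows by subbuf_size on every complete pass through the buffer, so it matches old_commit_count exactly when the sub-buffer has received all of its commits for the current pass; any missing commit leaves a non-zero difference and the fast path returns without calling the slow path.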