-#ifndef _LIB_RING_BUFFER_FRONTEND_INTERNAL_H
-#define _LIB_RING_BUFFER_FRONTEND_INTERNAL_H
-
-/*
+/* SPDX-License-Identifier: (GPL-2.0 OR LGPL-2.1)
+ *
* linux/ringbuffer/frontend_internal.h
*
* Ring Buffer Library Synchronization Header (internal helpers).
*
* Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; only
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- * Author:
- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
* See ring_buffer_frontend.c for more information on wait-free algorithms.
*/
+#ifndef _LIB_RING_BUFFER_FRONTEND_INTERNAL_H
+#define _LIB_RING_BUFFER_FRONTEND_INTERNAL_H
+
#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend_types.h>
#include <wrapper/ringbuffer/frontend_types.h>
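+/*
+ * client_ctx is an opaque, client-owned pointer; the ring buffer does not
+ * interpret it and passes it through unmodified to the client callbacks.
+ */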
extern
-int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx);
+int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx,
+ void *client_ctx);
extern
void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
enum switch_mode mode);
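+/*
+ * Out-of-line slow path for sub-buffer delivery. Called by the inline
+ * lib_ring_buffer_check_deliver() fast path once it observes that all
+ * commits for the sub-buffer at "idx" have been performed.
+ */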
+extern
+void lib_ring_buffer_check_deliver_slow(const struct lib_ring_buffer_config *config,
+ struct lib_ring_buffer *buf,
+ struct channel *chan,
+ unsigned long offset,
+ unsigned long commit_count,
+ unsigned long idx,
+ u64 tsc);
+
extern
void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf);
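+/*
+ * Variant of lib_ring_buffer_switch_remote() that performs the remote
+ * switch even when the current sub-buffer is empty.
+ */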
+extern
+void lib_ring_buffer_switch_remote_empty(struct lib_ring_buffer *buf);
/* Buffer write helpers */
consumed_new) != consumed_old));
}
-static inline
-void lib_ring_buffer_vmcore_check_deliver(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
- unsigned long commit_count,
- unsigned long idx)
-{
- if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
- v_set(config, &buf->commit_hot[idx].seq, commit_count);
-}
-
-static inline
-int lib_ring_buffer_poll_deliver(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
- struct channel *chan)
-{
- unsigned long consumed_old, consumed_idx, commit_count, write_offset;
-
- consumed_old = atomic_long_read(&buf->consumed);
- consumed_idx = subbuf_index(consumed_old, chan);
- commit_count = v_read(config, &buf->commit_cold[consumed_idx].cc_sb);
- /*
- * No memory barrier here, since we are only interested
- * in a statistically correct polling result. The next poll will
- * get the data is we are racing. The mb() that ensures correct
- * memory order is in get_subbuf.
- */
- write_offset = v_read(config, &buf->offset);
-
- /*
- * Check that the subbuffer we are trying to consume has been
- * already fully committed.
- */
-
- if (((commit_count - chan->backend.subbuf_size)
- & chan->commit_count_mask)
- - (buf_trunc(consumed_old, chan)
- >> chan->backend.num_subbuf_order)
- != 0)
- return 0;
-
- /*
- * Check that we are not about to read the same subbuffer in
- * which the writer head is.
- */
- if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_old, chan)
- == 0)
- return 0;
-
- return 1;
-
-}
-
static inline
int lib_ring_buffer_pending_data(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf,
/* Check if all commits have been done */
if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
- - (old_commit_count & chan->commit_count_mask) == 0)) {
- /*
- * If we succeeded at updating cc_sb below, we are the subbuffer
- * writer delivering the subbuffer. Deals with concurrent
- * updates of the "cc" value without adding a add_return atomic
- * operation to the fast path.
- *
- * We are doing the delivery in two steps:
- * - First, we cmpxchg() cc_sb to the new value
- * old_commit_count + 1. This ensures that we are the only
- * subbuffer user successfully filling the subbuffer, but we
- * do _not_ set the cc_sb value to "commit_count" yet.
- * Therefore, other writers that would wrap around the ring
- * buffer and try to start writing to our subbuffer would
- * have to drop records, because it would appear as
- * non-filled.
- * We therefore have exclusive access to the subbuffer control
- * structures. This mutual exclusion with other writers is
- * crucially important to perform record overruns count in
- * flight recorder mode locklessly.
- * - When we are ready to release the subbuffer (either for
- * reading or for overrun by other writers), we simply set the
- * cc_sb value to "commit_count" and perform delivery.
- *
- * The subbuffer size is least 2 bytes (minimum size: 1 page).
- * This guarantees that old_commit_count + 1 != commit_count.
- */
-
- /*
- * Order prior updates to reserve count prior to the
- * commit_cold cc_sb update.
- */
- smp_wmb();
- if (likely(v_cmpxchg(config, &buf->commit_cold[idx].cc_sb,
- old_commit_count, old_commit_count + 1)
- == old_commit_count)) {
- /*
- * Start of exclusive subbuffer access. We are
- * guaranteed to be the last writer in this subbuffer
- * and any other writer trying to access this subbuffer
- * in this state is required to drop records.
- */
- v_add(config,
- subbuffer_get_records_count(config,
- &buf->backend, idx),
- &buf->records_count);
- v_add(config,
- subbuffer_count_records_overrun(config,
- &buf->backend,
- idx),
- &buf->records_overrun);
- config->cb.buffer_end(buf, tsc, idx,
- lib_ring_buffer_get_data_size(config,
- buf,
- idx));
-
- /*
- * Increment the packet counter while we have exclusive
- * access.
- */
- subbuffer_inc_packet_count(config, &buf->backend, idx);
-
- /*
- * Set noref flag and offset for this subbuffer id.
- * Contains a memory barrier that ensures counter stores
- * are ordered before set noref and offset.
- */
- lib_ring_buffer_set_noref_offset(config, &buf->backend, idx,
- buf_trunc_val(offset, chan));
-
- /*
- * Order set_noref and record counter updates before the
- * end of subbuffer exclusive access. Orders with
- * respect to writers coming into the subbuffer after
- * wrap around, and also order wrt concurrent readers.
- */
- smp_mb();
- /* End of exclusive subbuffer access */
- v_set(config, &buf->commit_cold[idx].cc_sb,
- commit_count);
- /*
- * Order later updates to reserve count after
- * the commit_cold cc_sb update.
- */
- smp_wmb();
- lib_ring_buffer_vmcore_check_deliver(config, buf,
- commit_count, idx);
-
- /*
- * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
- */
- if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
- && atomic_long_read(&buf->active_readers)
- && lib_ring_buffer_poll_deliver(config, buf, chan)) {
- wake_up_interruptible(&buf->read_wait);
- wake_up_interruptible(&chan->read_wait);
- }
-
- }
- }
+ - (old_commit_count & chan->commit_count_mask) == 0))
+ lib_ring_buffer_check_deliver_slow(config, buf, chan, offset,
+ commit_count, idx, tsc);
}
/*
void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf,
struct channel *chan,
- unsigned long idx,
unsigned long buf_offset,
- unsigned long commit_count)
+ unsigned long commit_count,
+ struct commit_counters_hot *cc_hot)
{
unsigned long commit_seq_old;
if (unlikely(subbuf_offset(buf_offset - commit_count, chan)))
return;
- commit_seq_old = v_read(config, &buf->commit_hot[idx].seq);
- while ((long) (commit_seq_old - commit_count) < 0)
- commit_seq_old = v_cmpxchg(config, &buf->commit_hot[idx].seq,
- commit_seq_old, commit_count);
+ commit_seq_old = v_read(config, &cc_hot->seq);
+ if (likely((long) (commit_seq_old - commit_count) < 0))
+ v_set(config, &cc_hot->seq, commit_count);
}
extern int lib_ring_buffer_create(struct lib_ring_buffer *buf,