- if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
- - (old_commit_count & chan->commit_count_mask) == 0)) {
- /*
- * If we succeed in updating cc_sb below, we are the subbuffer
- * writer delivering the subbuffer. This deals with concurrent
- * updates of the "cc" value without adding an add_return atomic
- * operation to the fast path.
- *
- * We do the delivery in two steps:
- * - First, we cmpxchg() cc_sb to the new value
- * old_commit_count + 1. This ensures that we are the only
- * subbuffer writer successfully filling the subbuffer, but we
- * do _not_ set the cc_sb value to "commit_count" yet.
- * Therefore, other writers that wrap around the ring buffer
- * and try to start writing to our subbuffer have to drop
- * records, because the subbuffer still appears non-filled.
- * We therefore have exclusive access to the subbuffer control
- * structures. This mutual exclusion with other writers is
- * crucially important for counting record overruns locklessly
- * in flight recorder mode.
- * - When we are ready to release the subbuffer (either for
- * reading or for overrun by other writers), we simply set the
- * cc_sb value to "commit_count" and perform delivery.
- *
- * The subbuffer size is at least 2 bytes (minimum size: 1 page).
- * This guarantees that old_commit_count + 1 != commit_count.
- */
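For reference, a minimal standalone sketch of the two-step hand-off described
above, using C11 atomics instead of the tracer's v_cmpxchg()/v_set() wrappers.
The struct layout and the names claim_subbuffer()/release_subbuffer() are
illustrative only, not the actual ring buffer API:

#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative stand-in for the per-subbuffer cold commit counter. */
struct commit_cold_sketch {
        atomic_ulong cc_sb;
};

/*
 * Step 1: claim exclusive delivery. Only the single writer that
 * observes cc_sb == old_commit_count succeeds; cc_sb is left at the
 * intermediate value old_commit_count + 1, which no other writer can
 * mistake for a fully committed subbuffer.
 */
static bool claim_subbuffer(struct commit_cold_sketch *cc,
                            unsigned long old_commit_count)
{
        unsigned long expected = old_commit_count;

        return atomic_compare_exchange_strong(&cc->cc_sb, &expected,
                                              old_commit_count + 1);
}

/*
 * Step 2: publish the final commit count once the record counters and
 * the noref flag have been stored. Because a subbuffer is at least
 * 2 bytes, commit_count != old_commit_count + 1, so the two steps can
 * never be confused.
 */
static void release_subbuffer(struct commit_cold_sketch *cc,
                              unsigned long commit_count)
{
        atomic_store_explicit(&cc->cc_sb, commit_count,
                              memory_order_release);
}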
- if (likely(v_cmpxchg(config, &shmp(buf->commit_cold)[idx].cc_sb,
- old_commit_count, old_commit_count + 1)
- == old_commit_count)) {
- /*
- * Start of exclusive subbuffer access. We are
- * guaranteed to be the last writer in this subbuffer,
- * and any other writer trying to access it in this
- * state must drop its records.
- */
- tsc = config->cb.ring_buffer_clock_read(chan);
- v_add(config,
- subbuffer_get_records_count(config,
- &buf->backend, idx),
- &buf->records_count);
- v_add(config,
- subbuffer_count_records_overrun(config,
- &buf->backend,
- idx),
- &buf->records_overrun);
- config->cb.buffer_end(buf, tsc, idx,
- lib_ring_buffer_get_data_size(config,
- buf,
- idx));
-
- /*
- * Set the noref flag and offset for this subbuffer id.
- * Contains a memory barrier that ensures counter stores
- * are ordered before the noref flag and offset stores.
- */
- lib_ring_buffer_set_noref_offset(config, &buf->backend, idx,
- buf_trunc_val(offset, chan));
-
- /*
- * Order the set_noref and record counter updates before
- * the end of subbuffer exclusive access. Orders with
- * respect to writers coming into the subbuffer after
- * wrap-around, and also orders with respect to
- * concurrent readers.
- */
- cmm_smp_mb();
- /* End of exclusive subbuffer access */
- v_set(config, &shmp(buf->commit_cold)[idx].cc_sb,
- commit_count);
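In the simplified model of the earlier sketch, the cmm_smp_mb() + v_set() pair
corresponds to the release store in release_subbuffer(); the matching reader
side would use an acquire load. Again illustrative only, subbuffer_consumable()
is not an actual lttng function:

/*
 * Reader side of the sketch: a subbuffer may be consumed only once the
 * writer has published the final commit count. The acquire load pairs
 * with the writer's barrier + store, so the record counter and
 * noref/offset updates are visible before any data is read.
 */
static bool subbuffer_consumable(struct commit_cold_sketch *cc,
                                 unsigned long commit_count)
{
        return atomic_load_explicit(&cc->cc_sb, memory_order_acquire)
               == commit_count;
}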
- lib_ring_buffer_vmcore_check_deliver(config, buf,
- commit_count, idx);
-
- /*
- * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
- */
- if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
- && uatomic_read(&buf->active_readers)
- && lib_ring_buffer_poll_deliver(config, buf, chan)) {
- /* Kernel wake_up API, left disabled in this userspace port. */
- //wake_up_interruptible(&buf->read_wait);
- //wake_up_interruptible(&chan->read_wait);
- }
-
- }
- }
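The disabled wake_up_interruptible() calls are kernel API; a userspace port
needs its own reader wakeup on the RING_BUFFER_WAKEUP_BY_WRITER path. One
plausible replacement, offered purely as an assumption and not as the
mechanism lttng-ust actually uses, is a self-pipe that the consumer poll()s:

#include <errno.h>
#include <unistd.h>

/*
 * Hypothetical writer-side wakeup: write one byte to a pipe the
 * consumer poll()s. write() can block when the pipe is full, which is
 * consistent with this path being documented as not lock-free and
 * gated on RING_BUFFER_WAKEUP_BY_WRITER.
 */
static void writer_wakeup(int wakeup_pipe_wr_fd)
{
        ssize_t ret;

        do {
                ret = write(wakeup_pipe_wr_fd, "", 1);
        } while (ret < 0 && errno == EINTR);
}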