X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=libringbuffer%2Fring_buffer_frontend.c;h=bda0e1f497fecc786883726b6329ba51498240c9;hb=b07cd987c6856becc4ce1c20e5118b006f4431b0;hp=eb4e48629ca3d26a08b3c015b069a0e3c29f7d68;hpb=80249235cb6560c6bb915f1cd486aa5017f87456;p=lttng-ust.git

diff --git a/libringbuffer/ring_buffer_frontend.c b/libringbuffer/ring_buffer_frontend.c
index eb4e4862..bda0e1f4 100644
--- a/libringbuffer/ring_buffer_frontend.c
+++ b/libringbuffer/ring_buffer_frontend.c
@@ -62,6 +62,7 @@
 #include
 #include
 #include
+#include
 #include
 
 #include "smp.h"
@@ -73,10 +74,6 @@
 #include "tlsfixup.h"
 #include "../liblttng-ust/compat.h"	/* For ENODATA */
 
-#ifndef max
-#define max(a, b)	((a) > (b) ? (a) : (b))
-#endif
-
 /* Print DBG() messages about events lost only every 1048576 hits */
 #define DBG_PRINT_NR_LOST	(1UL << 20)
 
@@ -84,6 +81,13 @@
 #define LTTNG_UST_RB_SIG_READ		SIGRTMIN + 1
 #define LTTNG_UST_RB_SIG_TEARDOWN	SIGRTMIN + 2
 #define CLOCKID		CLOCK_MONOTONIC
+#define LTTNG_UST_RING_BUFFER_GET_RETRY		10
+#define LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS	10
+
+/*
+ * Non-static to ensure the compiler does not optimize away the xor.
+ */
+uint8_t lttng_crash_magic_xor[] = RB_CRASH_DUMP_ABI_MAGIC_XOR;
 
 /*
  * Use POSIX SHM: shm_open(3) and shm_unlink(3).
@@ -156,10 +160,14 @@ static struct timer_signal_data timer_signal = {
 void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
 			   struct lttng_ust_shm_handle *handle)
 {
-	struct channel *chan = shmp(handle, buf->backend.chan);
-	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+	struct channel *chan;
+	const struct lttng_ust_lib_ring_buffer_config *config;
 	unsigned int i;
 
+	chan = shmp(handle, buf->backend.chan);
+	if (!chan)
+		abort();
+	config = &chan->backend.config;
 	/*
 	 * Reset iterator first. It will put the subbuffer if it currently holds
 	 * it.
@@ -205,6 +213,95 @@ void channel_reset(struct channel *chan)
 	/* Don't reset reader reference count */
 }
 
+static
+void init_crash_abi(const struct lttng_ust_lib_ring_buffer_config *config,
+		struct lttng_crash_abi *crash_abi,
+		struct lttng_ust_lib_ring_buffer *buf,
+		struct channel_backend *chanb,
+		struct shm_object *shmobj,
+		struct lttng_ust_shm_handle *handle)
+{
+	int i;
+
+	for (i = 0; i < RB_CRASH_DUMP_ABI_MAGIC_LEN; i++)
+		crash_abi->magic[i] = lttng_crash_magic_xor[i] ^ 0xFF;
+	crash_abi->mmap_length = shmobj->memory_map_size;
+	crash_abi->endian = RB_CRASH_ENDIAN;
+	crash_abi->major = RB_CRASH_DUMP_ABI_MAJOR;
+	crash_abi->minor = RB_CRASH_DUMP_ABI_MINOR;
+	crash_abi->word_size = sizeof(unsigned long);
+	crash_abi->layout_type = LTTNG_CRASH_TYPE_UST;
+
+	/* Offset of fields */
+	crash_abi->offset.prod_offset =
+		(uint32_t) ((char *) &buf->offset - (char *) buf);
+	crash_abi->offset.consumed_offset =
+		(uint32_t) ((char *) &buf->consumed - (char *) buf);
+	crash_abi->offset.commit_hot_array =
+		(uint32_t) ((char *) shmp(handle, buf->commit_hot) - (char *) buf);
+	crash_abi->offset.commit_hot_seq =
+		offsetof(struct commit_counters_hot, seq);
+	crash_abi->offset.buf_wsb_array =
+		(uint32_t) ((char *) shmp(handle, buf->backend.buf_wsb) - (char *) buf);
+	crash_abi->offset.buf_wsb_id =
+		offsetof(struct lttng_ust_lib_ring_buffer_backend_subbuffer, id);
+	crash_abi->offset.sb_array =
+		(uint32_t) ((char *) shmp(handle, buf->backend.array) - (char *) buf);
+	crash_abi->offset.sb_array_shmp_offset =
+		offsetof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp,
+			shmp._ref.offset);
+	crash_abi->offset.sb_backend_p_offset =
+		offsetof(struct lttng_ust_lib_ring_buffer_backend_pages,
+			p._ref.offset);
+
+	/* Field length */
+	crash_abi->length.prod_offset = sizeof(buf->offset);
+	crash_abi->length.consumed_offset = sizeof(buf->consumed);
+	crash_abi->length.commit_hot_seq =
+		sizeof(((struct commit_counters_hot *) NULL)->seq);
+	crash_abi->length.buf_wsb_id =
+		sizeof(((struct lttng_ust_lib_ring_buffer_backend_subbuffer *) NULL)->id);
+	crash_abi->length.sb_array_shmp_offset =
+		sizeof(((struct lttng_ust_lib_ring_buffer_backend_pages_shmp *) NULL)->shmp._ref.offset);
+	crash_abi->length.sb_backend_p_offset =
+		sizeof(((struct lttng_ust_lib_ring_buffer_backend_pages *) NULL)->p._ref.offset);
+
+	/* Array stride */
+	crash_abi->stride.commit_hot_array =
+		sizeof(struct commit_counters_hot);
+	crash_abi->stride.buf_wsb_array =
+		sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer);
+	crash_abi->stride.sb_array =
+		sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp);
+
+	/* Buffer constants */
+	crash_abi->buf_size = chanb->buf_size;
+	crash_abi->subbuf_size = chanb->subbuf_size;
+	crash_abi->num_subbuf = chanb->num_subbuf;
+	crash_abi->mode = (uint32_t) chanb->config.mode;
+
+	if (config->cb.content_size_field) {
+		size_t offset, length;
+
+		config->cb.content_size_field(config, &offset, &length);
+		crash_abi->offset.content_size = offset;
+		crash_abi->length.content_size = length;
+	} else {
+		crash_abi->offset.content_size = 0;
+		crash_abi->length.content_size = 0;
+	}
+	if (config->cb.packet_size_field) {
+		size_t offset, length;
+
+		config->cb.packet_size_field(config, &offset, &length);
+		crash_abi->offset.packet_size = offset;
+		crash_abi->length.packet_size = length;
+	} else {
+		crash_abi->offset.packet_size = 0;
+		crash_abi->length.packet_size = 0;
+	}
+}
+
 /*
  * Must be called under cpu hotplug protection.
 */
@@ -224,18 +321,12 @@ int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
 	if (buf->backend.allocated)
 		return 0;
 
-	ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend,
-			cpu, handle, shmobj);
-	if (ret)
-		return ret;
-
 	align_shm(shmobj, __alignof__(struct commit_counters_hot));
 	set_shmp(buf->commit_hot,
 		 zalloc_shm(shmobj,
			sizeof(struct commit_counters_hot) * chan->backend.num_subbuf));
 	if (!shmp(handle, buf->commit_hot)) {
-		ret = -ENOMEM;
-		goto free_chanbuf;
+		return -ENOMEM;
 	}
 
 	align_shm(shmobj, __alignof__(struct commit_counters_cold));
@@ -247,6 +338,12 @@ int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
 		goto free_commit;
 	}
 
+	ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend,
+			cpu, handle, shmobj);
+	if (ret) {
+		goto free_init;
+	}
+
 	/*
 	 * Write the subbuffer header for first subbuffer so we know the total
 	 * duration of data gathering.
@@ -257,12 +354,16 @@ int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
 	tsc = config->cb.ring_buffer_clock_read(shmp(handle, buf->backend.chan));
 	config->cb.buffer_begin(buf, tsc, 0, handle);
 	v_add(config, subbuf_header_size, &shmp_index(handle, buf->commit_hot, 0)->cc);
+	v_add(config, subbuf_header_size, &shmp_index(handle, buf->commit_hot, 0)->seq);
 
 	if (config->cb.buffer_create) {
 		ret = config->cb.buffer_create(buf, priv, cpu, chanb->name, handle);
 		if (ret)
-			goto free_init;
+			goto free_chanbuf;
 	}
+
+	init_crash_abi(config, &buf->crash_abi, buf, chanb, shmobj, handle);
+
 	buf->backend.allocated = 1;
 	return 0;
 
@@ -299,6 +400,9 @@ void lib_ring_buffer_channel_switch_timer(int sig, siginfo_t *si, void *uc)
 		for_each_possible_cpu(cpu) {
 			struct lttng_ust_lib_ring_buffer *buf =
 				shmp(handle, chan->backend.buf[cpu].shmp);
+
+			if (!buf)
+				abort();
 			if (uatomic_read(&buf->active_readers))
 				lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE,
 					chan->handle);
@@ -307,6 +411,8 @@ void lib_ring_buffer_channel_switch_timer(int sig, siginfo_t *si, void *uc)
 		struct lttng_ust_lib_ring_buffer *buf =
 			shmp(handle, chan->backend.buf[0].shmp);
 
+		if (!buf)
+			abort();
 		if (uatomic_read(&buf->active_readers))
 			lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE,
 				chan->handle);
@@ -315,6 +421,113 @@ void lib_ring_buffer_channel_switch_timer(int sig, siginfo_t *si, void *uc)
 	return;
 }
 
+static
+int lib_ring_buffer_poll_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
+				 struct lttng_ust_lib_ring_buffer *buf,
+				 struct channel *chan,
+				 struct lttng_ust_shm_handle *handle)
+{
+	unsigned long consumed_old, consumed_idx, commit_count, write_offset;
+
+	consumed_old = uatomic_read(&buf->consumed);
+	consumed_idx = subbuf_index(consumed_old, chan);
+	commit_count = v_read(config, &shmp_index(handle, buf->commit_cold, consumed_idx)->cc_sb);
+	/*
+	 * No memory barrier here, since we are only interested
+	 * in a statistically correct polling result. The next poll will
+	 * get the data if we are racing. The mb() that ensures correct
+	 * memory order is in get_subbuf.
+	 */
+	write_offset = v_read(config, &buf->offset);
+
+	/*
+	 * Check that the subbuffer we are trying to consume has been
+	 * already fully committed.
+	 */
+
+	if (((commit_count - chan->backend.subbuf_size)
+	     & chan->commit_count_mask)
+	    - (buf_trunc(consumed_old, chan)
+	       >> chan->backend.num_subbuf_order)
+	    != 0)
+		return 0;
+
+	/*
+	 * Check that we are not about to read the same subbuffer in
+	 * which the writer head is.
+ */ + if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_old, chan) + == 0) + return 0; + + return 1; +} + +static +void lib_ring_buffer_wakeup(struct lttng_ust_lib_ring_buffer *buf, + struct lttng_ust_shm_handle *handle) +{ + int wakeup_fd = shm_get_wakeup_fd(handle, &buf->self._ref); + sigset_t sigpipe_set, pending_set, old_set; + int ret, sigpipe_was_pending = 0; + + if (wakeup_fd < 0) + return; + + /* + * Wake-up the other end by writing a null byte in the pipe + * (non-blocking). Important note: Because writing into the + * pipe is non-blocking (and therefore we allow dropping wakeup + * data, as long as there is wakeup data present in the pipe + * buffer to wake up the consumer), the consumer should perform + * the following sequence for waiting: + * 1) empty the pipe (reads). + * 2) check if there is data in the buffer. + * 3) wait on the pipe (poll). + * + * Discard the SIGPIPE from write(), not disturbing any SIGPIPE + * that might be already pending. If a bogus SIGPIPE is sent to + * the entire process concurrently by a malicious user, it may + * be simply discarded. + */ + ret = sigemptyset(&pending_set); + assert(!ret); + /* + * sigpending returns the mask of signals that are _both_ + * blocked for the thread _and_ pending for either the thread or + * the entire process. + */ + ret = sigpending(&pending_set); + assert(!ret); + sigpipe_was_pending = sigismember(&pending_set, SIGPIPE); + /* + * If sigpipe was pending, it means it was already blocked, so + * no need to block it. + */ + if (!sigpipe_was_pending) { + ret = sigemptyset(&sigpipe_set); + assert(!ret); + ret = sigaddset(&sigpipe_set, SIGPIPE); + assert(!ret); + ret = pthread_sigmask(SIG_BLOCK, &sigpipe_set, &old_set); + assert(!ret); + } + do { + ret = write(wakeup_fd, "", 1); + } while (ret == -1L && errno == EINTR); + if (ret == -1L && errno == EPIPE && !sigpipe_was_pending) { + struct timespec timeout = { 0, 0 }; + do { + ret = sigtimedwait(&sigpipe_set, NULL, + &timeout); + } while (ret == -1L && errno == EINTR); + } + if (!sigpipe_was_pending) { + ret = pthread_sigmask(SIG_SETMASK, &old_set, NULL); + assert(!ret); + } +} + static void lib_ring_buffer_channel_do_read(struct channel *chan) { @@ -334,6 +547,8 @@ void lib_ring_buffer_channel_do_read(struct channel *chan) struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp); + if (!buf) + abort(); if (uatomic_read(&buf->active_readers) && lib_ring_buffer_poll_deliver(config, buf, chan, handle)) { @@ -344,6 +559,8 @@ void lib_ring_buffer_channel_do_read(struct channel *chan) struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp); + if (!buf) + abort(); if (uatomic_read(&buf->active_readers) && lib_ring_buffer_poll_deliver(config, buf, chan, handle)) { @@ -528,7 +745,7 @@ void lib_ring_buffer_channel_switch_timer_start(struct channel *chan) } its.it_value.tv_sec = chan->switch_timer_interval / 1000000; - its.it_value.tv_nsec = chan->switch_timer_interval % 1000000; + its.it_value.tv_nsec = (chan->switch_timer_interval % 1000000) * 1000; its.it_interval.tv_sec = its.it_value.tv_sec; its.it_interval.tv_nsec = its.it_value.tv_nsec; @@ -582,7 +799,7 @@ void lib_ring_buffer_channel_read_timer_start(struct channel *chan) } its.it_value.tv_sec = chan->read_timer_interval / 1000000; - its.it_value.tv_nsec = chan->read_timer_interval % 1000000; + its.it_value.tv_nsec = (chan->read_timer_interval % 1000000) * 1000; its.it_interval.tv_sec = its.it_value.tv_sec; its.it_interval.tv_nsec = its.it_value.tv_nsec; @@ -672,6 
+889,8 @@ static void channel_free(struct channel *chan, * padding to let readers get those sub-buffers. * Used for live streaming. * @read_timer_interval: Time interval (in us) to wake up pending readers. + * @stream_fds: array of stream file descriptors. + * @nr_stream_fds: number of file descriptors in array. * * Holds cpu hotplug. * Returns NULL on failure. @@ -684,7 +903,8 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buff void *priv_data_init, void *buf_addr, size_t subbuf_size, size_t num_subbuf, unsigned int switch_timer_interval, - unsigned int read_timer_interval) + unsigned int read_timer_interval, + const int *stream_fds, int nr_stream_fds) { int ret; size_t shmsize, chansize; @@ -698,6 +918,9 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buff else nr_streams = 1; + if (nr_stream_fds != nr_streams) + return NULL; + if (lib_ring_buffer_check_config(config, switch_timer_interval, read_timer_interval)) return NULL; @@ -721,7 +944,8 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buff shmsize += priv_data_size; /* Allocate normal memory for channel (not shared) */ - shmobj = shm_object_table_alloc(handle->table, shmsize, SHM_OBJECT_MEM); + shmobj = shm_object_table_alloc(handle->table, shmsize, SHM_OBJECT_MEM, + -1); if (!shmobj) goto error_append; /* struct channel is at object 0, offset 0 (hardcoded) */ @@ -751,7 +975,8 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buff } ret = channel_backend_init(&chan->backend, name, config, - subbuf_size, num_subbuf, handle); + subbuf_size, num_subbuf, handle, + stream_fds); if (ret) goto error_backend_init; @@ -1067,8 +1292,7 @@ int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf, struct channel *chan = shmp(handle, buf->backend.chan); const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config; unsigned long consumed_cur, consumed_idx, commit_count, write_offset; - int ret; - int finalized; + int ret, finalized, nr_retry = LTTNG_UST_RING_BUFFER_GET_RETRY; retry: finalized = CMM_ACCESS_ONCE(buf->finalized); @@ -1103,14 +1327,66 @@ retry: /* * Check that the subbuffer we are trying to consume has been - * already fully committed. + * already fully committed. There are a few causes that can make + * this unavailability situation occur: + * + * Temporary (short-term) situation: + * - Application is running on a different CPU, between reserve + * and commit ring buffer operations, + * - Application is preempted between reserve and commit ring + * buffer operations, + * + * Long-term situation: + * - Application is stopped (SIGSTOP) between reserve and commit + * ring buffer operations. Could eventually be resumed by + * SIGCONT. + * - Application is killed (SIGTERM, SIGINT, SIGKILL) between + * reserve and commit ring buffer operation. + * + * From a consumer perspective, handling short-term + * unavailability situations is performed by retrying a few + * times after a delay. Handling long-term unavailability + * situations is handled by failing to get the sub-buffer. + * + * In all of those situations, if the application is taking a + * long time to perform its commit after ring buffer space + * reservation, we can end up in a situation where the producer + * will fill the ring buffer and try to write into the same + * sub-buffer again (which has a missing commit). 
+	 * handled by the producer in the sub-buffer switch handling
+	 * code of the reserve routine by detecting unbalanced
+	 * reserve/commit counters and discarding all further events
+	 * until the situation is resolved. Two scenarios can
+	 * occur:
+	 *
+	 * 1) The application causing the reserve/commit counters to be
+	 *    unbalanced has been terminated. In this situation, all
+	 *    further events will be discarded in the buffers, and no
+	 *    further buffer data will be readable by the consumer
+	 *    daemon. Tearing down the UST tracing session and starting
+	 *    anew is a work-around for those situations. Note that this
+	 *    only affects per-UID tracing. In per-PID tracing, the
+	 *    application vanishes with the termination, and therefore
+	 *    no more data needs to be written to the buffers.
+	 * 2) The application causing the unbalance has been delayed for
+	 *    a long time, but will eventually try to increment the
+	 *    commit counter after eventually writing to the sub-buffer.
+	 *    This situation can cause events to be discarded until the
+	 *    application resumes its operations.
 	 */
 	if (((commit_count - chan->backend.subbuf_size)
 	     & chan->commit_count_mask)
 	    - (buf_trunc(consumed, chan)
 	       >> chan->backend.num_subbuf_order)
-	    != 0)
-		goto nodata;
+	    != 0) {
+		if (nr_retry-- > 0) {
+			if (nr_retry <= (LTTNG_UST_RING_BUFFER_GET_RETRY >> 1))
+				(void) poll(NULL, 0, LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS);
+			goto retry;
+		} else {
+			goto nodata;
+		}
+	}
 
 	/*
 	 * Check that we are not about to read the same subbuffer in
@@ -1126,12 +1402,23 @@ retry:
 	 * the writer is getting access to a subbuffer we were trying to get
 	 * access to. Also checks that the "consumed" buffer count we are
 	 * looking for matches the one contained in the subbuffer id.
+	 *
+	 * The short-lived race window described here can be affected by
+	 * application signals and preemption, thus requiring the loop
+	 * to be bounded by a maximum number of retries.
 	 */
 	ret = update_read_sb_index(config, &buf->backend, &chan->backend,
 				   consumed_idx, buf_trunc_val(consumed, chan),
 				   handle);
-	if (ret)
-		goto retry;
+	if (ret) {
+		if (nr_retry-- > 0) {
+			if (nr_retry <= (LTTNG_UST_RING_BUFFER_GET_RETRY >> 1))
+				(void) poll(NULL, 0, LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS);
+			goto retry;
+		} else {
+			goto nodata;
+		}
+	}
 	subbuffer_id_clear_noref(config, &buf->backend.buf_rsb.id);
 
 	buf->get_subbuf_consumed = consumed;
@@ -1308,7 +1595,8 @@ void lib_ring_buffer_print_errors(struct channel *chan,
 /*
  * lib_ring_buffer_switch_old_start: Populate old subbuffer header.
  *
- * Only executed when the buffer is finalized, in SWITCH_FLUSH.
+ * Only executed by SWITCH_FLUSH, which can be issued while tracing is
+ * active or at buffer finalization (destroy).
  */
 static
 void lib_ring_buffer_switch_old_start(struct lttng_ust_lib_ring_buffer *buf,
@@ -1482,12 +1770,14 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
 	unsigned long sb_index, commit_count;
 
 	/*
-	 * We are performing a SWITCH_FLUSH. At this stage, there are no
-	 * concurrent writes into the buffer.
+	 * We are performing a SWITCH_FLUSH. There may be concurrent
+	 * writes into the buffer if e.g. invoked while performing a
+	 * snapshot on an active trace.
 	 *
-	 * The client does not save any header information. Don't
-	 * switch empty subbuffer on finalize, because it is invalid to
-	 * deliver a completely empty subbuffer.
+	 * If the client does not save any header information
+	 * (sub-buffer header size == 0), don't switch empty subbuffer
+	 * on finalize, because it is invalid to deliver a completely
+	 * empty subbuffer.
 	 */
 	if (!config->cb.subbuffer_header_size())
 		return -1;
@@ -1866,6 +2156,130 @@ int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx)
 	return 0;
 }
 
+static
+void lib_ring_buffer_vmcore_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
+					  struct lttng_ust_lib_ring_buffer *buf,
+					  unsigned long commit_count,
+					  unsigned long idx,
+					  struct lttng_ust_shm_handle *handle)
+{
+	if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
+		v_set(config, &shmp_index(handle, buf->commit_hot, idx)->seq, commit_count);
+}
+
+void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_config *config,
+					struct lttng_ust_lib_ring_buffer *buf,
+					struct channel *chan,
+					unsigned long offset,
+					unsigned long commit_count,
+					unsigned long idx,
+					struct lttng_ust_shm_handle *handle,
+					uint64_t tsc)
+{
+	unsigned long old_commit_count = commit_count
+					 - chan->backend.subbuf_size;
+
+	/*
+	 * If we succeeded at updating cc_sb below, we are the subbuffer
+	 * writer delivering the subbuffer. Deals with concurrent
+	 * updates of the "cc" value without adding an add_return atomic
+	 * operation to the fast path.
+	 *
+	 * We are doing the delivery in two steps:
+	 * - First, we cmpxchg() cc_sb to the new value
+	 *   old_commit_count + 1. This ensures that we are the only
+	 *   subbuffer user successfully filling the subbuffer, but we
+	 *   do _not_ set the cc_sb value to "commit_count" yet.
+	 *   Therefore, other writers that would wrap around the ring
+	 *   buffer and try to start writing to our subbuffer would
+	 *   have to drop records, because it would appear as
+	 *   non-filled.
+	 *   We therefore have exclusive access to the subbuffer control
+	 *   structures. This mutual exclusion with other writers is
+	 *   crucially important for counting record overruns in
+	 *   flight recorder mode locklessly.
+	 * - When we are ready to release the subbuffer (either for
+	 *   reading or for overrun by other writers), we simply set the
+	 *   cc_sb value to "commit_count" and perform delivery.
+	 *
+	 * The subbuffer size is at least 2 bytes (minimum size: 1 page).
+	 * This guarantees that old_commit_count + 1 != commit_count.
+	 */
+
+	/*
+	 * Order prior updates to reserve count prior to the
+	 * commit_cold cc_sb update.
+	 */
+	cmm_smp_wmb();
+	if (caa_likely(v_cmpxchg(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
+				 old_commit_count, old_commit_count + 1)
+		   == old_commit_count)) {
+		/*
+		 * Start of exclusive subbuffer access. We are
+		 * guaranteed to be the last writer in this subbuffer
+		 * and any other writer trying to access this subbuffer
+		 * in this state is required to drop records.
+		 */
+		v_add(config,
+		      subbuffer_get_records_count(config,
+						  &buf->backend,
+						  idx, handle),
+		      &buf->records_count);
+		v_add(config,
+		      subbuffer_count_records_overrun(config,
+						      &buf->backend,
+						      idx, handle),
+		      &buf->records_overrun);
+		config->cb.buffer_end(buf, tsc, idx,
+				      lib_ring_buffer_get_data_size(config,
+								    buf,
+								    idx,
+								    handle),
+				      handle);
+
+		/*
+		 * Increment the packet counter while we have exclusive
+		 * access.
+		 */
+		subbuffer_inc_packet_count(config, &buf->backend, idx, handle);
+
+		/*
+		 * Set noref flag and offset for this subbuffer id.
+		 * Contains a memory barrier that ensures counter stores
+		 * are ordered before set noref and offset.
+ */ + lib_ring_buffer_set_noref_offset(config, &buf->backend, idx, + buf_trunc_val(offset, chan), handle); + + /* + * Order set_noref and record counter updates before the + * end of subbuffer exclusive access. Orders with + * respect to writers coming into the subbuffer after + * wrap around, and also order wrt concurrent readers. + */ + cmm_smp_mb(); + /* End of exclusive subbuffer access */ + v_set(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb, + commit_count); + /* + * Order later updates to reserve count after + * the commit cold cc_sb update. + */ + cmm_smp_wmb(); + lib_ring_buffer_vmcore_check_deliver(config, buf, + commit_count, idx, handle); + + /* + * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free. + */ + if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER + && uatomic_read(&buf->active_readers) + && lib_ring_buffer_poll_deliver(config, buf, chan, handle)) { + lib_ring_buffer_wakeup(buf, handle); + } + } +} + /* * Force a read (imply TLS fixup for dlopen) of TLS variables. */