X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=libringbuffer%2Ffrontend_internal.h;h=00b9508d4df54768767fbe0aaff401cb0f965b0d;hb=1ff31389b;hp=1b55cd5dc97365bfc774dc8cd27633cbaae5b3af;hpb=4cfec15c93af7e0cfe3ce769ee90486bb8ab7c37;p=lttng-ust.git

diff --git a/libringbuffer/frontend_internal.h b/libringbuffer/frontend_internal.h
index 1b55cd5d..00b9508d 100644
--- a/libringbuffer/frontend_internal.h
+++ b/libringbuffer/frontend_internal.h
@@ -1,13 +1,28 @@
-#ifndef _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H
-#define _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H
+#ifndef _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
+#define _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
 
 /*
- * linux/ringbuffer/frontend_internal.h
- *
- * (C) Copyright 2005-2010 - Mathieu Desnoyers
+ * libringbuffer/frontend_internal.h
  *
  * Ring Buffer Library Synchronization Header (internal helpers).
  *
+ * Copyright (C) 2005-2012 Mathieu Desnoyers
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ *
  * Author:
  *	Mathieu Desnoyers
  *
@@ -17,8 +32,11 @@
  */
 
 #include
+#include
+#include
+#include
 
-#include
+#include
 #include "backend_types.h"
 #include "frontend_types.h"
 #include "shm.h"
@@ -86,7 +104,7 @@ unsigned long subbuf_index(unsigned long offset, struct channel *chan)
 #if (CAA_BITS_PER_LONG == 32)
 static inline
 void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
-		   struct lttng_ust_lib_ring_buffer *buf, u64 tsc)
+		   struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
 {
 	if (config->tsc_bits == 0 || config->tsc_bits == 64)
 		return;
@@ -99,7 +117,7 @@ void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
 
 static inline
 int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
-		      struct lttng_ust_lib_ring_buffer *buf, u64 tsc)
+		      struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
 {
 	unsigned long tsc_shifted;
 
@@ -107,7 +125,7 @@ int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
 		return 0;
 
 	tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
-	if (unlikely(tsc_shifted
+	if (caa_unlikely(tsc_shifted
 		     - (unsigned long)v_read(config, &buf->last_tsc)))
 		return 1;
 	else
@@ -116,7 +134,7 @@ int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
 #else
 static inline
 void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
-		   struct lttng_ust_lib_ring_buffer *buf, u64 tsc)
+		   struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
 {
 	if (config->tsc_bits == 0 || config->tsc_bits == 64)
 		return;
@@ -126,12 +144,12 @@ void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
 
 static inline
 int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
-		      struct lttng_ust_lib_ring_buffer *buf, u64 tsc)
+		      struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
 {
 	if (config->tsc_bits == 0 || config->tsc_bits == 64)
 		return 0;
 
-	if (unlikely((tsc - v_read(config, &buf->last_tsc))
+	if (caa_unlikely((tsc - v_read(config, &buf->last_tsc))
 		     >> config->tsc_bits))
 		return 1;
 	else
@@ -167,13 +185,13 @@ void lib_ring_buffer_reserve_push_reader(struct lttng_ust_lib_ring_buffer *buf,
 		 * write position sub-buffer index in the buffer being the one
 		 * which will win this loop.
 		 */
-		if (unlikely(subbuf_trunc(offset, chan)
+		if (caa_unlikely(subbuf_trunc(offset, chan)
 			     - subbuf_trunc(consumed_old, chan)
 			    >= chan->backend.buf_size))
 			consumed_new = subbuf_align(consumed_old, chan);
 		else
 			return;
-	} while (unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
+	} while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
 					  consumed_new) != consumed_old));
 }
 
@@ -282,6 +300,78 @@ int lib_ring_buffer_reserve_committed(const struct lttng_ust_lib_ring_buffer_con
 		     - (commit_count & chan->commit_count_mask) == 0);
 }
 
+static inline
+void lib_ring_buffer_wakeup(struct lttng_ust_lib_ring_buffer *buf,
+			    struct lttng_ust_shm_handle *handle)
+{
+	int wakeup_fd = shm_get_wakeup_fd(handle, &buf->self._ref);
+	sigset_t sigpipe_set, pending_set, old_set;
+	int ret, sigpipe_was_pending = 0;
+
+	if (wakeup_fd < 0)
+		return;
+
+	/*
+	 * Wake-up the other end by writing a null byte in the pipe
+	 * (non-blocking). Important note: Because writing into the
+	 * pipe is non-blocking (and therefore we allow dropping wakeup
+	 * data, as long as there is wakeup data present in the pipe
+	 * buffer to wake up the consumer), the consumer should perform
+	 * the following sequence for waiting:
+	 * 1) empty the pipe (reads).
+	 * 2) check if there is data in the buffer.
+	 * 3) wait on the pipe (poll).
+	 *
+	 * Discard the SIGPIPE from write(), not disturbing any SIGPIPE
+	 * that might be already pending. If a bogus SIGPIPE is sent to
+	 * the entire process concurrently by a malicious user, it may
+	 * be simply discarded.
+	 */
+	ret = sigemptyset(&pending_set);
+	assert(!ret);
+	/*
+	 * sigpending returns the mask of signals that are _both_
+	 * blocked for the thread _and_ pending for either the thread or
+	 * the entire process.
+	 */
+	ret = sigpending(&pending_set);
+	assert(!ret);
+	sigpipe_was_pending = sigismember(&pending_set, SIGPIPE);
+	/*
+	 * If sigpipe was pending, it means it was already blocked, so
+	 * no need to block it.
+	 */
+	if (!sigpipe_was_pending) {
+		ret = sigemptyset(&sigpipe_set);
+		assert(!ret);
+		ret = sigaddset(&sigpipe_set, SIGPIPE);
+		assert(!ret);
+		ret = pthread_sigmask(SIG_BLOCK, &sigpipe_set, &old_set);
+		assert(!ret);
+	}
+	do {
+		ret = write(wakeup_fd, "", 1);
+	} while (ret == -1L && errno == EINTR);
+	if (ret == -1L && errno == EPIPE && !sigpipe_was_pending) {
+		struct timespec timeout = { 0, 0 };
+		do {
+			ret = sigtimedwait(&sigpipe_set, NULL,
+				&timeout);
+		} while (ret == -1L && errno == EINTR);
+	}
+	if (!sigpipe_was_pending) {
+		ret = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
+		assert(!ret);
+	}
+}
+
+/*
+ * Receive end of subbuffer TSC as parameter. It has been read in the
+ * space reservation loop of either reserve or switch, which ensures it
+ * progresses monotonically with event records in the buffer. Therefore,
+ * it ensures that the end timestamp of a subbuffer is <= begin
+ * timestamp of the following subbuffers.
+ */
 static inline
 void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
 				   struct lttng_ust_lib_ring_buffer *buf,
@@ -289,14 +379,14 @@ void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config
 				   unsigned long offset,
 				   unsigned long commit_count,
 				   unsigned long idx,
-				   struct lttng_ust_shm_handle *handle)
+				   struct lttng_ust_shm_handle *handle,
+				   uint64_t tsc)
 {
 	unsigned long old_commit_count = commit_count
 					 - chan->backend.subbuf_size;
-	u64 tsc;
 
 	/* Check if all commits have been done */
-	if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
+	if (caa_unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
 		     - (old_commit_count & chan->commit_count_mask) == 0)) {
 		/*
 		 * If we succeeded at updating cc_sb below, we are the subbuffer
@@ -324,7 +414,13 @@ void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config
 		 * The subbuffer size is least 2 bytes (minimum size: 1 page).
 		 * This guarantees that old_commit_count + 1 != commit_count.
 		 */
-		if (likely(v_cmpxchg(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
+
+		/*
+		 * Order prior updates to reserve count prior to the
+		 * commit_cold cc_sb update.
+		 */
+		cmm_smp_wmb();
+		if (caa_likely(v_cmpxchg(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
 			   old_commit_count, old_commit_count + 1)
 			   == old_commit_count)) {
 			/*
@@ -333,7 +429,6 @@ void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config
 			 * and any other writer trying to access this subbuffer
 			 * in this state is required to drop records.
 			 */
-			tsc = config->cb.ring_buffer_clock_read(chan);
 			v_add(config,
 			      subbuffer_get_records_count(config,
 							  &buf->backend,
@@ -351,6 +446,12 @@ void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config
 							  handle),
 			      handle);
 
+			/*
+			 * Increment the packet counter while we have exclusive
+			 * access.
+			 */
+			subbuffer_inc_packet_count(config, &buf->backend, idx, handle);
+
 			/*
 			 * Set noref flag and offset for this subbuffer id.
 			 * Contains a memory barrier that ensures counter stores
@@ -369,6 +470,11 @@ void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config
 			/* End of exclusive subbuffer access */
 			v_set(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
 			      commit_count);
+			/*
+			 * Order later updates to reserve count after
+			 * the commit cold cc_sb update.
+			 */
+			cmm_smp_wmb();
 			lib_ring_buffer_vmcore_check_deliver(config, buf, commit_count,
 							 idx, handle);
 
@@ -376,38 +482,10 @@ void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config
 			 * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
 			 */
 			if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
-			    && (uatomic_read(&buf->active_readers)
-				|| uatomic_read(&buf->active_shadow_readers))
+			    && uatomic_read(&buf->active_readers)
 			    && lib_ring_buffer_poll_deliver(config, buf, chan, handle)) {
-				int wakeup_fd = shm_get_wakeup_fd(handle, &buf->self._ref);
-
-				if (wakeup_fd >= 0) {
-					int ret;
-					/*
-					 * Wake-up the other end by
-					 * writing a null byte in the
-					 * pipe (non-blocking).
-					 * Important note: Because
-					 * writing into the pipe is
-					 * non-blocking (and therefore
-					 * we allow dropping wakeup
-					 * data, as long as there is
-					 * wakeup data present in the
-					 * pipe buffer to wake up the
-					 * consumer), the consumer
-					 * should perform the following
-					 * sequence for waiting:
-					 * 1) empty the pipe (reads).
-					 * 2) check if there is data in
-					 *    the buffer.
-					 * 3) wait on the pipe (poll).
-					 */
-					do {
-						ret = write(wakeup_fd, "", 1);
-					} while (ret == -1L && errno == EINTR);
-				}
+				lib_ring_buffer_wakeup(buf, handle);
 			}
-
 		}
 	}
 }
@@ -427,23 +505,20 @@ void lib_ring_buffer_write_commit_counter(const struct lttng_ust_lib_ring_buffer
 					  unsigned long idx,
 					  unsigned long buf_offset,
 					  unsigned long commit_count,
-					  size_t slot_size,
 					  struct lttng_ust_shm_handle *handle)
 {
-	unsigned long offset, commit_seq_old;
+	unsigned long commit_seq_old;
 
 	if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
 		return;
 
-	offset = buf_offset + slot_size;
-
 	/*
 	 * subbuf_offset includes commit_count_mask. We can simply
 	 * compare the offsets within the subbuffer without caring about
 	 * buffer full/empty mismatch because offset is never zero here
 	 * (subbuffer header and record headers have non-zero length).
 	 */
-	if (unlikely(subbuf_offset(offset - commit_count, chan)))
+	if (caa_unlikely(subbuf_offset(buf_offset - commit_count, chan)))
 		return;
 
 	commit_seq_old = v_read(config, &shmp_index(handle, buf->commit_hot, idx)->seq);
@@ -460,6 +535,6 @@ extern void lib_ring_buffer_free(struct lttng_ust_lib_ring_buffer *buf,
 				 struct lttng_ust_shm_handle *handle);
 
 /* Keep track of trap nesting inside ring buffer code */
-extern __thread unsigned int lib_ring_buffer_nesting;
+extern DECLARE_URCU_TLS(unsigned int, lib_ring_buffer_nesting);
 
-#endif /* _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H */
+#endif /* _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H */
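For reference, the comment added in lib_ring_buffer_wakeup() prescribes the consumer-side wait sequence: 1) empty the pipe, 2) check whether data is present in the buffer, 3) wait on the pipe. The sketch below is not part of the patch; it only illustrates that sequence, assuming wakeup_fd is the O_NONBLOCK read end of the wakeup pipe held by the consumer and data_available() is a hypothetical stand-in for the consumer's sub-buffer readiness check.

#include <errno.h>
#include <poll.h>
#include <unistd.h>

/*
 * Illustrative consumer-side wait loop for the wakeup pipe protocol
 * (sketch only; data_available() is a hypothetical readiness check,
 * wakeup_fd is assumed to be a non-blocking pipe read end).
 */
static void wait_for_subbuffer(int wakeup_fd, int (*data_available)(void))
{
	char dummy[16];
	struct pollfd pfd = { .fd = wakeup_fd, .events = POLLIN };
	ssize_t len;

	for (;;) {
		/* 1) Empty the pipe: drain every pending wakeup byte. */
		do {
			len = read(wakeup_fd, dummy, sizeof(dummy));
		} while (len > 0 || (len < 0 && errno == EINTR));

		/* 2) Check if there is data in the ring buffer. */
		if (data_available())
			return;

		/* 3) Wait on the pipe: a producer write() wakes us up. */
		while (poll(&pfd, 1, -1) < 0 && errno == EINTR)
			continue;
	}
}

Draining before checking is what makes dropped wakeup bytes harmless: if data becomes ready after step 2, the producer's write() leaves a byte in the pipe, so the poll() in step 3 returns immediately.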