X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=libringbuffer%2Ffrontend_api.h;h=5ddb8ac460951f0bae59a3f098cf6bca510a3799;hb=f52a5702dbeb8125ca373ba363b29cbff3042cc1;hp=d5a2f2a043cb892a0ef020bfd7ef9b137978debb;hpb=aae88c703374f4b1fbb8a5e7e95591bf8ce3e837;p=lttng-ust.git

diff --git a/libringbuffer/frontend_api.h b/libringbuffer/frontend_api.h
index d5a2f2a0..5ddb8ac4 100644
--- a/libringbuffer/frontend_api.h
+++ b/libringbuffer/frontend_api.h
@@ -1,20 +1,33 @@
-#ifndef _LINUX_RING_BUFFER_FRONTEND_API_H
-#define _LINUX_RING_BUFFER_FRONTEND_API_H
+#ifndef _LTTNG_RING_BUFFER_FRONTEND_API_H
+#define _LTTNG_RING_BUFFER_FRONTEND_API_H
 
 /*
- * linux/ringbuffer/frontend_api.h
+ * libringbuffer/frontend_api.h
  *
- * (C) Copyright 2005-2010 - Mathieu Desnoyers
+ * Copyright (C) 2005-2012 Mathieu Desnoyers
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  *
  * Ring Buffer Library Synchronization Header (buffer write API).
  *
  * Author:
- *	Mathieu Desnoyers
+ *	Mathieu Desnoyers
  *
- * See ring_buffer_frontend.c for more information on wait-free algorithms.
- * See linux/ringbuffer/frontend.h for channel allocation and read-side API.
- *
- * Dual LGPL v2.1/GPL v2 license.
+ * See ring_buffer_frontend.c for more information on wait-free
+ * algorithms.
+ * See frontend.h for channel allocation and read-side API.
  */
 
 #include "frontend.h"
@@ -24,10 +37,10 @@
 /**
  * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
  *
- * Grabs RCU read-side lock and keeps a ring buffer nesting count as
- * supplementary safety net to ensure tracer client code will never
- * trigger an endless recursion. Returns the processor ID on success,
- * -EPERM on failure (nesting count too high).
+ * Keeps a ring buffer nesting count as supplementary safety net to
+ * ensure tracer client code will never trigger an endless recursion.
+ * Returns the processor ID on success, -EPERM on failure (nesting count
+ * too high).
  *
  * asm volatile and "memory" clobber prevent the compiler from moving
  * instructions out of the ring buffer nesting count. This is required to ensure
@@ -40,15 +53,13 @@ int lib_ring_buffer_get_cpu(const struct lttng_ust_lib_ring_buffer_config *confi
 {
 	int cpu, nesting;
 
-	rcu_read_lock();
 	cpu = lttng_ust_get_cpu();
-	nesting = ++lib_ring_buffer_nesting;	/* TLS */
+	nesting = ++URCU_TLS(lib_ring_buffer_nesting);
 	cmm_barrier();
 
 	if (caa_unlikely(nesting > 4)) {
 		WARN_ON_ONCE(1);
-		lib_ring_buffer_nesting--;	/* TLS */
-		rcu_read_unlock();
+		URCU_TLS(lib_ring_buffer_nesting)--;
 		return -EPERM;
 	} else
 		return cpu;
@@ -61,8 +72,7 @@ static inline
 void lib_ring_buffer_put_cpu(const struct lttng_ust_lib_ring_buffer_config *config)
 {
 	cmm_barrier();
-	lib_ring_buffer_nesting--;	/* TLS */
-	rcu_read_unlock();
+	URCU_TLS(lib_ring_buffer_nesting)--;	/* TLS */
 }
 
 /*
@@ -93,18 +103,8 @@ int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *c
 	 */
 	//prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
 
-	/*
-	 * Because we don't use any timer in the application, we
-	 * currently cannot guarantee that we have frequent
-	 * events that let us detect 27-bit overflows.
-	 * Therefore, for now, we force event headers
-	 * to contain 64-bit timestamps.
-	 */
-	ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
-#if 0
 	if (last_tsc_overflow(config, buf, ctx->tsc))
 		ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
-#endif //0
 
 	if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
 		return 1;
@@ -160,14 +160,14 @@ int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *confi
 	unsigned long o_begin, o_end, o_old;
 	size_t before_hdr_pad = 0;
 
-	if (uatomic_read(&chan->record_disabled))
+	if (caa_unlikely(uatomic_read(&chan->record_disabled)))
 		return -EAGAIN;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
 		buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
 	else
 		buf = shmp(handle, chan->backend.buf[0].shmp);
-	if (uatomic_read(&buf->record_disabled))
+	if (caa_unlikely(uatomic_read(&buf->record_disabled)))
 		return -EAGAIN;
 
 	ctx->buf = buf;
@@ -285,14 +285,13 @@ void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *confi
 	commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);
 
 	lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
-				      commit_count, endidx, handle);
+				      commit_count, endidx, handle, ctx->tsc);
 	/*
 	 * Update used size at each commit. It's needed only for extracting
 	 * ring_buffer buffers from vmcore, after crash.
 	 */
 	lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
-					     ctx->buf_offset, commit_count,
-					     ctx->slot_size, handle);
+					     offset_end, commit_count, handle);
 }
 
 /**
@@ -361,4 +360,4 @@ void lib_ring_buffer_record_enable(const struct lttng_ust_lib_ring_buffer_config
 	uatomic_dec(&buf->record_disabled);
 }
 
-#endif /* _LINUX_RING_BUFFER_FRONTEND_API_H */
+#endif /* _LTTNG_RING_BUFFER_FRONTEND_API_H */
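The hunks above drop the RCU read-side lock from lib_ring_buffer_get_cpu()/lib_ring_buffer_put_cpu() and keep only the per-thread nesting count, now accessed through liburcu's URCU_TLS(). Below is a minimal, self-contained sketch of that nesting-guard pattern, not lttng-ust code: the names my_nesting, my_guard_enter and my_guard_exit are hypothetical, and sched_getcpu() merely stands in for the library's internal lttng_ust_get_cpu().

/*
 * Sketch only: nesting-count recursion guard in the style of
 * lib_ring_buffer_get_cpu()/lib_ring_buffer_put_cpu() above.
 * my_nesting, my_guard_enter and my_guard_exit are hypothetical names.
 */
#define _GNU_SOURCE
#include <sched.h>		/* sched_getcpu() */
#include <urcu/compiler.h>	/* cmm_barrier(), caa_unlikely() */
#include <urcu/tls-compat.h>	/* DEFINE_URCU_TLS(), URCU_TLS() */

DEFINE_URCU_TLS(int, my_nesting);

static inline int my_guard_enter(void)
{
	int cpu, nesting;

	cpu = sched_getcpu();
	nesting = ++URCU_TLS(my_nesting);
	cmm_barrier();	/* keep instrumented code inside the counted section */
	if (caa_unlikely(nesting > 4)) {
		/* Probe recursed too deeply: back out and refuse the slot. */
		URCU_TLS(my_nesting)--;
		return -1;
	}
	return cpu;
}

static inline void my_guard_exit(void)
{
	cmm_barrier();
	URCU_TLS(my_nesting)--;
}

The cmm_barrier() calls play the role of the asm volatile / "memory" clobber discussed in the header comment: they keep the compiler from moving instrumented code out of the region where the nesting count is incremented, which is what makes the count usable as a recursion fence.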