#if (CAA_BITS_PER_LONG == 32)
static inline
-void save_last_tsc(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf, u64 tsc)
+void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf, u64 tsc)
{
if (config->tsc_bits == 0 || config->tsc_bits == 64)
return;
}
static inline
-int last_tsc_overflow(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf, u64 tsc)
+int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf, u64 tsc)
{
unsigned long tsc_shifted;
}
#else
static inline
-void save_last_tsc(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf, u64 tsc)
+void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf, u64 tsc)
{
if (config->tsc_bits == 0 || config->tsc_bits == 64)
return;
}
static inline
-int last_tsc_overflow(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf, u64 tsc)
+int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf, u64 tsc)
{
if (config->tsc_bits == 0 || config->tsc_bits == 64)
return 0;
#endif
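/*
 * Sketch (not from this patch; all names below are hypothetical): the
 * idea behind save_last_tsc()/last_tsc_overflow(). Only the timestamp
 * bits above config->tsc_bits are tracked; when those most-significant
 * bits differ between two consecutive events, the tracer must record a
 * full 64-bit timestamp rather than a compact one. A plain field stands
 * in for the v_read()/v_set() atomic accessors.
 */
#include <stdint.h>

struct tsc_tracker {
	unsigned long last_tsc_msb;	/* MSBs of the last recorded timestamp */
	unsigned int tsc_bits;		/* width of the compact timestamp */
};

static inline int tsc_msb_changed(struct tsc_tracker *t, uint64_t tsc)
{
	unsigned long shifted = (unsigned long)(tsc >> t->tsc_bits);

	if (shifted == t->last_tsc_msb)
		return 0;	/* compact tsc_bits-wide timestamp suffices */
	t->last_tsc_msb = shifted;
	return 1;	/* MSB rollover: emit a full 64-bit timestamp */
}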
extern
-int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx);
+int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx);
extern
-void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf,
enum switch_mode mode,
- struct shm_handle *handle);
+ struct lttng_ust_shm_handle *handle);
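/*
 * Sketch (not from this patch; self-contained model, all names
 * hypothetical): the fast-path/slow-path split these externs exist for.
 * An inline fast path claims space with a single compare-and-swap on
 * the write offset; only a record that would cross a sub-buffer
 * boundary takes the out-of-line slow path, which is the role of
 * lib_ring_buffer_reserve_slow() in the real code.
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool try_reserve_fast(_Atomic unsigned long *write_offset,
			     unsigned long sb_size, unsigned long len,
			     unsigned long *reserved)
{
	unsigned long old = atomic_load(write_offset);

	/* Record would cross a sub-buffer boundary: slow path must switch. */
	if ((old & (sb_size - 1)) + len > sb_size)
		return false;
	if (!atomic_compare_exchange_strong(write_offset, &old, old + len))
		return false;	/* lost a race; caller retries or goes slow */
	*reserved = old;	/* space [old, old + len) is ours to fill */
	return true;
}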
/* Buffer write helpers */
static inline
-void lib_ring_buffer_reserve_push_reader(struct lib_ring_buffer *buf,
+void lib_ring_buffer_reserve_push_reader(struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
unsigned long offset)
{
}
static inline
-void lib_ring_buffer_vmcore_check_deliver(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+void lib_ring_buffer_vmcore_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
unsigned long commit_count,
unsigned long idx,
- struct shm_handle *handle)
+ struct lttng_ust_shm_handle *handle)
{
if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
- v_set(config, &shmp(handle, buf->commit_hot)[idx].seq, commit_count);
+ v_set(config, &shmp_index(handle, buf->commit_hot, idx)->seq, commit_count);
}
static inline
-int lib_ring_buffer_poll_deliver(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+int lib_ring_buffer_poll_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
- struct shm_handle *handle)
+ struct lttng_ust_shm_handle *handle)
{
unsigned long consumed_old, consumed_idx, commit_count, write_offset;
consumed_old = uatomic_read(&buf->consumed);
consumed_idx = subbuf_index(consumed_old, chan);
- commit_count = v_read(config, &shmp(handle, buf->commit_cold)[consumed_idx].cc_sb);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_cold, consumed_idx)->cc_sb);
/*
* No memory barrier here, since we are only interested
* in a statistically correct polling result. The next poll will
}
static inline
-int lib_ring_buffer_pending_data(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+int lib_ring_buffer_pending_data(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan)
{
return !!subbuf_offset(v_read(config, &buf->offset), chan);
}
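/*
 * Sketch (not from this patch): the power-of-two arithmetic that
 * subbuf_offset()/subbuf_index() style helpers perform. A non-zero
 * offset inside the current sub-buffer, as tested above, means data
 * has been reserved there that no sub-buffer switch has delivered yet.
 */
static inline unsigned long sb_offset(unsigned long off, unsigned long sb_size)
{
	return off & (sb_size - 1);		/* byte position in sub-buffer */
}

static inline unsigned long sb_index(unsigned long off, unsigned long sb_size,
				     unsigned long num_sb)
{
	return (off / sb_size) & (num_sb - 1);	/* which sub-buffer slot */
}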
static inline
-unsigned long lib_ring_buffer_get_data_size(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+unsigned long lib_ring_buffer_get_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
unsigned long idx,
- struct shm_handle *handle)
+ struct lttng_ust_shm_handle *handle)
{
return subbuffer_get_data_size(config, &buf->backend, idx, handle);
}
* This is a very specific ftrace use-case, so we keep this as "internal" API.
*/
static inline
-int lib_ring_buffer_reserve_committed(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+int lib_ring_buffer_reserve_committed(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
- struct shm_handle *handle)
+ struct lttng_ust_shm_handle *handle)
{
unsigned long offset, idx, commit_count;
do {
offset = v_read(config, &buf->offset);
idx = subbuf_index(offset, chan);
- commit_count = v_read(config, &shmp(handle, buf->commit_hot)[idx].cc);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, idx)->cc);
} while (offset != v_read(config, &buf->offset));
return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
}
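/*
 * Sketch (not from this patch): the retry idiom used by the loop above,
 * with C11 atomics standing in for v_read(). Sampling the offset, then
 * the commit counter, then the offset again guarantees the two values
 * belong to one consistent snapshot: if a concurrent writer moved the
 * offset in between, the loop retries.
 */
#include <stdatomic.h>

struct rb_snapshot {
	unsigned long offset;
	unsigned long commit_count;
};

static struct rb_snapshot read_snapshot(_Atomic unsigned long *offset,
					_Atomic unsigned long *commit)
{
	struct rb_snapshot s;

	do {
		s.offset = atomic_load(offset);
		s.commit_count = atomic_load(commit);
	} while (s.offset != atomic_load(offset));	/* writer raced: retry */
	return s;
}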
static inline
-void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
unsigned long offset,
unsigned long commit_count,
unsigned long idx,
- struct shm_handle *handle)
+ struct lttng_ust_shm_handle *handle)
{
unsigned long old_commit_count = commit_count
- chan->backend.subbuf_size;
* The subbuffer size is at least 2 bytes (minimum size: 1 page).
* This guarantees that old_commit_count + 1 != commit_count.
*/
- if (likely(v_cmpxchg(config, &shmp(handle, buf->commit_cold)[idx].cc_sb,
+ if (likely(v_cmpxchg(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
old_commit_count, old_commit_count + 1)
== old_commit_count)) {
/*
*/
cmm_smp_mb();
/* End of exclusive subbuffer access */
- v_set(config, &shmp(handle, buf->commit_cold)[idx].cc_sb,
+ v_set(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
commit_count);
lib_ring_buffer_vmcore_check_deliver(config, buf,
commit_count, idx, handle);
* RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
*/
if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
- && uatomic_read(&buf->active_readers)
+ && (uatomic_read(&buf->active_readers)
+ || uatomic_read(&buf->active_shadow_readers))
&& lib_ring_buffer_poll_deliver(config, buf, chan, handle)) {
int wakeup_fd = shm_get_wakeup_fd(handle, &buf->self._ref);
* Wake up the other end by
* writing a null byte in the
* pipe (non-blocking).
+ * Important note: Because
+ * writing into the pipe is
+ * non-blocking (and therefore
+ * we allow dropping wakeup
+ * data, as long as there is
+ * wakeup data present in the
+ * pipe buffer to wake up the
+ * consumer), the consumer
+ * should perform the following
+ * sequence for waiting:
+ * 1) empty the pipe (reads).
+ * 2) check if there is data in
+ * the buffer.
+ * 3) wait on the pipe (poll).
*/
do {
ret = write(wakeup_fd, "", 1);
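/*
 * Sketch (not from this patch): a consumer-side wait loop implementing
 * the 1)-2)-3) sequence described in the comment above. wakeup_rfd is
 * assumed to be the O_NONBLOCK read end of the wakeup pipe, and
 * buffer_has_data() is a hypothetical stand-in for the real "is there a
 * full sub-buffer to read" check. Draining before checking, and
 * checking before polling, ensures a wakeup byte written at any point
 * is never lost: a write racing with step 2 leaves a byte in the pipe
 * that makes step 3 return immediately.
 */
#include <errno.h>
#include <poll.h>
#include <unistd.h>

static int wait_for_data(int wakeup_rfd, int (*buffer_has_data)(void))
{
	char drain[64];
	struct pollfd pfd = { .fd = wakeup_rfd, .events = POLLIN };

	for (;;) {
		/* 1) Empty the pipe (reads). */
		while (read(wakeup_rfd, drain, sizeof(drain)) > 0)
			;
		/* 2) Check if there is data in the buffer. */
		if (buffer_has_data())
			return 0;
		/* 3) Wait on the pipe (poll). */
		if (poll(&pfd, 1, -1) < 0 && errno != EINTR)
			return -1;
	}
}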
* useful for crash dump.
*/
static inline
-void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+void lib_ring_buffer_write_commit_counter(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
unsigned long idx,
unsigned long buf_offset,
unsigned long commit_count,
size_t slot_size,
- struct shm_handle *handle)
+ struct lttng_ust_shm_handle *handle)
{
unsigned long offset, commit_seq_old;
if (unlikely(subbuf_offset(offset - commit_count, chan)))
return;
- commit_seq_old = v_read(config, &shmp(handle, buf->commit_hot)[idx].seq);
+ commit_seq_old = v_read(config, &shmp_index(handle, buf->commit_hot, idx)->seq);
while ((long) (commit_seq_old - commit_count) < 0)
- commit_seq_old = v_cmpxchg(config, &shmp(handle, buf->commit_hot)[idx].seq,
+ commit_seq_old = v_cmpxchg(config, &shmp_index(handle, buf->commit_hot, idx)->seq,
commit_seq_old, commit_count);
}
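/*
 * Sketch (not from this patch): the monotonic-advance idiom of the
 * cmpxchg loop above, using C11 atomics in place of v_cmpxchg(). The
 * counter only ever moves forward; if a concurrent updater already
 * pushed it past `target`, the loop exits without storing anything.
 */
#include <stdatomic.h>

static void advance_seq(_Atomic unsigned long *seq, unsigned long target)
{
	unsigned long old = atomic_load(seq);

	/* Signed comparison of the difference tolerates counter wrap. */
	while ((long)(old - target) < 0
	       && !atomic_compare_exchange_weak(seq, &old, target))
		;	/* failed CAS refreshed `old`; re-check and retry */
}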
-extern int lib_ring_buffer_create(struct lib_ring_buffer *buf,
+extern int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
struct channel_backend *chanb, int cpu,
- struct shm_handle *handle,
+ struct lttng_ust_shm_handle *handle,
struct shm_object *shmobj);
-extern void lib_ring_buffer_free(struct lib_ring_buffer *buf,
- struct shm_handle *handle);
+extern void lib_ring_buffer_free(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle);
/* Keep track of trap nesting inside ring buffer code */
extern __thread unsigned int lib_ring_buffer_nesting;
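/*
 * Sketch (not from this patch; simplified): how a per-thread counter
 * like lib_ring_buffer_nesting is typically used, so that re-entrant
 * calls (e.g. a signal handler interrupting instrumented code) back
 * off instead of corrupting writer state. The real library defines its
 * own entry points and tolerated nesting depth.
 */
static __thread unsigned int nesting_sketch;

static inline int tracer_enter(void)
{
	if (nesting_sketch >= 4)	/* depth limit chosen for the sketch */
		return -1;		/* too deeply nested: drop the event */
	nesting_sketch++;
	return 0;
}

static inline void tracer_exit(void)
{
	nesting_sketch--;
}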