#ifndef _LINUX_RING_BUFFER_FRONTEND_TYPES_H
#define _LINUX_RING_BUFFER_FRONTEND_TYPES_H
/*
 * linux/ringbuffer/frontend_types.h
 *
 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer Library Synchronization Header (types).
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
#include <urcu/list.h>
#include <urcu/uatomic.h>

#include "lttng/core.h"

#include <lttng/usterr-signal-safe.h>
#include <lttng/ringbuffer-config.h>
#include "backend_types.h"
#include "shm_internal.h"
/*
 * A switch is done during tracing or as a final flush after tracing (so it
 * won't write in the new sub-buffer).
 */
enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH };
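/*
 * Illustrative sketch (not part of the original header): SWITCH_ACTIVE is
 * used while tracing is live, to rotate to the next sub-buffer and keep
 * writing; SWITCH_FLUSH is used once at teardown so the current sub-buffer
 * becomes readable without a new one being opened. Assuming the frontend's
 * switch entry point takes the buffer, the mode and the shm handle:
 *
 *	lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE, handle);  (periodic)
 *	lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH, handle);   (final flush)
 */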
/* channel: collection of per-cpu ring buffers. */
struct channel {
	int record_disabled;
	unsigned long commit_count_mask;	/*
						 * Commit count mask, removing
						 * the MSBs corresponding to
						 * bits used to represent the
						 * sub-buffer index.
						 */

	unsigned long switch_timer_interval;	/* Buffer flush (jiffies) */
	unsigned long read_timer_interval;	/* Reader wakeup (jiffies) */
	//wait_queue_head_t read_wait;		/* reader wait queue */
	int finalized;				/* Has channel been finalized */
	size_t priv_data_offset;
	/*
	 * Associated backend contains a variable-length array. Needs to
	 * be last member.
	 */
	struct channel_backend backend;		/* Associated backend */
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
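/*
 * Sketch of how commit_count_mask is typically derived (an assumption about
 * the frontend's channel-creation code, not something defined in this
 * header): the top log2(num_subbuf) bits of a commit counter are reserved
 * for the sub-buffer index, so the mask keeps only the remaining low-order
 * bits:
 *
 *	chan->commit_count_mask = ~0UL >> chan->backend.num_subbuf_order;
 *
 * num_subbuf_order is assumed here to be the backend's log2 sub-buffer count.
 */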
/* Per-subbuffer commit counters used on the hot path */
struct commit_counters_hot {
	union v_atomic cc;		/* Commit counter */
	union v_atomic seq;		/* Consecutive commits */
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
/* Per-subbuffer commit counters used only on cold paths */
struct commit_counters_cold {
	union v_atomic cc_sb;		/* Incremented _once_ at sb switch */
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
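/*
 * Illustrative note (sketch, not from this header): the write-side hot path
 * only touches commit_counters_hot (cc accumulates the bytes committed into
 * a sub-buffer, seq orders consecutive commits), while
 * commit_counters_cold.cc_sb is updated a single time when a sub-buffer is
 * switched out, which is what the read side checks before taking a
 * sub-buffer. A reader-side test could look roughly like:
 *
 *	if (v_read(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb)
 *	    != expected_cc_sb(chan, consumed))
 *		return -EAGAIN;	  (sub-buffer not delivered yet)
 *
 * v_read() and shmp_index() are assumed from the companion config/shm
 * headers; expected_cc_sb() is a purely hypothetical placeholder for the
 * consumed-count arithmetic done in the frontend implementation.
 */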
/* ring buffer state */
struct lttng_ust_lib_ring_buffer {
	/* First 32 bytes cache-hot cacheline */
	union v_atomic offset;			/* Current offset in the buffer */
	DECLARE_SHMP(struct commit_counters_hot, commit_hot);
						/* Commit count per sub-buffer */
	long consumed;				/*
						 * Current offset in the buffer
						 * standard atomic access (shared)
						 */
	/* End of first 32 bytes cacheline */
	union v_atomic last_tsc;		/*
						 * Last timestamp written in
						 * the buffer.
						 */

	struct lttng_ust_lib_ring_buffer_backend backend; /* Associated backend */

	DECLARE_SHMP(struct commit_counters_cold, commit_cold);
						/* Commit count per sub-buffer */
	long active_readers;			/*
						 * Active readers count
						 * standard atomic access (shared)
						 */
	long active_shadow_readers;

	union v_atomic records_lost_full;	/* Buffer full */
	union v_atomic records_lost_wrap;	/* Nested wrap-around */
	union v_atomic records_lost_big;	/* Events too big */
	union v_atomic records_count;		/* Number of records written */
	union v_atomic records_overrun;		/* Number of overwritten records */
	//wait_queue_head_t read_wait;		/* reader buffer-level wait queue */
	int finalized;				/* buffer has been finalized */
	//struct timer_list switch_timer;	/* timer for periodical switch */
	//struct timer_list read_timer;		/* timer for read poll */
	unsigned long get_subbuf_consumed;	/* Read-side consumed */
	unsigned long prod_snapshot;		/* Producer count snapshot */
	unsigned long cons_snapshot;		/* Consumer count snapshot */
	int get_subbuf:1;			/* Sub-buffer being held by reader */
	int switch_timer_enabled:1;		/* Protected by ring_buffer_nohz_lock */
	int read_timer_enabled:1;		/* Protected by ring_buffer_nohz_lock */
	/* shmp pointer to self */
	DECLARE_SHMP(struct lttng_ust_lib_ring_buffer, self);
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
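/*
 * Illustrative read-side flow (sketch, not part of this header): a consumer
 * typically snapshots the producer/consumer positions (kept in prod_snapshot
 * and cons_snapshot), takes one sub-buffer, reads it through the backend,
 * then releases it. Assuming the frontend API declared elsewhere
 * (lib_ring_buffer_snapshot(), lib_ring_buffer_get_subbuf(),
 * lib_ring_buffer_put_subbuf()):
 *
 *	unsigned long consumed, produced;
 *
 *	if (!lib_ring_buffer_snapshot(buf, &consumed, &produced, handle)
 *	    && !lib_ring_buffer_get_subbuf(buf, consumed, handle)) {
 *		... read the sub-buffer via the backend ...
 *		lib_ring_buffer_put_subbuf(buf, handle);
 *	}
 */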
static inline
void *channel_get_private(struct channel *chan)
{
	return ((char *) chan) + chan->priv_data_offset;
}
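/*
 * Example use (sketch): a client that asked for extra private storage when
 * the channel was created can get it back through channel_get_private();
 * priv_data_offset is the byte offset of that area from the start of the
 * channel object. "struct my_client_priv" below is a hypothetical
 * client-side type, not something defined by this library:
 *
 *	struct my_client_priv *priv = channel_get_private(chan);
 *
 *	priv->field = ...;
 */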
/*
 * Issue warnings and disable channels upon internal error.
 * Can receive struct channel or struct channel_backend parameter.
 */
#define CHAN_WARN_ON(c, cond)						\
	({								\
		struct channel *__chan;					\
		int _____ret = caa_unlikely(cond);			\
		if (_____ret) {						\
			if (__same_type(*(c), struct channel_backend))	\
				__chan = caa_container_of((void *) (c), struct channel, backend); \
			else if (__same_type(*(c), struct channel))	\
				__chan = (void *) (c);			\
			else						\
				BUG_ON(1);				\
			uatomic_inc(&__chan->record_disabled);		\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})
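/*
 * Example use (sketch): CHAN_WARN_ON() evaluates to the truth value of the
 * condition, so an internal consistency check can warn, disable further
 * record writes on the offending channel (via record_disabled) and bail out
 * in one step. The condition shown is illustrative only:
 *
 *	if (CHAN_WARN_ON(chan, offset > chan->backend.buf_size))
 *		return;
 */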
#endif /* _LINUX_RING_BUFFER_FRONTEND_TYPES_H */