Fix: timestamp_end field should include all events within sub-buffer
[lttng-modules.git] / lib / ringbuffer / frontend_types.h
#ifndef _LIB_RING_BUFFER_FRONTEND_TYPES_H
#define _LIB_RING_BUFFER_FRONTEND_TYPES_H

/*
 * lib/ringbuffer/frontend_types.h
 *
 * Ring Buffer Library Synchronization Header (types).
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 */

#include <linux/kref.h>
#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend_types.h>
#include <wrapper/spinlock.h>
#include <lib/prio_heap/lttng_prio_heap.h>	/* For per-CPU read-side iterator */
#include <lttng-cpuhotplug.h>

/*
 * A sub-buffer switch is performed either during tracing (SWITCH_ACTIVE) or
 * as a final flush at the end of tracing (SWITCH_FLUSH, which does not write
 * into the new sub-buffer).
 */
enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH };

/* channel-level read-side iterator */
struct channel_iter {
	/* Prio heap of buffers. Lowest timestamps at the top. */
	struct lttng_ptr_heap heap;	/* Heap of struct lib_ring_buffer ptrs */
	struct list_head empty_head;	/* Empty buffers linked-list head */
	int read_open;			/* Opened for reading ? */
	u64 last_qs;			/* Last quiescent state timestamp */
	u64 last_timestamp;		/* Last timestamp (for WARN_ON) */
	int last_cpu;			/* Last timestamp cpu */
	/*
	 * read() file operation state.
	 */
	unsigned long len_left;
};

/* channel: collection of per-cpu ring buffers. */
struct channel {
	atomic_t record_disabled;
	unsigned long commit_count_mask;	/*
						 * Commit count mask, removing
						 * the MSBs corresponding to
						 * bits used to represent the
						 * subbuffer index. See the
						 * illustrative sketch after
						 * this struct.
						 */

	struct channel_backend backend;		/* Associated backend */

	unsigned long switch_timer_interval;	/* Buffer flush (jiffies) */
	unsigned long read_timer_interval;	/* Reader wakeup (jiffies) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
	struct lttng_cpuhp_node cpuhp_prepare;
	struct lttng_cpuhp_node cpuhp_online;
	struct lttng_cpuhp_node cpuhp_iter_online;
#else
	struct notifier_block cpu_hp_notifier;	/* CPU hotplug notifier */
	struct notifier_block hp_iter_notifier;	/* hotplug iterator notifier */
	unsigned int cpu_hp_enable:1;		/* Enable CPU hotplug notif. */
	unsigned int hp_iter_enable:1;		/* Enable hp iter notif. */
#endif
	struct notifier_block tick_nohz_notifier; /* CPU nohz notifier */
	wait_queue_head_t read_wait;		/* reader wait queue */
	wait_queue_head_t hp_wait;		/* CPU hotplug wait queue */
	int finalized;				/* Has channel been finalized */
	struct channel_iter iter;		/* Channel read-side iterator */
	struct kref ref;			/* Reference count */
};
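
/*
 * Illustrative sketch (not part of the original header): commit_count_mask
 * keeps only the low-order bits of a commit counter that count committed
 * space within the buffer, discarding the MSBs that act as a sub-buffer
 * reuse (lap) counter. Assuming num_subbuf == 2^order, one way to build a
 * mask consistent with the field comment above is:
 *
 *	chan->commit_count_mask = (~0UL >> order);
 *
 * so that two commit count values can be compared for the same sub-buffer
 * position regardless of how many times the buffer has wrapped. The actual
 * initialization is performed at channel creation in ring_buffer_frontend.c;
 * the line above is only a sketch of that computation.
 */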

/* Per-subbuffer commit counters used on the hot path */
struct commit_counters_hot {
	union v_atomic cc;		/* Commit counter */
	union v_atomic seq;		/* Consecutive commits */
};

/* Per-subbuffer commit counters used only on cold paths */
struct commit_counters_cold {
	union v_atomic cc_sb;		/* Incremented _once_ at sb switch */
};

/* Per-buffer read iterator */
struct lib_ring_buffer_iter {
	u64 timestamp;			/* Current record timestamp */
	size_t header_len;		/* Current record header length */
	size_t payload_len;		/* Current record payload length */

	struct list_head empty_node;	/* Linked list of empty buffers */
	unsigned long consumed, read_offset, data_size;
	enum {
		ITER_GET_SUBBUF = 0,
		ITER_TEST_RECORD,
		ITER_NEXT_RECORD,
		ITER_PUT_SUBBUF,
	} state;
	unsigned int allocated:1;
	unsigned int read_open:1;	/* Opened for reading ? */
};
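
/*
 * Illustrative sketch only: the state field above can be read as a small
 * state machine driven by the read-side code in ring_buffer_iterator.c.
 * The helpers below (get_subbuf(), record_available(), skip_record(),
 * put_subbuf(), deliver_record()) are placeholders, not the real API.
 * A reader loop could look roughly like:
 *
 *	for (;;) {
 *		switch (iter->state) {
 *		case ITER_GET_SUBBUF:
 *			if (get_subbuf(buf))
 *				return -EAGAIN;		(no data available yet)
 *			iter->state = ITER_TEST_RECORD;
 *			break;
 *		case ITER_TEST_RECORD:
 *			if (record_available(buf)) {
 *				iter->state = ITER_NEXT_RECORD;
 *				return deliver_record(buf);
 *			}
 *			iter->state = ITER_PUT_SUBBUF;
 *			break;
 *		case ITER_NEXT_RECORD:
 *			skip_record(buf);
 *			iter->state = ITER_TEST_RECORD;
 *			break;
 *		case ITER_PUT_SUBBUF:
 *			put_subbuf(buf);
 *			iter->state = ITER_GET_SUBBUF;
 *			break;
 *		}
 *	}
 */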

/* ring buffer state */
struct lib_ring_buffer {
	/* First 32 bytes cache-hot cacheline */
	union v_atomic offset;		/* Current offset in the buffer */
	struct commit_counters_hot *commit_hot;
					/* Commit count per sub-buffer */
	atomic_long_t consumed;		/*
					 * Consumed (read) offset in the buffer;
					 * standard atomic access (shared)
					 */
	atomic_t record_disabled;
	/* End of first 32 bytes cacheline */
	union v_atomic last_tsc;	/*
					 * Last timestamp written in the buffer.
					 */

	struct lib_ring_buffer_backend backend;	/* Associated backend */

	struct commit_counters_cold *commit_cold;
					/* Commit count per sub-buffer */
	u64 *ts_end;			/*
					 * timestamp_end per sub-buffer.
					 * Time is sampled by the
					 * switch_*_end() callbacks which
					 * are the last space reservation
					 * performed in the sub-buffer
					 * before it can be fully
					 * committed and delivered. This
					 * time value is then read by
					 * the deliver callback,
					 * performed by the last commit
					 * before the buffer becomes
					 * readable. See the sketch
					 * following this struct.
					 */
	atomic_long_t active_readers;	/*
					 * Active readers count;
					 * standard atomic access (shared)
					 */
	/* Dropped records */
	union v_atomic records_lost_full;	/* Buffer full */
	union v_atomic records_lost_wrap;	/* Nested wrap-around */
	union v_atomic records_lost_big;	/* Events too big */
	union v_atomic records_count;		/* Number of records written */
	union v_atomic records_overrun;		/* Number of overwritten records */
	wait_queue_head_t read_wait;	/* reader buffer-level wait queue */
	wait_queue_head_t write_wait;	/* writer buffer-level wait queue (for metadata only) */
	int finalized;			/* buffer has been finalized */
	struct timer_list switch_timer;	/* timer for periodical switch */
	struct timer_list read_timer;	/* timer for read poll */
	raw_spinlock_t raw_tick_nohz_spinlock;	/* nohz entry lock/trylock */
	struct lib_ring_buffer_iter iter;	/* read-side iterator */
	unsigned long get_subbuf_consumed;	/* Read-side consumed */
	unsigned long prod_snapshot;	/* Producer count snapshot */
	unsigned long cons_snapshot;	/* Consumer count snapshot */
	unsigned int get_subbuf:1,	/* Sub-buffer being held by reader */
		switch_timer_enabled:1,	/* Protected by ring_buffer_nohz_lock */
		read_timer_enabled:1,	/* Protected by ring_buffer_nohz_lock */
		quiescent:1;
};
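
/*
 * Illustrative sketch (simplified; the real code lives in
 * ring_buffer_frontend.c): the ts_end array above is what lets the
 * timestamp_end written into a delivered sub-buffer cover every event in it,
 * as per the fix this header accompanies. The switch-end path samples the
 * time while it still owns the last reserved space of the sub-buffer:
 *
 *	static void switch_old_end_sketch(struct lib_ring_buffer *buf,
 *					  unsigned long oldidx, u64 tsc)
 *	{
 *		buf->ts_end[oldidx] = tsc;	(sampled before the final commit)
 *	}
 *
 * and the deliver path, executed by whichever commit completes the
 * sub-buffer last, reads it back instead of using its own (possibly earlier)
 * timestamp:
 *
 *	static void deliver_sketch(const struct lib_ring_buffer_config *config,
 *				   struct lib_ring_buffer *buf,
 *				   unsigned long idx,
 *				   unsigned long data_size)
 *	{
 *		u64 ts_end = buf->ts_end[idx];
 *
 *		config->cb.buffer_end(buf, ts_end, idx, data_size);
 *	}
 *
 * Names ending in _sketch are placeholders, data_size is assumed to come
 * from the backend, and the exact buffer_end() client callback signature
 * should be taken from the ring buffer config header rather than from this
 * sketch.
 */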

static inline
void *channel_get_private(struct channel *chan)
{
	return chan->backend.priv;
}
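
/*
 * Usage sketch (hypothetical client code, assuming the private pointer was
 * supplied when the channel was created through the frontend API):
 *
 *	struct my_client_state {
 *		int some_field;
 *	};
 *
 *	static void my_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
 *				    unsigned int subbuf_idx)
 *	{
 *		struct channel *chan = buf->backend.chan;
 *		struct my_client_state *state = channel_get_private(chan);
 *
 *		... use state->some_field ...
 *	}
 *
 * my_client_state and my_buffer_begin are placeholders for whatever the
 * client layer defines; channel_get_private() simply hands back the priv
 * pointer stored in the channel backend.
 */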

void lib_ring_buffer_lost_event_too_big(struct channel *chan);

/*
 * Issue warnings and disable channels upon internal error.
 * Can receive struct channel or struct channel_backend parameters.
 */
#define CHAN_WARN_ON(c, cond)						\
	({								\
		struct channel *__chan;					\
		int _____ret = unlikely(cond);				\
		if (_____ret) {						\
			if (__same_type(*(c), struct channel_backend))	\
				__chan = container_of((void *) (c),	\
						      struct channel,	\
						      backend);		\
			else if (__same_type(*(c), struct channel))	\
				__chan = (void *) (c);			\
			else						\
				BUG_ON(1);				\
			atomic_inc(&__chan->record_disabled);		\
			WARN_ON(1);					\
		}							\
		_____ret;						\
	})
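
/*
 * Usage sketch: CHAN_WARN_ON() evaluates the condition, and when it is true
 * it disables record writing on the owning channel, emits a WARN_ON(), and
 * evaluates to nonzero so the caller can bail out. A hypothetical caller
 * holding either a struct channel or a struct channel_backend pointer could
 * write:
 *
 *	if (CHAN_WARN_ON(chan, offset >= chan->backend.buf_size))
 *		return;
 *
 * The offset check above is only an example condition; any internal-error
 * invariant can be used.
 */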

#endif /* _LIB_RING_BUFFER_FRONTEND_TYPES_H */