/*
 * SPDX-License-Identifier: (LGPL-2.1-only or GPL-2.0-only)
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer Library Synchronization Header (internal helpers).
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 */

#ifndef _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H

#include <urcu/compiler.h>
#include <urcu/tls-compat.h>
#include <signal.h>
#include <stdint.h>
#include <pthread.h>

#include <lttng/ringbuffer-context.h>
#include "ringbuffer-config.h"
#include "backend_types.h"
#include "backend_internal.h"
#include "frontend_types.h"
#include "shm.h"

/* Buffer offset macros */

/* buf_trunc mask selects only the buffer number. */
static inline
unsigned long buf_trunc(unsigned long offset,
		struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return offset & ~(chan->backend.buf_size - 1);
}

/* Select the buffer number value (counter). */
static inline
unsigned long buf_trunc_val(unsigned long offset,
		struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
}

/* buf_offset mask selects only the offset within the current buffer. */
static inline
unsigned long buf_offset(unsigned long offset,
		struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return offset & (chan->backend.buf_size - 1);
}

/* subbuf_offset mask selects the offset within the current subbuffer. */
static inline
unsigned long subbuf_offset(unsigned long offset,
		struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return offset & (chan->backend.subbuf_size - 1);
}

/* subbuf_trunc mask selects the subbuffer number. */
static inline
unsigned long subbuf_trunc(unsigned long offset,
		struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return offset & ~(chan->backend.subbuf_size - 1);
}

/* subbuf_align aligns the offset to the next subbuffer. */
static inline
unsigned long subbuf_align(unsigned long offset,
		struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return (offset + chan->backend.subbuf_size)
	       & ~(chan->backend.subbuf_size - 1);
}

/* subbuf_index returns the index of the current subbuffer within the buffer. */
static inline
unsigned long subbuf_index(unsigned long offset,
		struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
}

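/*
 * Worked example of the offset arithmetic above (hypothetical channel
 * geometry, for illustration only): assume subbuf_size = 4096
 * (subbuf_size_order = 12) and num_subbuf = 4, hence buf_size = 16384
 * (buf_size_order = 14). For a write offset of 0x5A3C:
 *
 *	buf_trunc(0x5A3C, chan)     = 0x4000 (second pass over the buffer)
 *	buf_trunc_val(0x5A3C, chan) = 1      (pass counter)
 *	buf_offset(0x5A3C, chan)    = 0x1A3C (offset within the buffer)
 *	subbuf_trunc(0x5A3C, chan)  = 0x5000 (start of current sub-buffer)
 *	subbuf_offset(0x5A3C, chan) = 0xA3C  (offset within the sub-buffer)
 *	subbuf_align(0x5A3C, chan)  = 0x6000 (start of next sub-buffer)
 *	subbuf_index(0x5A3C, chan)  = 1      (second sub-buffer of the buffer)
 *
 * The helpers rely on buf_size and subbuf_size being powers of two:
 * offsets increase monotonically and are only reduced by these masks.
 */
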
/*
 * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
 * bits from the last TSC read. When overflows are detected, the full 64-bit
 * timestamp counter should be written in the record header. Reads and writes
 * last_tsc atomically.
 */

#if (CAA_BITS_PER_LONG == 32)
static inline
void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	/*
	 * Ensure the compiler performs this update in a single instruction.
	 */
	v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
}

static inline
int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	unsigned long tsc_shifted;

	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
	if (caa_unlikely(tsc_shifted
			 - (unsigned long)v_read(config, &buf->last_tsc)))
		return 1;
	else
		return 0;
}
#else
static inline
void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	v_set(config, &buf->last_tsc, (unsigned long)tsc);
}

static inline
int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	if (caa_unlikely((tsc - v_read(config, &buf->last_tsc))
			 >> config->tsc_bits))
		return 1;
	else
		return 0;
}
#endif

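/*
 * Sketch of how a caller is expected to pair the helpers above
 * (hypothetical reservation path, for illustration only): with, e.g.,
 * tsc_bits = 27, record headers carry only the low-order timestamp bits
 * unless the high-order bits changed since the previous record, in
 * which case the full 64-bit value must be written:
 *
 *	uint64_t tsc = my_trace_clock_read64();	// hypothetical clock read
 *
 *	if (last_tsc_overflow(config, buf, tsc))
 *		header_size += sizeof(uint64_t);	// full 64-bit timestamp
 *	save_last_tsc(config, buf, tsc);
 *
 * On 32-bit architectures, only the 64 - tsc_bits high-order bits are
 * kept in last_tsc, so the update remains a single word-sized, hence
 * atomic, store.
 */
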
extern
int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
		void *client_ctx)
	__attribute__((visibility("hidden")));

extern
void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf,
		enum switch_mode mode,
		struct lttng_ust_shm_handle *handle)
	__attribute__((visibility("hidden")));

void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_lib_ring_buffer_channel *chan,
		unsigned long offset,
		unsigned long commit_count,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle,
		uint64_t tsc)
	__attribute__((visibility("hidden")));

/* Buffer write helpers */

static inline
void lib_ring_buffer_reserve_push_reader(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_lib_ring_buffer_channel *chan,
		unsigned long offset)
{
	unsigned long consumed_old, consumed_new;

	do {
		consumed_old = uatomic_read(&buf->consumed);
		/*
		 * If buffer is in overwrite mode, push the reader consumed
		 * count if the write position has reached it and we are not
		 * at the first iteration (don't push the reader farther than
		 * the writer). This operation can be done concurrently by
		 * many writers in the same buffer; the writer at the farthest
		 * write position sub-buffer index in the buffer is the one
		 * which will win this loop.
		 */
		if (caa_unlikely(subbuf_trunc(offset, chan)
				 - subbuf_trunc(consumed_old, chan)
				 >= chan->backend.buf_size))
			consumed_new = subbuf_align(consumed_old, chan);
		else
			return;
	} while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}

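/*
 * Numeric illustration of the push above (same hypothetical geometry as
 * earlier: buf_size = 16384, subbuf_size = 4096): in overwrite mode,
 * with the reader at consumed = 0x1000 and a writer reserving at
 * offset = 0x5000, subbuf_trunc(0x5000) - subbuf_trunc(0x1000) = 0x4000
 * >= buf_size, so the writer cmpxchg-pushes consumed forward to
 * subbuf_align(0x1000) = 0x2000, vacating the sub-buffer it is about to
 * overwrite. A writer losing the cmpxchg race retries with the updated
 * consumed count, or returns once the reader no longer needs pushing.
 */
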
/*
 * Move consumed position to the beginning of subbuffer in which the
 * write offset is. Should only be used on ring buffers that are not
 * actively being written into, because clear_reader does not take into
 * account the commit counters when moving the consumed position, which
 * can make concurrent trace producers or consumers observe the consumed
 * position further than the write offset, which breaks ring buffer
 * algorithm guarantees.
 */
static inline
void lib_ring_buffer_clear_reader(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_channel *chan;
	const struct lttng_ust_lib_ring_buffer_config *config;
	unsigned long offset, consumed_old, consumed_new;

	chan = shmp(handle, buf->backend.chan);
	if (!chan)
		return;
	config = &chan->backend.config;

	do {
		offset = v_read(config, &buf->offset);
		consumed_old = uatomic_read(&buf->consumed);
		CHAN_WARN_ON(chan, (long) (subbuf_trunc(offset, chan)
					   - subbuf_trunc(consumed_old, chan))
				   < 0);
		consumed_new = subbuf_trunc(offset, chan);
	} while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}

static inline
int lib_ring_buffer_pending_data(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return !!subbuf_offset(v_read(config, &buf->offset), chan);
}

static inline
unsigned long lib_ring_buffer_get_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer *buf,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	return subbuffer_get_data_size(config, &buf->backend, idx, handle);
}

/*
 * Check if all space reservations in a buffer have been committed. This helps
 * knowing if an execution context is nested (for per-cpu buffers only).
 * This is a very specific ftrace use-case, so we keep this as "internal" API.
 */
static inline
int lib_ring_buffer_reserve_committed(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_lib_ring_buffer_channel *chan,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long offset, idx, commit_count;
	struct commit_counters_hot *cc_hot;

	CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
	CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);

	/*
	 * Read offset and commit count in a loop so they are both read
	 * atomically wrt interrupts. We deal with interrupt concurrency by
	 * restarting both reads if the offset has been pushed. Note that given
	 * we only have to deal with interrupt concurrency here, an interrupt
	 * modifying the commit count will also modify "offset", so it is safe
	 * to only check for offset modifications.
	 */
	do {
		offset = v_read(config, &buf->offset);
		idx = subbuf_index(offset, chan);
		cc_hot = shmp_index(handle, buf->commit_hot, idx);
		if (caa_unlikely(!cc_hot))
			return 0;
		commit_count = v_read(config, &cc_hot->cc);
	} while (offset != v_read(config, &buf->offset));

	return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		- (commit_count & chan->commit_count_mask) == 0);
}

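/*
 * Sketch of the intended use (hypothetical caller, illustration only):
 * since a return value of 0 means some reservation on this per-cpu
 * buffer is still uncommitted, a probe can detect that it nests over
 * another tracing context, e.g. a signal handler interrupting the
 * commit path:
 *
 *	if (!lib_ring_buffer_reserve_committed(config, buf, chan, handle)) {
 *		// Nested over an uncommitted reservation on this CPU.
 *	}
 */
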
/*
 * Receive end of subbuffer TSC as parameter. It has been read in the
 * space reservation loop of either reserve or switch, which ensures it
 * progresses monotonically with event records in the buffer. Therefore,
 * it ensures that the end timestamp of a subbuffer is <= begin
 * timestamp of the following subbuffers.
 */
static inline
void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_lib_ring_buffer_channel *chan,
		unsigned long offset,
		unsigned long commit_count,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle,
		uint64_t tsc)
{
	unsigned long old_commit_count = commit_count
					 - chan->backend.subbuf_size;

	/* Check if all commits have been done */
	if (caa_unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
			 - (old_commit_count & chan->commit_count_mask) == 0))
		lib_ring_buffer_check_deliver_slow(config, buf, chan, offset,
				commit_count, idx, handle, tsc);
}

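/*
 * Numeric illustration (same hypothetical geometry as above,
 * subbuf_size = 4096, num_subbuf = 4): a record reserved at
 * offset = 0x5A3C sits in sub-buffer 1 during the second pass over the
 * buffer, so buf_trunc(0x5A3C) >> num_subbuf_order = 0x4000 >> 2 = 4096.
 * When that sub-buffer's commit count reaches 2 * 4096 (both passes
 * fully committed), old_commit_count = 8192 - 4096 = 4096 and the
 * subtraction above yields 0: the last committer takes the slow path
 * and delivers the sub-buffer to the consumer.
 */
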
/*
 * lib_ring_buffer_write_commit_counter
 *
 * For flight recording. Must be called after commit.
 * This function increments the subbuffer's commit_seq counter each time the
 * commit count reaches back the reserve offset (modulo subbuffer size). It is
 * useful for crash dump.
 */
static inline
void lib_ring_buffer_write_commit_counter(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
		struct lttng_ust_lib_ring_buffer_channel *chan,
		unsigned long buf_offset,
		unsigned long commit_count,
		struct lttng_ust_shm_handle *handle __attribute__((unused)),
		struct commit_counters_hot *cc_hot)
{
	unsigned long commit_seq_old;

	if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
		return;

	/*
	 * subbuf_offset includes commit_count_mask. We can simply
	 * compare the offsets within the subbuffer without caring about
	 * buffer full/empty mismatch because offset is never zero here
	 * (subbuffer header and record headers have non-zero length).
	 */
	if (caa_unlikely(subbuf_offset(buf_offset - commit_count, chan)))
		return;

	commit_seq_old = v_read(config, &cc_hot->seq);
	if (caa_likely((long) (commit_seq_old - commit_count) < 0))
		v_set(config, &cc_hot->seq, commit_count);
}

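/*
 * Illustration (hypothetical values, subbuf_size = 4096): with
 * commit_count = 6716 and buf_offset = 6716, subbuf_offset(buf_offset
 * - commit_count, chan) is 0, meaning everything reserved up to
 * buf_offset has been committed, so commit_seq may be bumped to 6716.
 * With commit_count = 6000 instead (716 bytes still uncommitted by a
 * concurrent writer), the early return leaves commit_seq untouched; a
 * crash-dump consumer then only trusts data up to the last recorded
 * commit_seq. The final signed comparison keeps commit_seq monotonic
 * when racing updates complete out of order.
 */
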
extern int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
		struct channel_backend *chanb, int cpu,
		struct lttng_ust_shm_handle *handle,
		struct shm_object *shmobj)
	__attribute__((visibility("hidden")));

extern void lib_ring_buffer_free(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle)
	__attribute__((visibility("hidden")));

/* Keep track of trap nesting inside ring buffer code */
extern DECLARE_URCU_TLS(unsigned int, lib_ring_buffer_nesting)
	__attribute__((visibility("hidden")));

#endif /* _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H */