/*
 * SPDX-License-Identifier: (LGPL-2.1-only or GPL-2.0-only)
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer Library Synchronization Header (internal helpers).
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 */

#ifndef _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H

#include <stdint.h>

#include <urcu/compiler.h>
#include <urcu/tls-compat.h>

#include <lttng/ringbuffer-context.h>
#include "ringbuffer-config.h"
#include "backend_types.h"
#include "backend_internal.h"
#include "frontend_types.h"
#include "shm.h"
/* Buffer offset macros */

/* buf_trunc mask selects only the buffer number. */
static inline
unsigned long buf_trunc(unsigned long offset,
			struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return offset & ~(chan->backend.buf_size - 1);
}
/* Select the buffer number value (counter). */
static inline
unsigned long buf_trunc_val(unsigned long offset,
			struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
}
46 /* buf_offset mask selects only the offset within the current buffer. */
48 unsigned long buf_offset(unsigned long offset
,
49 struct lttng_ust_lib_ring_buffer_channel
*chan
)
51 return offset
& (chan
->backend
.buf_size
- 1);
/* subbuf_offset mask selects the offset within the current subbuffer. */
static inline
unsigned long subbuf_offset(unsigned long offset,
			struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return offset & (chan->backend.subbuf_size - 1);
}
/* subbuf_trunc mask selects the subbuffer number. */
static inline
unsigned long subbuf_trunc(unsigned long offset,
			struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return offset & ~(chan->backend.subbuf_size - 1);
}
/* subbuf_align aligns the offset to the next subbuffer. */
static inline
unsigned long subbuf_align(unsigned long offset,
			struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return (offset + chan->backend.subbuf_size)
	       & ~(chan->backend.subbuf_size - 1);
}
/* subbuf_index returns the index of the current subbuffer within the buffer. */
static inline
unsigned long subbuf_index(unsigned long offset,
			struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
}
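
/*
 * Worked example for the offset helpers above, using illustrative values
 * that are not taken from any actual channel configuration: with
 * buf_size = 0x4000 (buf_size_order = 14) and subbuf_size = 0x1000
 * (subbuf_size_order = 12, i.e. 4 sub-buffers per buffer), an offset of
 * 0x6123 decomposes as:
 *   buf_trunc(0x6123)     = 0x4000  (buffer number mask)
 *   buf_trunc_val(0x6123) = 1       (the buffer has wrapped once)
 *   buf_offset(0x6123)    = 0x2123  (offset within the buffer)
 *   subbuf_trunc(0x6123)  = 0x6000  (sub-buffer number mask)
 *   subbuf_offset(0x6123) = 0x123   (offset within the sub-buffer)
 *   subbuf_index(0x6123)  = 2       (third sub-buffer of the buffer)
 *   subbuf_align(0x6123)  = 0x7000  (start of the next sub-buffer)
 */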
/*
 * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
 * bits from the last TSC read. When overflows are detected, the full 64-bit
 * timestamp counter should be written in the record header. Reads and writes
 * last_tsc atomically.
 */

#if (CAA_BITS_PER_LONG == 32)
static inline
void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
		   struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	/*
	 * Ensure the compiler performs this update in a single instruction.
	 */
	v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
}
static inline
int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
		      struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	unsigned long tsc_shifted;

	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
	if (caa_unlikely(tsc_shifted
		     - (unsigned long)v_read(config, &buf->last_tsc)))
		return 1;
	else
		return 0;
}
#else
static inline
void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
		   struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	v_set(config, &buf->last_tsc, (unsigned long)tsc);
}
static inline
int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
		      struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	if (caa_unlikely((tsc - v_read(config, &buf->last_tsc))
		     >> config->tsc_bits))
		return 1;
	else
		return 0;
}
#endif
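
/*
 * Illustrative note (the value below is hypothetical, not a default
 * configuration): with config->tsc_bits = 27, record headers normally carry
 * only the low 27 bits of the timestamp. save_last_tsc() keeps track of the
 * high-order bits (tsc >> 27 on 32-bit, the full value on 64-bit), and
 * last_tsc_overflow() returns non-zero as soon as the current timestamp
 * differs from the saved one in those high-order bits, signalling that the
 * full 64-bit timestamp must be written in the record header.
 */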
extern
int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
				 void *client_ctx)
	__attribute__((visibility("hidden")));
extern
void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf,
				 enum switch_mode mode,
				 struct lttng_ust_shm_handle *handle)
	__attribute__((visibility("hidden")));
extern
void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct lttng_ust_lib_ring_buffer *buf,
				 struct lttng_ust_lib_ring_buffer_channel *chan,
				 unsigned long offset,
				 unsigned long commit_count,
				 unsigned long idx,
				 struct lttng_ust_shm_handle *handle,
				 uint64_t tsc)
	__attribute__((visibility("hidden")));
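
/*
 * The declarations above are out-of-line slow paths backing the ring buffer
 * fast paths; see ring_buffer_frontend.c for more information on the
 * wait-free algorithms they implement.
 */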
/* Buffer write helpers */

static inline
void lib_ring_buffer_reserve_push_reader(struct lttng_ust_lib_ring_buffer *buf,
					 struct lttng_ust_lib_ring_buffer_channel *chan,
					 unsigned long offset)
{
	unsigned long consumed_old, consumed_new;

	do {
		consumed_old = uatomic_read(&buf->consumed);
		/*
		 * If the buffer is in overwrite mode, push the reader consumed
		 * count if the write position has reached it and we are not
		 * at the first iteration (don't push the reader farther than
		 * the writer). This operation can be done concurrently by many
		 * writers in the same buffer; the writer at the farthest
		 * write position sub-buffer index is the one which will win
		 * this loop.
		 */
		if (caa_unlikely(subbuf_trunc(offset, chan)
			      - subbuf_trunc(consumed_old, chan)
			     >= chan->backend.buf_size))
			consumed_new = subbuf_align(consumed_old, chan);
		else
			return;
	} while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}
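
/*
 * Illustrative example for the overwrite-mode push above (hypothetical
 * values): with 4 sub-buffers of 0x1000 bytes (buf_size = 0x4000), a writer
 * reserving at offset 0x5000 while consumed_old is 0x1000 sees
 * subbuf_trunc(0x5000) - subbuf_trunc(0x1000) = 0x4000 >= buf_size, so the
 * consumed count is pushed to subbuf_align(0x1000) = 0x2000, ensuring the
 * reader's consumed position never lags the write position by a full buffer
 * or more.
 */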
/*
 * Move the consumed position to the beginning of the sub-buffer in which the
 * write offset is. Should only be used on ring buffers that are not
 * actively being written into, because clear_reader does not take into
 * account the commit counters when moving the consumed position, which
 * can make concurrent trace producers or consumers observe a consumed
 * position further than the write offset, which breaks ring buffer
 * algorithm guarantees.
 */
static inline
void lib_ring_buffer_clear_reader(struct lttng_ust_lib_ring_buffer *buf,
				  struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_channel *chan;
	const struct lttng_ust_lib_ring_buffer_config *config;
	unsigned long offset, consumed_old, consumed_new;

	chan = shmp(handle, buf->backend.chan);
	if (!chan)
		return;
	config = &chan->backend.config;

	do {
		offset = v_read(config, &buf->offset);
		consumed_old = uatomic_read(&buf->consumed);
		CHAN_WARN_ON(chan, (long) (subbuf_trunc(offset, chan)
				- subbuf_trunc(consumed_old, chan))
				< 0);
		consumed_new = subbuf_trunc(offset, chan);
	} while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}
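
/*
 * Returns non-zero when the write offset is not aligned on a sub-buffer
 * boundary, i.e. the current sub-buffer contains pending data that has not
 * yet been switched out.
 */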
static inline
int lib_ring_buffer_pending_data(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct lttng_ust_lib_ring_buffer *buf,
				 struct lttng_ust_lib_ring_buffer_channel *chan)
{
	return !!subbuf_offset(v_read(config, &buf->offset), chan);
}
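
/*
 * Returns the size of the data written in the sub-buffer at index "idx",
 * as accounted by the ring buffer backend.
 */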
static inline
unsigned long lib_ring_buffer_get_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
					    struct lttng_ust_lib_ring_buffer *buf,
					    unsigned long idx,
					    struct lttng_ust_shm_handle *handle)
{
	return subbuffer_get_data_size(config, &buf->backend, idx, handle);
}
/*
 * Check if all space reservations in a buffer have been committed. This helps
 * knowing if an execution context is nested (for per-cpu buffers only).
 * This is a very specific ftrace use-case, so we keep this as an "internal" API.
 */
static inline
int lib_ring_buffer_reserve_committed(const struct lttng_ust_lib_ring_buffer_config *config,
				      struct lttng_ust_lib_ring_buffer *buf,
				      struct lttng_ust_lib_ring_buffer_channel *chan,
				      struct lttng_ust_shm_handle *handle)
{
	unsigned long offset, idx, commit_count;
	struct commit_counters_hot *cc_hot;

	CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
	CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);

	/*
	 * Read the offset and commit count in a loop so they are both read
	 * atomically wrt interrupts. We deal with interrupt concurrency by
	 * restarting both reads if the offset has been pushed. Note that given
	 * we only have to deal with interrupt concurrency here, an interrupt
	 * modifying the commit count will also modify "offset", so it is safe
	 * to only check for offset modifications.
	 */
	do {
		offset = v_read(config, &buf->offset);
		idx = subbuf_index(offset, chan);
		cc_hot = shmp_index(handle, buf->commit_hot, idx);
		if (caa_unlikely(!cc_hot))
			return 0;
		commit_count = v_read(config, &cc_hot->cc);
	} while (offset != v_read(config, &buf->offset));

	return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		     - (commit_count & chan->commit_count_mask) == 0);
}
/*
 * Receive the end of subbuffer TSC as parameter. It has been read in the
 * space reservation loop of either reserve or switch, which ensures it
 * progresses monotonically with event records in the buffer. Therefore,
 * it ensures that the end timestamp of a subbuffer is <= the begin
 * timestamp of the following subbuffers.
 */
static inline
void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
				   struct lttng_ust_lib_ring_buffer *buf,
				   struct lttng_ust_lib_ring_buffer_channel *chan,
				   unsigned long offset,
				   unsigned long commit_count,
				   unsigned long idx,
				   struct lttng_ust_shm_handle *handle,
				   uint64_t tsc)
{
	unsigned long old_commit_count = commit_count
					 - chan->backend.subbuf_size;

	/* Check if all commits have been done. */
	if (caa_unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		     - (old_commit_count & chan->commit_count_mask) == 0))
		lib_ring_buffer_check_deliver_slow(config, buf, chan, offset,
				commit_count, idx, handle, tsc);
}
/*
 * lib_ring_buffer_write_commit_counter
 *
 * For flight recording. Must be called after commit.
 * This function increments the subbuffer's commit_seq counter each time the
 * commit count reaches back the reserve offset (modulo subbuffer size). It is
 * useful for crash dumps.
 */
static inline
void lib_ring_buffer_write_commit_counter(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
		struct lttng_ust_lib_ring_buffer_channel *chan,
		unsigned long buf_offset,
		unsigned long commit_count,
		struct lttng_ust_shm_handle *handle __attribute__((unused)),
		struct commit_counters_hot *cc_hot)
{
	unsigned long commit_seq_old;

	if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
		return;

	/*
	 * subbuf_offset includes commit_count_mask. We can simply
	 * compare the offsets within the subbuffer without caring about
	 * buffer full/empty mismatch because offset is never zero here
	 * (subbuffer header and record headers have non-zero length).
	 */
	if (caa_unlikely(subbuf_offset(buf_offset - commit_count, chan)))
		return;

	commit_seq_old = v_read(config, &cc_hot->seq);
	if (caa_likely((long) (commit_seq_old - commit_count) < 0))
		v_set(config, &cc_hot->seq, commit_count);
}
extern int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
				  struct channel_backend *chanb, int cpu,
				  struct lttng_ust_shm_handle *handle,
				  struct shm_object *shmobj)
	__attribute__((visibility("hidden")));
extern void lib_ring_buffer_free(struct lttng_ust_lib_ring_buffer *buf,
				 struct lttng_ust_shm_handle *handle)
	__attribute__((visibility("hidden")));
/* Keep track of trap nesting inside ring buffer code. */
extern DECLARE_URCU_TLS(unsigned int, lib_ring_buffer_nesting)
	__attribute__((visibility("hidden")));
#endif /* _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H */