#ifndef _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H
#define _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H

/*
 * linux/ringbuffer/frontend_internal.h
 *
 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer Library Synchronization Header (internal helpers).
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <urcu/compiler.h>

#include "backend_types.h"
#include "frontend_types.h"
/* Buffer offset macros */

/* buf_trunc mask selects only the buffer number. */
static inline
unsigned long buf_trunc(unsigned long offset, struct channel *chan)
{
	return offset & ~(chan->backend.buf_size - 1);
}
/* Select the buffer number value (counter). */
static inline
unsigned long buf_trunc_val(unsigned long offset, struct channel *chan)
{
	return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
}
42 /* buf_offset mask selects only the offset within the current buffer. */
44 unsigned long buf_offset(unsigned long offset
, struct channel
*chan
)
46 return offset
& (chan
->backend
.buf_size
- 1);
/* subbuf_offset mask selects the offset within the current subbuffer. */
static inline
unsigned long subbuf_offset(unsigned long offset, struct channel *chan)
{
	return offset & (chan->backend.subbuf_size - 1);
}
/* subbuf_trunc mask selects the subbuffer number. */
static inline
unsigned long subbuf_trunc(unsigned long offset, struct channel *chan)
{
	return offset & ~(chan->backend.subbuf_size - 1);
}
/* subbuf_align aligns the offset to the next subbuffer. */
static inline
unsigned long subbuf_align(unsigned long offset, struct channel *chan)
{
	return (offset + chan->backend.subbuf_size)
	       & ~(chan->backend.subbuf_size - 1);
}
/* subbuf_index returns the index of the current subbuffer within the buffer. */
static inline
unsigned long subbuf_index(unsigned long offset, struct channel *chan)
{
	return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
}
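
/*
 * Worked example with illustrative sizes (not mandated by the library):
 * assume buf_size = 4096 (buf_size_order = 12) split into four
 * subbuffers of subbuf_size = 1024 (subbuf_size_order = 10). For the
 * free-running offset 0x2a30:
 *
 *   buf_trunc(0x2a30)     = 0x2000  (strip offset within the buffer)
 *   buf_trunc_val(0x2a30) = 0x2     (buffer wrap-around counter)
 *   buf_offset(0x2a30)    = 0xa30   (offset within the buffer)
 *   subbuf_offset(0x2a30) = 0x230   (offset within the subbuffer)
 *   subbuf_trunc(0x2a30)  = 0x2800  (strip offset within the subbuffer)
 *   subbuf_align(0x2a30)  = 0x2c00  (start of the next subbuffer)
 *   subbuf_index(0x2a30)  = 2       (third subbuffer of the buffer)
 *
 * All sizes must be powers of two for these masks to be valid.
 */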
/*
 * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
 * bits from the last TSC read. When overflows are detected, the full 64-bit
 * timestamp counter should be written in the record header. Reads and writes
 * last_tsc atomically.
 */

#if (CAA_BITS_PER_LONG == 32)
static inline
void save_last_tsc(const struct lib_ring_buffer_config *config,
		   struct lib_ring_buffer *buf, u64 tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	/*
	 * Ensure the compiler performs this update in a single instruction.
	 */
	v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
}
static inline
int last_tsc_overflow(const struct lib_ring_buffer_config *config,
		      struct lib_ring_buffer *buf, u64 tsc)
{
	unsigned long tsc_shifted;

	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
	if (unlikely(tsc_shifted
		     - (unsigned long)v_read(config, &buf->last_tsc)))
		return 1;
	else
		return 0;
}
#else
static inline
void save_last_tsc(const struct lib_ring_buffer_config *config,
		   struct lib_ring_buffer *buf, u64 tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	v_set(config, &buf->last_tsc, (unsigned long)tsc);
}
static inline
int last_tsc_overflow(const struct lib_ring_buffer_config *config,
		      struct lib_ring_buffer *buf, u64 tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	if (unlikely((tsc - v_read(config, &buf->last_tsc))
		     >> config->tsc_bits))
		return 1;
	else
		return 0;
}
#endif
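
/*
 * Worked example of the 64-bit overflow check, with an illustrative
 * tsc_bits = 32 (records would then store only the low 32 timestamp
 * bits):
 *   last_tsc = 0x00000001f0000000, tsc = 0x0000000210000000:
 *     delta = 0x20000000, delta >> 32 == 0; the truncated field is
 *     unambiguous and no full timestamp is needed.
 *   last_tsc = 0x00000001f0000000, tsc = 0x0000000300000000:
 *     delta = 0x110000000, delta >> 32 != 0; the low 32 bits alone
 *     would be ambiguous, so the full 64-bit TSC must be written in
 *     the record header.
 */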
extern
int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx);

extern
void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
				 enum switch_mode mode);
/* Buffer write helpers */

static inline
void lib_ring_buffer_reserve_push_reader(struct lib_ring_buffer *buf,
					 struct channel *chan,
					 unsigned long offset)
{
	unsigned long consumed_old, consumed_new;

	do {
		consumed_old = uatomic_read(&buf->consumed);
		/*
		 * If the buffer is in overwrite mode, push the reader consumed
		 * count if the write position has reached it and we are not
		 * at the first iteration (don't push the reader farther than
		 * the writer). This operation can be done concurrently by many
		 * writers in the same buffer: the writer at the farthest
		 * write-position sub-buffer index is the one which will win
		 * this loop.
		 */
		if (unlikely(subbuf_trunc(offset, chan)
			     - subbuf_trunc(consumed_old, chan)
			     >= chan->backend.buf_size))
			consumed_new = subbuf_align(consumed_old, chan);
		else
			return;
	} while (unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
					  consumed_new) != consumed_old));
}
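
/*
 * Illustrative scenario (hypothetical geometry: buf_size = 4096 split
 * into four 1024-byte subbuffers): in overwrite mode, with
 * consumed = 0x0400 and a writer reserving at offset = 0x1400,
 * subbuf_trunc(0x1400) - subbuf_trunc(0x0400) = 0x1000 >= buf_size,
 * so the cmpxchg loop pushes the reader to subbuf_align(0x0400) =
 * 0x0800, sacrificing the oldest subbuffer to the writer. In discard
 * (non-overwrite) mode this condition is not expected to trigger,
 * since reservation fails upstream instead of pushing the reader.
 */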
static inline
void lib_ring_buffer_vmcore_check_deliver(const struct lib_ring_buffer_config *config,
					  struct lib_ring_buffer *buf,
					  unsigned long commit_count,
					  unsigned long idx)
{
	if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
		v_set(config, &buf->commit_hot[idx].seq, commit_count);
}
static inline
int lib_ring_buffer_poll_deliver(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer *buf,
				 struct channel *chan)
{
	unsigned long consumed_old, consumed_idx, commit_count, write_offset;

	consumed_old = uatomic_read(&buf->consumed);
	consumed_idx = subbuf_index(consumed_old, chan);
	commit_count = v_read(config, &buf->commit_cold[consumed_idx].cc_sb);
	/*
	 * No memory barrier here, since we are only interested
	 * in a statistically correct polling result. The next poll will
	 * get the data if we are racing. The mb() that ensures correct
	 * memory order is in get_subbuf.
	 */
	write_offset = v_read(config, &buf->offset);

	/*
	 * Check that the subbuffer we are trying to consume has already
	 * been fully committed.
	 */
	if (((commit_count - chan->backend.subbuf_size)
	     & chan->commit_count_mask)
	    - (buf_trunc(consumed_old, chan)
	       >> chan->backend.num_subbuf_order)
	    != 0)
		return 0;

	/*
	 * Check that we are not about to read the same subbuffer in
	 * which the writer head is.
	 */
	if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_old, chan)
	    == 0)
		return 0;

	return 1;
}
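
/*
 * Worked example of the fully-committed test (hypothetical geometry:
 * four 1024-byte subbuffers, num_subbuf_order = 2): with
 * consumed_old = 0x1400 (second pass over the buffer, subbuffer 1), a
 * fully committed subbuffer 1 has cc_sb = 2 * 1024 = 2048, so
 * (commit_count - subbuf_size) = 1024 matches
 * buf_trunc(0x1400) >> 2 = 1024 and the first test passes. If
 * write_offset = 0x1c00, the writer head sits in a different subbuffer
 * than consumed_old, so the subbuffer is reported readable.
 */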
static inline
int lib_ring_buffer_pending_data(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer *buf,
				 struct channel *chan)
{
	return !!subbuf_offset(v_read(config, &buf->offset), chan);
}
static inline
unsigned long lib_ring_buffer_get_data_size(const struct lib_ring_buffer_config *config,
					    struct lib_ring_buffer *buf,
					    unsigned long idx)
{
	return subbuffer_get_data_size(config, &buf->backend, idx);
}
/*
 * Check if all space reservations in a buffer have been committed. This helps
 * knowing if an execution context is nested (for per-cpu buffers only).
 * This is a very specific ftrace use-case, so we keep this as "internal" API.
 */
static inline
int lib_ring_buffer_reserve_committed(const struct lib_ring_buffer_config *config,
				      struct lib_ring_buffer *buf,
				      struct channel *chan)
{
	unsigned long offset, idx, commit_count;

	CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
	CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);

	/*
	 * Read offset and commit count in a loop so they are both read
	 * atomically wrt interrupts. We deal with interrupt concurrency by
	 * restarting both reads if the offset has been pushed. Note that given
	 * we only have to deal with interrupt concurrency here, an interrupt
	 * modifying the commit count will also modify "offset", so it is safe
	 * to only check for offset modifications.
	 */
	do {
		offset = v_read(config, &buf->offset);
		idx = subbuf_index(offset, chan);
		commit_count = v_read(config, &buf->commit_hot[idx].cc);
	} while (offset != v_read(config, &buf->offset));

	return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		- (commit_count & chan->commit_count_mask) == 0);
}
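
/*
 * Minimal usage sketch (hypothetical caller, not part of this file): a
 * per-cpu instrumentation probe could use this to detect whether it
 * nests over an in-flight reservation on the same buffer:
 *
 *	if (!lib_ring_buffer_reserve_committed(config, buf, chan)) {
 *		// Interrupted context holds an uncommitted reservation.
 *	}
 */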
static inline
void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
				   struct lib_ring_buffer *buf,
				   struct channel *chan,
				   unsigned long offset,
				   unsigned long commit_count,
				   unsigned long idx)
{
	unsigned long old_commit_count = commit_count
					 - chan->backend.subbuf_size;
	u64 tsc;

	/* Check if all commits have been done */
	if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		     - (old_commit_count & chan->commit_count_mask) == 0)) {
		/*
		 * If we succeeded at updating cc_sb below, we are the subbuffer
		 * writer delivering the subbuffer. This deals with concurrent
		 * updates of the "cc" value without adding an add_return atomic
		 * operation to the fast path.
		 *
		 * We are doing the delivery in two steps:
		 * - First, we cmpxchg() cc_sb to the new value
		 *   old_commit_count + 1. This ensures that we are the only
		 *   subbuffer user successfully filling the subbuffer, but we
		 *   do _not_ set the cc_sb value to "commit_count" yet.
		 *   Therefore, other writers that would wrap around the ring
		 *   buffer and try to start writing to our subbuffer would
		 *   have to drop records, because it would appear as
		 *   non-filled.
		 *   We therefore have exclusive access to the subbuffer control
		 *   structures. This mutual exclusion with other writers is
		 *   crucially important to perform record overrun counting
		 *   locklessly in flight recorder mode.
		 * - When we are ready to release the subbuffer (either for
		 *   reading or for overrun by other writers), we simply set the
		 *   cc_sb value to "commit_count" and perform delivery.
		 *
		 * The subbuffer size is at least 2 bytes (minimum size: 1
		 * page). This guarantees that old_commit_count + 1 !=
		 * commit_count.
		 */
		if (likely(v_cmpxchg(config, &buf->commit_cold[idx].cc_sb,
				     old_commit_count, old_commit_count + 1)
			   == old_commit_count)) {
			/*
			 * Start of exclusive subbuffer access. We are
			 * guaranteed to be the last writer in this subbuffer
			 * and any other writer trying to access this subbuffer
			 * in this state is required to drop records.
			 */
			tsc = config->cb.ring_buffer_clock_read(chan);
			v_add(config,
			      subbuffer_get_records_count(config,
							  &buf->backend, idx),
			      &buf->records_count);
			v_add(config,
			      subbuffer_count_records_overrun(config,
							      &buf->backend,
							      idx),
			      &buf->records_overrun);
			config->cb.buffer_end(buf, tsc, idx,
					      lib_ring_buffer_get_data_size(config,
									    buf,
									    idx));

			/*
			 * Set noref flag and offset for this subbuffer id.
			 * Contains a memory barrier that ensures counter stores
			 * are ordered before set noref and offset.
			 */
			lib_ring_buffer_set_noref_offset(config, &buf->backend, idx,
							 buf_trunc_val(offset, chan));

			/*
			 * Order set_noref and record counter updates before the
			 * end of subbuffer exclusive access. Orders with
			 * respect to writers coming into the subbuffer after
			 * wrap around, and also orders wrt concurrent readers.
			 */
			cmm_smp_mb();
			/* End of exclusive subbuffer access */
			v_set(config, &buf->commit_cold[idx].cc_sb,
			      commit_count);
			lib_ring_buffer_vmcore_check_deliver(config, buf,
							     commit_count, idx);

			/*
			 * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
			 */
			if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
			    && uatomic_read(&buf->active_readers)
			    && lib_ring_buffer_poll_deliver(config, buf, chan)) {
				//wake_up_interruptible(&buf->read_wait);
				//wake_up_interruptible(&chan->read_wait);
			}
		}
	}
}
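
/*
 * Illustrative cc_sb trace of the two-step delivery above (assuming
 * subbuf_size = 1024): when subbuffer idx fills for the first time,
 * the final commit yields commit_count = 1024, hence
 * old_commit_count = 0. The sequence of cc_sb values is:
 *   0 --cmpxchg--> 1     (exclusive access; the subbuffer looks
 *                         non-filled to wrapping writers, which must
 *                         drop records)
 *   1 --v_set----> 1024  (delivery: subbuffer released for reading or
 *                         overwrite)
 * On the second pass the same subbuffer goes 1024 -> 1025 -> 2048,
 * and so on.
 */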
/*
 * lib_ring_buffer_write_commit_counter
 *
 * For flight recording: must be called after commit.
 * This function increments the subbuffer's commit_seq counter each time the
 * commit count reaches back the reserve offset (modulo subbuffer size). It is
 * useful for crash dump.
 */
static inline
void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *config,
					  struct lib_ring_buffer *buf,
					  struct channel *chan,
					  unsigned long idx,
					  unsigned long buf_offset,
					  unsigned long commit_count,
					  size_t slot_size)
{
	unsigned long offset, commit_seq_old;

	if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
		return;

	offset = buf_offset + slot_size;

	/*
	 * subbuf_offset includes commit_count_mask. We can simply
	 * compare the offsets within the subbuffer without caring about
	 * buffer full/empty mismatch because offset is never zero here
	 * (subbuffer header and record headers have non-zero length).
	 */
	if (unlikely(subbuf_offset(offset - commit_count, chan)))
		return;

	commit_seq_old = v_read(config, &buf->commit_hot[idx].seq);
	while ((long) (commit_seq_old - commit_count) < 0)
		commit_seq_old = v_cmpxchg(config, &buf->commit_hot[idx].seq,
					   commit_seq_old, commit_count);
}
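
/*
 * Illustrative in-order trace (hypothetical sizes: 1024-byte
 * subbuffer, 16-byte subbuffer header, then a 48-byte record): after
 * the header commit, commit_count = 16 and offset = buf_offset +
 * slot_size = 16, so subbuf_offset(16 - 16) == 0 and commit_seq
 * advances to 16; after the record commit, commit_count = 64 and
 * offset = 64, advancing commit_seq to 64. When commits complete out
 * of order, the test fails for the interleaved commits and commit_seq
 * simply catches up at the next commit where the commit count reaches
 * back the reserve offset.
 */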
extern int lib_ring_buffer_create(struct lib_ring_buffer *buf,
				  struct channel_backend *chanb, int cpu);
extern void lib_ring_buffer_free(struct lib_ring_buffer *buf);
/* Keep track of trap nesting inside ring buffer code */
extern __thread unsigned int lib_ring_buffer_nesting;
#endif /* _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H */