#ifndef _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H

/*
 * libringbuffer/frontend_internal.h
 *
 * Ring Buffer Library Synchronization Header (internal helpers).
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Author:
 *   Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <urcu/compiler.h>
#include <urcu/tls-compat.h>
#include <signal.h>
#include <pthread.h>

#include <lttng/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"

/* Buffer offset macros */

/* buf_trunc mask selects only the buffer number. */
static inline
unsigned long buf_trunc(unsigned long offset, struct channel *chan)
{
	return offset & ~(chan->backend.buf_size - 1);
}

/* Select the buffer number value (counter). */
static inline
unsigned long buf_trunc_val(unsigned long offset, struct channel *chan)
{
	return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
}

/* buf_offset mask selects only the offset within the current buffer. */
static inline
unsigned long buf_offset(unsigned long offset, struct channel *chan)
{
	return offset & (chan->backend.buf_size - 1);
}

/* subbuf_offset mask selects the offset within the current subbuffer. */
static inline
unsigned long subbuf_offset(unsigned long offset, struct channel *chan)
{
	return offset & (chan->backend.subbuf_size - 1);
}

/* subbuf_trunc mask selects the subbuffer number. */
static inline
unsigned long subbuf_trunc(unsigned long offset, struct channel *chan)
{
	return offset & ~(chan->backend.subbuf_size - 1);
}

/* subbuf_align aligns the offset to the next subbuffer. */
static inline
unsigned long subbuf_align(unsigned long offset, struct channel *chan)
{
	return (offset + chan->backend.subbuf_size)
	       & ~(chan->backend.subbuf_size - 1);
}

/* subbuf_index returns the index of the current subbuffer within the buffer. */
static inline
unsigned long subbuf_index(unsigned long offset, struct channel *chan)
{
	return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
}

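/*
 * Worked example (illustrative geometry, not mandated by this header): for a
 * hypothetical channel with buf_size = 4 MiB (buf_size_order = 22),
 * subbuf_size = 1 MiB (subbuf_size_order = 20) and num_subbuf = 4, a
 * free-running write offset of 0x00523456 decomposes as:
 *
 *   buf_trunc(offset, chan)     = 0x00400000  (buffer-aligned part)
 *   buf_trunc_val(offset, chan) = 1           (buffer wrap-around count)
 *   buf_offset(offset, chan)    = 0x00123456  (offset within the buffer)
 *   subbuf_trunc(offset, chan)  = 0x00500000  (subbuffer-aligned part)
 *   subbuf_offset(offset, chan) = 0x00023456  (offset within the subbuffer)
 *   subbuf_index(offset, chan)  = 1           (second subbuffer of the buffer)
 *   subbuf_align(offset, chan)  = 0x00600000  (start of the next subbuffer)
 */
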
/*
 * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
 * bits from the last TSC read. When overflows are detected, the full 64-bit
 * timestamp counter should be written in the record header. Reads and writes
 * last_tsc atomically.
 */

#if (CAA_BITS_PER_LONG == 32)
static inline
void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
		   struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	/*
	 * Ensure the compiler performs this update in a single instruction.
	 */
	v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
}

static inline
int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
		      struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	unsigned long tsc_shifted;

	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
	if (caa_unlikely(tsc_shifted
			 - (unsigned long)v_read(config, &buf->last_tsc)))
		return 1;
	else
		return 0;
}
#else
static inline
void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
		   struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return;

	v_set(config, &buf->last_tsc, (unsigned long)tsc);
}

static inline
int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
		      struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
	if (config->tsc_bits == 0 || config->tsc_bits == 64)
		return 0;

	if (caa_unlikely((tsc - v_read(config, &buf->last_tsc))
			 >> config->tsc_bits))
		return 1;
	else
		return 0;
}
#endif

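/*
 * Worked example (illustrative value; tsc_bits comes from the client
 * configuration): with config->tsc_bits = 27, record headers carry only the
 * low-order 27 bits of the timestamp. When two consecutive records of a
 * buffer are more than 2^27 clock units apart, their values shifted right by
 * 27 bits necessarily differ, last_tsc_overflow() returns non-zero, and the
 * writer emits the full 64-bit timestamp so the reader can resynchronize its
 * clock.
 */
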
extern
int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx);

extern
void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf,
				 enum switch_mode mode,
				 struct lttng_ust_shm_handle *handle);

/* Buffer write helpers */

static inline
void lib_ring_buffer_reserve_push_reader(struct lttng_ust_lib_ring_buffer *buf,
					 struct channel *chan,
					 unsigned long offset)
{
	unsigned long consumed_old, consumed_new;

	do {
		consumed_old = uatomic_read(&buf->consumed);
		/*
		 * If the buffer is in overwrite mode, push the reader
		 * consumed count if the write position has reached it and
		 * we are not at the first iteration (don't push the reader
		 * farther than the writer). This operation can be done
		 * concurrently by many writers in the same buffer; the
		 * writer at the farthest write position in the buffer is
		 * the one which will win this loop.
		 */
		if (caa_unlikely(subbuf_trunc(offset, chan)
				 - subbuf_trunc(consumed_old, chan)
				 >= chan->backend.buf_size))
			consumed_new = subbuf_align(consumed_old, chan);
		else
			return;
	} while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
					      consumed_new) != consumed_old));
}

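/*
 * Illustrative sketch of the compare-and-swap retry loop used above, reduced
 * to a plain free-running counter (standalone example, not part of this
 * header; push_counter_forward() is a hypothetical helper and only
 * <urcu/uatomic.h> is assumed): advance *counter to target unless another
 * thread has already moved it at least that far.
 */
#if 0	/* example only */
#include <urcu/uatomic.h>

static
void push_counter_forward(unsigned long *counter, unsigned long target)
{
	unsigned long old;

	do {
		old = uatomic_read(counter);
		/* Another thread already pushed the counter far enough. */
		if ((long) (target - old) <= 0)
			return;
	} while (uatomic_cmpxchg(counter, old, target) != old);
}
#endif
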
static inline
void lib_ring_buffer_vmcore_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
					  struct lttng_ust_lib_ring_buffer *buf,
					  unsigned long commit_count,
					  unsigned long idx,
					  struct lttng_ust_shm_handle *handle)
{
	if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
		v_set(config, &shmp_index(handle, buf->commit_hot, idx)->seq, commit_count);
}

static inline
int lib_ring_buffer_poll_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct lttng_ust_lib_ring_buffer *buf,
				 struct channel *chan,
				 struct lttng_ust_shm_handle *handle)
{
	unsigned long consumed_old, consumed_idx, commit_count, write_offset;

	consumed_old = uatomic_read(&buf->consumed);
	consumed_idx = subbuf_index(consumed_old, chan);
	commit_count = v_read(config, &shmp_index(handle, buf->commit_cold, consumed_idx)->cc_sb);
	/*
	 * No memory barrier here, since we are only interested
	 * in a statistically correct polling result. The next poll will
	 * get the data if we are racing. The mb() that ensures correct
	 * memory order is in get_subbuf.
	 */
	write_offset = v_read(config, &buf->offset);

	/*
	 * Check that the subbuffer we are trying to consume has been
	 * already fully committed.
	 */
	if (((commit_count - chan->backend.subbuf_size)
	     & chan->commit_count_mask)
	    - (buf_trunc(consumed_old, chan)
	       >> chan->backend.num_subbuf_order)
	    != 0)
		return 0;

	/*
	 * Check that we are not about to read the same subbuffer in
	 * which the writer head is.
	 */
	if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_old, chan)
	    == 0)
		return 0;

	return 1;
}

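/*
 * Worked example of the first check above (illustrative geometry): with
 * subbuf_size = 1 MiB, num_subbuf = 4 (num_subbuf_order = 2) and
 * consumed_old = 0x500000, the consumer targets subbuffer index 1 on the
 * buffer's second lap, and buf_trunc(consumed_old, chan) >>
 * num_subbuf_order = 0x400000 >> 2 = 0x100000. Each lap adds subbuf_size to
 * that subbuffer's commit count, so once the second lap of this subbuffer is
 * fully committed its cc_sb reaches 0x200000, (cc_sb - subbuf_size) matches
 * 0x100000, and the first check passes. The second check still refuses
 * delivery while the write offset points into that same subbuffer.
 */
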
static inline
int lib_ring_buffer_pending_data(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct lttng_ust_lib_ring_buffer *buf,
				 struct channel *chan)
{
	return !!subbuf_offset(v_read(config, &buf->offset), chan);
}

static inline
unsigned long lib_ring_buffer_get_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
					    struct lttng_ust_lib_ring_buffer *buf,
					    unsigned long idx,
					    struct lttng_ust_shm_handle *handle)
{
	return subbuffer_get_data_size(config, &buf->backend, idx, handle);
}

/*
 * Check if all space reservations in a buffer have been committed. This
 * helps to know whether an execution context is nested (for per-cpu buffers
 * only). This is a very specific ftrace use-case, so we keep this as an
 * "internal" API.
 */
static inline
int lib_ring_buffer_reserve_committed(const struct lttng_ust_lib_ring_buffer_config *config,
				      struct lttng_ust_lib_ring_buffer *buf,
				      struct channel *chan,
				      struct lttng_ust_shm_handle *handle)
{
	unsigned long offset, idx, commit_count;

	CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
	CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);

	/*
	 * Read offset and commit count in a loop so they are both read
	 * atomically wrt interrupts. We deal with interrupt concurrency by
	 * restarting both reads if the offset has been pushed. Note that
	 * given we only have to deal with interrupt concurrency here, an
	 * interrupt modifying the commit count will also modify "offset",
	 * so it is safe to only check for offset modifications.
	 */
	do {
		offset = v_read(config, &buf->offset);
		idx = subbuf_index(offset, chan);
		commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, idx)->cc);
	} while (offset != v_read(config, &buf->offset));

	return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		- (commit_count & chan->commit_count_mask) == 0);
}

static inline
void lib_ring_buffer_wakeup(struct lttng_ust_lib_ring_buffer *buf,
			    struct lttng_ust_shm_handle *handle)
{
	int wakeup_fd = shm_get_wakeup_fd(handle, &buf->self._ref);
	sigset_t sigpipe_set, pending_set, old_set;
	int ret, sigpipe_was_pending = 0;

	if (wakeup_fd < 0)
		return;

	/*
	 * Wake-up the other end by writing a null byte in the pipe
	 * (non-blocking). Important note: Because writing into the
	 * pipe is non-blocking (and therefore we allow dropping wakeup
	 * data, as long as there is wakeup data present in the pipe
	 * buffer to wake up the consumer), the consumer should perform
	 * the following sequence for waiting:
	 * 1) empty the pipe (reads).
	 * 2) check if there is data in the buffer.
	 * 3) wait on the pipe (poll).
	 *
	 * Discard the SIGPIPE from write(), not disturbing any SIGPIPE
	 * that might be already pending. If a bogus SIGPIPE is sent to
	 * the entire process concurrently by a malicious user, it may
	 * be simply discarded.
	 */
	ret = sigemptyset(&pending_set);
	assert(!ret);
	/*
	 * sigpending returns the mask of signals that are _both_
	 * blocked for the thread _and_ pending for either the thread or
	 * the entire process.
	 */
	ret = sigpending(&pending_set);
	assert(!ret);
	sigpipe_was_pending = sigismember(&pending_set, SIGPIPE);
	/*
	 * If sigpipe was pending, it means it was already blocked, so
	 * no need to block it.
	 */
	if (!sigpipe_was_pending) {
		ret = sigemptyset(&sigpipe_set);
		assert(!ret);
		ret = sigaddset(&sigpipe_set, SIGPIPE);
		assert(!ret);
		ret = pthread_sigmask(SIG_BLOCK, &sigpipe_set, &old_set);
		assert(!ret);
	}
	do {
		ret = write(wakeup_fd, "", 1);
	} while (ret == -1L && errno == EINTR);
	if (ret == -1L && errno == EPIPE && !sigpipe_was_pending) {
		struct timespec timeout = { 0, 0 };
		do {
			ret = sigtimedwait(&sigpipe_set, NULL,
					   &timeout);
		} while (ret == -1L && errno == EINTR);
	}
	if (!sigpipe_was_pending) {
		ret = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
		assert(!ret);
	}
}

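/*
 * Illustrative consumer-side counterpart of the wake-up scheme described in
 * lib_ring_buffer_wakeup() above (standalone sketch, not part of this
 * header). It assumes a hypothetical non-blocking read end of the wake-up
 * pipe and a hypothetical buffer_has_data() predicate; only standard POSIX
 * calls are used. The pipe is drained before checking for data, so a wake-up
 * byte written after the data check is still pending when poll() runs.
 */
#if 0	/* example only */
#include <poll.h>
#include <unistd.h>
#include <errno.h>

extern int buffer_has_data(void);	/* hypothetical helper */

static
void wait_for_data(int wakeup_read_fd)
{
	struct pollfd fds = { .fd = wakeup_read_fd, .events = POLLIN };
	char drain[32];
	ssize_t len;

	for (;;) {
		/* 1) Empty the pipe (non-blocking reads). */
		do {
			len = read(wakeup_read_fd, drain, sizeof(drain));
		} while (len > 0 || (len < 0 && errno == EINTR));

		/* 2) Check if there is data in the buffer. */
		if (buffer_has_data())
			return;

		/* 3) Wait on the pipe. */
		while (poll(&fds, 1, -1) < 0 && errno == EINTR)
			continue;
	}
}
#endif
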
/*
 * Receive the end-of-subbuffer TSC as parameter. It has been read in the
 * space reservation loop of either reserve or switch, which ensures it
 * progresses monotonically with event records in the buffer. Therefore,
 * it ensures that the end timestamp of a subbuffer is <= the begin
 * timestamp of the following subbuffers.
 */
static inline
void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
				   struct lttng_ust_lib_ring_buffer *buf,
				   struct channel *chan,
				   unsigned long offset,
				   unsigned long commit_count,
				   unsigned long idx,
				   struct lttng_ust_shm_handle *handle,
				   uint64_t tsc)
{
	unsigned long old_commit_count = commit_count
					 - chan->backend.subbuf_size;

	/* Check if all commits have been done */
	if (caa_unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
			 - (old_commit_count & chan->commit_count_mask) == 0)) {
		/*
		 * If we succeed at updating cc_sb below, we are the subbuffer
		 * writer delivering the subbuffer. This deals with concurrent
		 * updates of the "cc" value without adding an add_return
		 * atomic operation to the fast path.
		 *
		 * We are doing the delivery in two steps:
		 * - First, we cmpxchg() cc_sb to the new value
		 *   old_commit_count + 1. This ensures that we are the only
		 *   subbuffer user successfully filling the subbuffer, but we
		 *   do _not_ set the cc_sb value to "commit_count" yet.
		 *   Therefore, other writers that would wrap around the ring
		 *   buffer and try to start writing to our subbuffer would
		 *   have to drop records, because it would appear as
		 *   non-filled.
		 *   We therefore have exclusive access to the subbuffer control
		 *   structures. This mutual exclusion with other writers is
		 *   crucially important to count record overruns in flight
		 *   recorder mode locklessly.
		 * - When we are ready to release the subbuffer (either for
		 *   reading or for overrun by other writers), we simply set the
		 *   cc_sb value to "commit_count" and perform delivery.
		 *
		 * The subbuffer size is at least 2 bytes (minimum size: 1 page).
		 * This guarantees that old_commit_count + 1 != commit_count.
		 */

		/*
		 * Order prior updates to the reserve count prior to the
		 * commit_cold cc_sb update.
		 */
		cmm_smp_wmb();
		if (caa_likely(v_cmpxchg(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
					 old_commit_count, old_commit_count + 1)
			       == old_commit_count)) {
			/*
			 * Start of exclusive subbuffer access. We are
			 * guaranteed to be the last writer in this subbuffer
			 * and any other writer trying to access this subbuffer
			 * in this state is required to drop records.
			 */
			v_add(config,
			      subbuffer_get_records_count(config,
							  &buf->backend,
							  idx, handle),
			      &buf->records_count);
			v_add(config,
			      subbuffer_count_records_overrun(config,
							      &buf->backend,
							      idx, handle),
			      &buf->records_overrun);
			config->cb.buffer_end(buf, tsc, idx,
					      lib_ring_buffer_get_data_size(config,
									    buf,
									    idx,
									    handle),
					      handle);

			/*
			 * Increment the packet counter while we have exclusive
			 * access.
			 */
			subbuffer_inc_packet_count(config, &buf->backend, idx, handle);

			/*
			 * Set noref flag and offset for this subbuffer id.
			 * Contains a memory barrier that ensures counter stores
			 * are ordered before set noref and offset.
			 */
			lib_ring_buffer_set_noref_offset(config, &buf->backend, idx,
							 buf_trunc_val(offset, chan), handle);

			/*
			 * Order set_noref and record counter updates before the
			 * end of subbuffer exclusive access. Orders with
			 * respect to writers coming into the subbuffer after
			 * wrap around, and also orders wrt concurrent readers.
			 */
			cmm_smp_mb();
			/* End of exclusive subbuffer access */
			v_set(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
			      commit_count);
			/*
			 * Order later updates to the reserve count after
			 * the commit cold cc_sb update.
			 */
			cmm_smp_wmb();
			lib_ring_buffer_vmcore_check_deliver(config, buf,
							     commit_count, idx, handle);

			/*
			 * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
			 */
			if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
			    && uatomic_read(&buf->active_readers)
			    && lib_ring_buffer_poll_deliver(config, buf, chan, handle)) {
				lib_ring_buffer_wakeup(buf, handle);
			}
		}
	}
}

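/*
 * Worked example of the two-step delivery above (illustrative numbers):
 * with subbuf_size = 1 MiB, delivering a subbuffer whose cc_sb currently
 * holds old_commit_count = 0x100000 starts by cmpxchg()ing cc_sb to
 * 0x100001. Since subbuf_size >= 2, this transient value can never equal
 * commit_count, so writers wrapping around see the subbuffer as non-filled
 * and drop their records, while concurrent deliverers fail their own
 * cmpxchg(). Once the end-of-subbuffer bookkeeping is done, cc_sb is set to
 * the real commit_count (0x200000 here), which both publishes the subbuffer
 * and ends the exclusive access.
 */
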
/*
 * lib_ring_buffer_write_commit_counter
 *
 * For flight recording: must be called after commit.
 * This function increments the subbuffer's commit_seq counter each time the
 * commit count reaches back the reserve offset (modulo subbuffer size). It is
 * useful for crash dumps.
 */
static inline
void lib_ring_buffer_write_commit_counter(const struct lttng_ust_lib_ring_buffer_config *config,
					  struct lttng_ust_lib_ring_buffer *buf,
					  struct channel *chan,
					  unsigned long idx,
					  unsigned long buf_offset,
					  unsigned long commit_count,
					  struct lttng_ust_shm_handle *handle)
{
	unsigned long commit_seq_old;

	if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
		return;

	/*
	 * subbuf_offset includes commit_count_mask. We can simply
	 * compare the offsets within the subbuffer without caring about
	 * buffer full/empty mismatch because offset is never zero here
	 * (subbuffer header and record headers have non-zero length).
	 */
	if (caa_unlikely(subbuf_offset(buf_offset - commit_count, chan)))
		return;

	commit_seq_old = v_read(config, &shmp_index(handle, buf->commit_hot, idx)->seq);
	while ((long) (commit_seq_old - commit_count) < 0)
		commit_seq_old = v_cmpxchg(config, &shmp_index(handle, buf->commit_hot, idx)->seq,
					   commit_seq_old, commit_count);
}

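/*
 * Worked example (illustrative numbers): suppose the commit hot count of a
 * subbuffer reaches commit_count = 0x2300 while the last byte reserved for
 * that subbuffer ends at buf_offset = 0x2300 within the buffer.
 * subbuf_offset(buf_offset - commit_count, chan) is then 0: everything
 * reserved so far has also been committed, and commit_seq is pushed forward
 * to 0x2300. If instead a slower writer still owes 0x100 bytes
 * (commit_count = 0x2200), the difference is non-zero and commit_seq is left
 * untouched, so a crash-dump reader knows that data beyond 0x2200 in this
 * subbuffer may be incomplete.
 */
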
extern int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
				  struct channel_backend *chanb, int cpu,
				  struct lttng_ust_shm_handle *handle,
				  struct shm_object *shmobj);
extern void lib_ring_buffer_free(struct lttng_ust_lib_ring_buffer *buf,
				 struct lttng_ust_shm_handle *handle);

/* Keep track of trap nesting inside ring buffer code */
extern DECLARE_URCU_TLS(unsigned int, lib_ring_buffer_nesting);

#endif /* _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H */