ring buffer: handle shmp errors
lttng-ust.git: libringbuffer/backend_internal.h
#ifndef _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H

/*
 * libringbuffer/backend_internal.h
 *
 * Ring buffer backend (internal helpers).
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <unistd.h>
#include <urcu/compiler.h>

#include <lttng/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"

/* Ring buffer backend API presented to the frontend */

/* Ring buffer and channel backend create/free */

int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct channel_backend *chan, int cpu,
		struct lttng_ust_shm_handle *handle,
		struct shm_object *shmobj);
void channel_backend_unregister_notifiers(struct channel_backend *chanb);
void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb);
int channel_backend_init(struct channel_backend *chanb,
		const char *name,
		const struct lttng_ust_lib_ring_buffer_config *config,
		size_t subbuf_size,
		size_t num_subbuf, struct lttng_ust_shm_handle *handle,
		const int *stream_fds);
void channel_backend_free(struct channel_backend *chanb,
		struct lttng_ust_shm_handle *handle);

void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct lttng_ust_shm_handle *handle);
void channel_backend_reset(struct channel_backend *chanb);

int lib_ring_buffer_backend_init(void);
void lib_ring_buffer_backend_exit(void);

extern void _lib_ring_buffer_write(struct lttng_ust_lib_ring_buffer_backend *bufb,
		size_t offset, const void *src, size_t len,
		ssize_t pagecpy);

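/*
 * Illustrative sketch only, not part of the upstream API: the helpers in this
 * header follow a common pattern for the "handle shmp errors" change. Every
 * shmp()/shmp_index() translation of a shared-memory reference is checked for
 * NULL, and the helper bails out (returning void, 0 or -EPERM) instead of
 * dereferencing a failed mapping. The function name below is hypothetical,
 * for illustration.
 */
static inline
int example_check_shmp_refs(struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct lttng_ust_shm_handle *handle)
{
	struct channel *chan;

	chan = shmp(handle, bufb->chan);	/* may return NULL if the mapping lookup fails */
	if (!chan)
		return -EPERM;			/* bail out rather than dereference NULL */
	return 0;
}
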
/*
 * Subbuffer ID bits for overwrite mode. They need to fit within a single word
 * so the ID can be exchanged atomically.
 *
 * The top half word, except its lowest bit, belongs to "offset", which keeps
 * count of the produced buffers. For overwrite mode, this lets the consumer
 * read subbuffers in order even when producers write up to 2^15 buffers
 * (or 2^31 on 64-bit systems) concurrently with a single execution of
 * get_subbuf (between offset sampling and subbuffer ID exchange). See the
 * illustrative sketch after the ID accessors below.
 */

#define HALF_ULONG_BITS		(CAA_BITS_PER_LONG >> 1)

#define SB_ID_OFFSET_SHIFT	(HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT	(1UL << SB_ID_OFFSET_SHIFT)
#define SB_ID_OFFSET_MASK	(~(SB_ID_OFFSET_COUNT - 1))
/*
 * Lowest bit of top word half belongs to noref. Used only for overwrite mode.
 */
#define SB_ID_NOREF_SHIFT	(SB_ID_OFFSET_SHIFT - 1)
#define SB_ID_NOREF_COUNT	(1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_NOREF_MASK	SB_ID_NOREF_COUNT
/*
 * In overwrite mode: lowest half of word is used for index.
 * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
 * In producer-consumer mode: whole word used for index.
 */
#define SB_ID_INDEX_SHIFT	0
#define SB_ID_INDEX_COUNT	(1UL << SB_ID_INDEX_SHIFT)
#define SB_ID_INDEX_MASK	(SB_ID_NOREF_COUNT - 1)

/*
 * Construct the subbuffer id from offset, index and noref. Use only the index
 * for producer-consumer mode (offset and noref are only used in overwrite
 * mode).
 */
static inline
unsigned long subbuffer_id(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long offset, unsigned long noref,
		unsigned long index)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (offset << SB_ID_OFFSET_SHIFT)
		       | (noref << SB_ID_NOREF_SHIFT)
		       | index;
	else
		return index;
}

/*
 * Compare offset with the offset contained within id. Return 1 if the offset
 * bits are identical, else 0.
 */
static inline
int subbuffer_id_compare_offset(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long id, unsigned long offset)
{
	return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}

static inline
unsigned long subbuffer_id_get_index(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return id & SB_ID_INDEX_MASK;
	else
		return id;
}

static inline
unsigned long subbuffer_id_is_noref(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return !!(id & SB_ID_NOREF_MASK);
	else
		return 1;
}
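
/*
 * Illustrative sketch only, not part of the upstream API: shows how the
 * accessors above round-trip the fields packed by subbuffer_id() in overwrite
 * mode. The function name and the example field values are hypothetical, for
 * illustration.
 */
static inline
void example_subbuffer_id_roundtrip(const struct lttng_ust_lib_ring_buffer_config *config)
{
	unsigned long id;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;
	/* Pack offset count 5, noref flag set, index 3 into a single word. */
	id = subbuffer_id(config, 5, 1, 3);
	/* Each field can be recovered independently from the packed word: */
	(void) subbuffer_id_get_index(config, id);		/* == 3 */
	(void) subbuffer_id_is_noref(config, id);		/* nonzero: noref is set */
	(void) subbuffer_id_compare_offset(config, id, 5);	/* nonzero: offsets match */
}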

/*
 * Only used by reader on subbuffer ID it has exclusive access to. No volatile
 * needed.
 */
static inline
void subbuffer_id_set_noref(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id |= SB_ID_NOREF_MASK;
}

static inline
void subbuffer_id_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long *id, unsigned long offset)
{
	unsigned long tmp;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		tmp = *id;
		tmp &= ~SB_ID_OFFSET_MASK;
		tmp |= offset << SB_ID_OFFSET_SHIFT;
		tmp |= SB_ID_NOREF_MASK;
		/* Volatile store, read concurrently by readers. */
		CMM_ACCESS_ONCE(*id) = tmp;
	}
}

/* No volatile access, since already used locally */
static inline
void subbuffer_id_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id &= ~SB_ID_NOREF_MASK;
}

/*
 * For overwrite mode, cap the number of subbuffers per buffer to:
 *   2^16 on 32-bit architectures
 *   2^32 on 64-bit architectures
 * This is required to fit in the index part of the ID. Return 0 on success,
 * -EPERM on failure.
 */
static inline
int subbuffer_id_check_index(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long num_subbuf)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
	else
		return 0;
}
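
/*
 * Illustrative sketch only, not part of the upstream API: channel creation
 * code is expected to validate num_subbuf with subbuffer_id_check_index()
 * before allocating the backend, so that the subbuffer index is guaranteed to
 * fit in the ID word. The function name is hypothetical, for illustration.
 */
static inline
int example_validate_num_subbuf(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long num_subbuf)
{
	/*
	 * channel_backend_init() additionally expects power-of-two sizes;
	 * only the index-width check described above is shown here.
	 */
	return subbuffer_id_check_index(config, num_subbuf);
}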

/*
 * The ring buffer can count events recorded and overwritten per buffer,
 * but it is disabled by default due to its performance overhead.
 */
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
		const struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	if (caa_unlikely(!backend_pages)) {
		if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
			return;
	}
	v_inc(config, &backend_pages->records_commit);
}
#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
		const struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, struct lttng_ust_shm_handle *handle)
{
}
#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */

/*
 * Reader has exclusive subbuffer access for record consumption. No need to
 * perform the decrement atomically.
 */
static inline
void subbuffer_consume_record(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct channel *chan;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	chan = shmp(handle, bufb->chan);
	if (!chan)
		return;
	pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
	if (!pages_shmp)
		return;
	backend_pages = shmp(handle, pages_shmp->shmp);
	if (!backend_pages)
		return;
	CHAN_WARN_ON(chan, !v_read(config, &backend_pages->records_unread));
	/* Non-atomic decrement protected by exclusive subbuffer access */
	_v_dec(config, &backend_pages->records_unread);
	v_inc(config, &bufb->records_read);
}

static inline
unsigned long subbuffer_get_records_count(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	return v_read(config, &backend_pages->records_commit);
}

/*
 * Must be executed at subbuffer delivery when the writer has _exclusive_
 * subbuffer access. See lib_ring_buffer_check_deliver() for details.
 * lib_ring_buffer_get_records_count() must be called to get the records
 * count before this function, because it resets the records_commit
 * count.
 */
static inline
unsigned long subbuffer_count_records_overrun(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long overruns, sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	overruns = v_read(config, &backend_pages->records_unread);
	v_set(config, &backend_pages->records_unread,
	      v_read(config, &backend_pages->records_commit));
	v_set(config, &backend_pages->records_commit, 0);

	return overruns;
}
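
/*
 * Illustrative sketch only, not part of the upstream API: the order in which
 * a delivery path is expected to use the two helpers above, since
 * subbuffer_count_records_overrun() resets records_commit. The function name
 * is hypothetical, for illustration.
 */
static inline
void example_deliver_accounting(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long records, overruns;

	/* 1) Sample the commit count for the delivered subbuffer first... */
	records = subbuffer_get_records_count(config, bufb, idx, handle);
	/* 2) ...then fold it into records_unread and reset records_commit. */
	overruns = subbuffer_count_records_overrun(config, bufb, idx, handle);
	(void) records;
	(void) overruns;
}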

static inline
void subbuffer_set_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		unsigned long data_size,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return;
	backend_pages->data_size = data_size;
}

static inline
unsigned long subbuffer_get_read_data_size(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
	if (!pages_shmp)
		return 0;
	backend_pages = shmp(handle, pages_shmp->shmp);
	if (!backend_pages)
		return 0;
	return backend_pages->data_size;
}

static inline
unsigned long subbuffer_get_data_size(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	return backend_pages->data_size;
}

static inline
void subbuffer_inc_packet_count(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_counts *counts;

	counts = shmp_index(handle, bufb->buf_cnt, idx);
	if (!counts)
		return;
	counts->seq_cnt++;
}

/**
 * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
 *                               writer.
 */
static inline
void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long id, new_id;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Perform a volatile access to read the subbuffer ID, because we want
	 * a coherent view of the index and the associated noref flag.
	 */
	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	id = CMM_ACCESS_ONCE(wsb->id);
	for (;;) {
		/* This check is called on the fast path for each record. */
		if (caa_likely(!subbuffer_id_is_noref(config, id))) {
			/*
			 * The store-after-load dependency ordering the writes
			 * to the subbuffer after the load and test of the
			 * noref flag matches the memory barrier implied by
			 * the cmpxchg() in update_read_sb_index().
			 */
			return;	/* Already writing to this buffer */
		}
		new_id = id;
		subbuffer_id_clear_noref(config, &new_id);
		new_id = uatomic_cmpxchg(&wsb->id, id, new_id);
		if (caa_likely(new_id == id))
			break;
		id = new_id;
	}
}

/**
 * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
 *                                    called by writer.
 */
static inline
void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, unsigned long offset,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct channel *chan;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	/*
	 * Because ring_buffer_set_noref() is only called by a single thread
	 * (the one which updated the cc_sb value), there are no concurrent
	 * updates to take care of: other writers have not updated cc_sb, so
	 * they cannot set the noref flag, and concurrent readers cannot modify
	 * the pointer because the noref flag is not set yet.
	 * The smp_wmb() in ring_buffer_commit() takes care of ordering writes
	 * to the subbuffer before this set noref operation.
	 * subbuffer_set_noref() uses a volatile store to deal with concurrent
	 * readers of the noref flag.
	 */
	chan = shmp(handle, bufb->chan);
	if (!chan)
		return;
	CHAN_WARN_ON(chan, subbuffer_id_is_noref(config, wsb->id));
	/*
	 * Memory barrier that ensures counter stores are ordered before set
	 * noref and offset.
	 */
	cmm_smp_mb();
	subbuffer_id_set_noref_offset(config, &wsb->id, offset);
}
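
/*
 * Illustrative sketch only, not part of the upstream API: the writer-side
 * lifecycle of the noref flag described above. On the record write fast path
 * the writer clears noref (taking ownership back from the reader); at
 * subbuffer delivery it republishes the subbuffer with noref and the new
 * offset, making it exchangeable by the consumer. The function name and the
 * "offset_end" parameter are hypothetical, for illustration.
 */
static inline
void example_writer_noref_lifecycle(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, unsigned long offset_end,
		struct lttng_ust_shm_handle *handle)
{
	/* Fast path, before writing a record into subbuffer idx. */
	lib_ring_buffer_clear_noref(config, bufb, idx, handle);

	/* ... the record payload is written, then the subbuffer is delivered ... */

	/* Delivery, with exclusive writer access to the subbuffer. */
	lib_ring_buffer_set_noref_offset(config, bufb, idx, offset_end, handle);
}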

/**
 * update_read_sb_index - Read-side subbuffer index update.
 */
static inline
int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct channel_backend *chanb,
		unsigned long consumed_idx,
		unsigned long consumed_count,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	unsigned long old_id, new_id;

	wsb = shmp_index(handle, bufb->buf_wsb, consumed_idx);
	if (caa_unlikely(!wsb))
		return -EPERM;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		struct channel *chan;

		/*
		 * Exchange the target writer subbuffer with our own unused
		 * subbuffer. No need to use CMM_ACCESS_ONCE() here to read
		 * old_id, because the value read will be confirmed by the
		 * following cmpxchg().
		 */
		old_id = wsb->id;
		if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
			return -EAGAIN;
		/*
		 * Make sure the offset count we are expecting matches the one
		 * indicated by the writer.
		 */
		if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
				consumed_count)))
			return -EAGAIN;
		chan = shmp(handle, bufb->chan);
		if (caa_unlikely(!chan))
			return -EPERM;
		CHAN_WARN_ON(chan, !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
		subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
				consumed_count);
		new_id = uatomic_cmpxchg(&wsb->id, old_id, bufb->buf_rsb.id);
		if (caa_unlikely(old_id != new_id))
			return -EAGAIN;
		bufb->buf_rsb.id = new_id;
	} else {
		/* No page exchange, use the writer page directly */
		bufb->buf_rsb.id = wsb->id;
	}
	return 0;
}
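
/*
 * Illustrative sketch only, not part of the upstream API: how a consumer-side
 * get_subbuf path is expected to use update_read_sb_index(). -EAGAIN means
 * the writer raced with us (subbuffer reused or offset mismatch) and the
 * caller should retry with a re-sampled consumed position; -EPERM means a
 * shared-memory mapping lookup failed. The function name is hypothetical,
 * for illustration.
 */
static inline
int example_reader_exchange_subbuf(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct channel_backend *chanb,
		unsigned long consumed_idx,
		unsigned long consumed_count,
		struct lttng_ust_shm_handle *handle)
{
	int ret;

	ret = update_read_sb_index(config, bufb, chanb, consumed_idx,
			consumed_count, handle);
	if (ret)
		return ret;
	/*
	 * On success, bufb->buf_rsb.id now refers to the subbuffer the reader
	 * owns exclusively; in overwrite mode, the writer received our
	 * previous read-side subbuffer in exchange.
	 */
	return 0;
}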

#ifndef inline_memcpy
#define inline_memcpy(dest, src, n)	memcpy(dest, src, n)
#endif

/*
 * Use the architecture-specific memcpy implementation for constant-sized
 * inputs, but rely on inline_memcpy() when the length is not statically
 * known. A function call to memcpy() is just way too expensive for a fast
 * path.
 */
#define lib_ring_buffer_do_copy(config, dest, src, len)	\
do {							\
	size_t __len = (len);				\
	if (__builtin_constant_p(len))			\
		memcpy(dest, src, __len);		\
	else						\
		inline_memcpy(dest, src, __len);	\
} while (0)
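
/*
 * Illustrative sketch only, not part of the upstream API: with a compile-time
 * constant length the macro above lets the compiler expand memcpy() inline,
 * while a runtime length goes through inline_memcpy() (plain memcpy() by
 * default). The function name is hypothetical, for illustration.
 */
static inline
void example_do_copy_usage(const struct lttng_ust_lib_ring_buffer_config *config,
		char *dest, const void *src, size_t len)
{
	/* Constant size: typically fully inlined by the compiler. */
	lib_ring_buffer_do_copy(config, dest, src, sizeof(unsigned long));
	/* Runtime size: falls back to inline_memcpy(). */
	lib_ring_buffer_do_copy(config, dest + sizeof(unsigned long), src, len);
}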

/*
 * Write len bytes of value c to dest.
 */
static inline
void lib_ring_buffer_do_memset(char *dest, int c, unsigned long len)
{
	unsigned long i;

	for (i = 0; i < len; i++)
		dest[i] = c;
}
/* arch-agnostic implementation */

static inline int lttng_ust_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		/* No need to bit shift on last operation */
		r -= 1;
	}
	return r;
}

static inline int get_count_order(unsigned int count)
{
	int order;

	order = lttng_ust_fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
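
/*
 * Illustrative sketch only, not part of the upstream API: get_count_order()
 * returns the log2 order that covers a count, rounding up for non powers of
 * two, e.g. get_count_order(4096) == 12 and get_count_order(4100) == 13.
 * The function name is hypothetical, for illustration; count must be nonzero.
 */
static inline
size_t example_align_subbuf_size(size_t subbuf_size)
{
	/* Round a requested subbuffer size up to the next power of two. */
	return 1UL << get_count_order((unsigned int) subbuf_size);
}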

#endif /* _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H */