Cleanup: Move lib/ringbuffer/ headers to include/ringbuffer/
lttng-modules.git: include/ringbuffer/backend_internal.h
/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * ringbuffer/backend_internal.h
 *
 * Ring buffer backend (internal helpers).
 *
 * Copyright (C) 2008-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LIB_RING_BUFFER_BACKEND_INTERNAL_H
#define _LIB_RING_BUFFER_BACKEND_INTERNAL_H

#include <wrapper/compiler.h>
#include <wrapper/inline_memcpy.h>
#include <ringbuffer/config.h>
#include <ringbuffer/backend_types.h>
#include <ringbuffer/frontend_types.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Ring buffer backend API presented to the frontend */

/* Ring buffer and channel backend create/free */

int lib_ring_buffer_backend_create(struct lib_ring_buffer_backend *bufb,
				   struct channel_backend *chan, int cpu);
void channel_backend_unregister_notifiers(struct channel_backend *chanb);
void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb);
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lib_ring_buffer_config *config,
			 void *priv, size_t subbuf_size,
			 size_t num_subbuf);
void channel_backend_free(struct channel_backend *chanb);

void lib_ring_buffer_backend_reset(struct lib_ring_buffer_backend *bufb);
void channel_backend_reset(struct channel_backend *chanb);

int lib_ring_buffer_backend_init(void);
void lib_ring_buffer_backend_exit(void);

extern void _lib_ring_buffer_write(struct lib_ring_buffer_backend *bufb,
				   size_t offset, const void *src, size_t len,
				   size_t pagecpy);
extern void _lib_ring_buffer_memset(struct lib_ring_buffer_backend *bufb,
				    size_t offset, int c, size_t len,
				    size_t pagecpy);
extern void _lib_ring_buffer_strcpy(struct lib_ring_buffer_backend *bufb,
				    size_t offset, const char *src, size_t len,
				    size_t pagecpy, int pad);
extern void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
						     size_t offset, const void *src,
						     size_t len, size_t pagecpy);
extern void _lib_ring_buffer_strcpy_from_user_inatomic(struct lib_ring_buffer_backend *bufb,
						       size_t offset, const char __user *src, size_t len,
						       size_t pagecpy, int pad);

/*
 * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
 * exchanged atomically.
 *
 * Top half word, except lowest bit, belongs to "offset", which is used to keep
 * a count of the produced subbuffers. For overwrite mode, this provides the
 * consumer with the capacity to read subbuffers in order, handling the
 * situation where producers would write up to 2^15 buffers (or 2^31 for 64-bit
 * systems) concurrently with a single execution of get_subbuf (between offset
 * sampling and subbuffer ID exchange).
 */

#define HALF_ULONG_BITS		(BITS_PER_LONG >> 1)

#define SB_ID_OFFSET_SHIFT	(HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT	(1UL << SB_ID_OFFSET_SHIFT)
#define SB_ID_OFFSET_MASK	(~(SB_ID_OFFSET_COUNT - 1))
/*
 * Lowest bit of top word half belongs to noref. Used only for overwrite mode.
 */
#define SB_ID_NOREF_SHIFT	(SB_ID_OFFSET_SHIFT - 1)
#define SB_ID_NOREF_COUNT	(1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_NOREF_MASK	SB_ID_NOREF_COUNT
/*
 * In overwrite mode: lowest half of word is used for index.
 * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
 * In producer-consumer mode: whole word used for index.
 */
#define SB_ID_INDEX_SHIFT	0
#define SB_ID_INDEX_COUNT	(1UL << SB_ID_INDEX_SHIFT)
#define SB_ID_INDEX_MASK	(SB_ID_NOREF_COUNT - 1)

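/*
 * Illustrative sketch, assuming BITS_PER_LONG == 64: HALF_ULONG_BITS == 32,
 * SB_ID_OFFSET_SHIFT == 33 and SB_ID_NOREF_SHIFT == 32, giving the layout:
 *
 *	bits 63..33   bit 32   bits 31..0
 *	[ offset  ] [ noref ] [  index   ]
 *
 * An ID built from offset == 5, noref == 1, index == 3 then decomposes as
 * follows (example only, guarded out of the build):
 */
#if 0
	unsigned long id = (5UL << SB_ID_OFFSET_SHIFT)	/* offset bits */
			   | SB_ID_NOREF_MASK		/* noref flag */
			   | 3UL;			/* index bits */

	/* (id & SB_ID_OFFSET_MASK) >> SB_ID_OFFSET_SHIFT == 5 */
	/* !!(id & SB_ID_NOREF_MASK) == 1 */
	/* (id & SB_ID_INDEX_MASK) == 3 */
#endif
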
/*
 * Construct the subbuffer id from offset, index and noref. Use only the index
 * for producer-consumer mode (offset and noref are only used in overwrite
 * mode).
 */
static inline
unsigned long subbuffer_id(const struct lib_ring_buffer_config *config,
			   unsigned long offset, unsigned long noref,
			   unsigned long index)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (offset << SB_ID_OFFSET_SHIFT)
		       | (noref << SB_ID_NOREF_SHIFT)
		       | index;
	else
		return index;
}

/*
 * Compare offset with the offset contained within id. Return 1 if the offset
 * bits are identical, else 0.
 */
static inline
int subbuffer_id_compare_offset(const struct lib_ring_buffer_config *config,
				unsigned long id, unsigned long offset)
{
	return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}

static inline
unsigned long subbuffer_id_get_index(const struct lib_ring_buffer_config *config,
				     unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return id & SB_ID_INDEX_MASK;
	else
		return id;
}

static inline
unsigned long subbuffer_id_is_noref(const struct lib_ring_buffer_config *config,
				    unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return !!(id & SB_ID_NOREF_MASK);
	else
		return 1;
}

/*
 * Only used by the reader on a subbuffer ID it has exclusive access to. No
 * volatile access needed.
 */
static inline
void subbuffer_id_set_noref(const struct lib_ring_buffer_config *config,
			    unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id |= SB_ID_NOREF_MASK;
}

static inline
void subbuffer_id_set_noref_offset(const struct lib_ring_buffer_config *config,
				   unsigned long *id, unsigned long offset)
{
	unsigned long tmp;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		tmp = *id;
		tmp &= ~SB_ID_OFFSET_MASK;
		tmp |= offset << SB_ID_OFFSET_SHIFT;
		tmp |= SB_ID_NOREF_MASK;
		/* Volatile store, read concurrently by readers. */
		WRITE_ONCE(*id, tmp);
	}
}

/* No volatile access, since already used locally */
static inline
void subbuffer_id_clear_noref(const struct lib_ring_buffer_config *config,
			      unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id &= ~SB_ID_NOREF_MASK;
}

/*
 * For overwrite mode, cap the number of subbuffers per buffer to:
 *	2^16 on 32-bit architectures
 *	2^32 on 64-bit architectures
 * This is required to fit in the index part of the ID. Return 0 on success,
 * -EPERM on failure.
 */
static inline
int subbuffer_id_check_index(const struct lib_ring_buffer_config *config,
			     unsigned long num_subbuf)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
	else
		return 0;
}

static inline
void lib_ring_buffer_backend_get_pages(const struct lib_ring_buffer_config *config,
				       struct lib_ring_buffer_ctx *ctx,
				       struct lib_ring_buffer_backend_pages **backend_pages)
{
	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	size_t sbidx, offset = ctx->buf_offset;
	unsigned long sb_bindex, id;
	struct lib_ring_buffer_backend_pages *rpages;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	id = bufb->buf_wsb[sbidx].id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = bufb->array[sb_bindex];
	CHAN_WARN_ON(ctx->chan,
		     config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	*backend_pages = rpages;
}

/* Get backend pages from cache. */
static inline
struct lib_ring_buffer_backend_pages *
lib_ring_buffer_get_backend_pages_from_ctx(const struct lib_ring_buffer_config *config,
					   struct lib_ring_buffer_ctx *ctx)
{
	return ctx->backend_pages;
}

/*
 * The ring buffer can count events recorded and overwritten per buffer, but
 * this counting is disabled by default due to its performance overhead.
 */
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static inline
void subbuffer_count_record(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_backend *bufb,
			    unsigned long idx)
{
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
	v_inc(config, &bufb->array[sb_bindex]->records_commit);
}
#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
static inline
void subbuffer_count_record(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_backend *bufb,
			    unsigned long idx)
{
}
#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
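
/*
 * Note: enabling LTTNG_RING_BUFFER_COUNT_EVENTS is a compile-time choice; one
 * possible (unofficial) way is to add the define to the module build flags,
 * e.g.:
 *
 *	make EXTRA_CFLAGS=-DLTTNG_RING_BUFFER_COUNT_EVENTS
 */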

/*
 * Reader has exclusive subbuffer access for record consumption. No need to
 * perform the decrement atomically.
 */
static inline
void subbuffer_consume_record(const struct lib_ring_buffer_config *config,
			      struct lib_ring_buffer_backend *bufb)
{
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	CHAN_WARN_ON(bufb->chan,
		     !v_read(config, &bufb->array[sb_bindex]->records_unread));
	/* Non-atomic decrement protected by exclusive subbuffer access */
	_v_dec(config, &bufb->array[sb_bindex]->records_unread);
	v_inc(config, &bufb->records_read);
}

static inline
unsigned long subbuffer_get_records_count(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_backend *bufb,
				unsigned long idx)
{
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
	return v_read(config, &bufb->array[sb_bindex]->records_commit);
}

/*
 * Must be executed at subbuffer delivery when the writer has _exclusive_
 * subbuffer access. See lib_ring_buffer_check_deliver() for details.
 * subbuffer_get_records_count() must be called to get the records count
 * before this function, because it resets the records_commit count.
 */
static inline
unsigned long subbuffer_count_records_overrun(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_backend *bufb,
				unsigned long idx)
{
	struct lib_ring_buffer_backend_pages *pages;
	unsigned long overruns, sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
	pages = bufb->array[sb_bindex];
	overruns = v_read(config, &pages->records_unread);
	v_set(config, &pages->records_unread,
	      v_read(config, &pages->records_commit));
	v_set(config, &pages->records_commit, 0);

	return overruns;
}
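
/*
 * Delivery-time ordering sketch (hypothetical caller, simplified): read the
 * records count with subbuffer_get_records_count() _before_ calling
 * subbuffer_count_records_overrun(), since the latter resets records_commit.
 * The variables buf, idx, count and lost below are placeholders.
 */
#if 0
	count = subbuffer_get_records_count(config, &buf->backend, idx);
	lost = subbuffer_count_records_overrun(config, &buf->backend, idx);
#endif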

static inline
void subbuffer_set_data_size(const struct lib_ring_buffer_config *config,
			     struct lib_ring_buffer_backend *bufb,
			     unsigned long idx,
			     unsigned long data_size)
{
	struct lib_ring_buffer_backend_pages *pages;
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
	pages = bufb->array[sb_bindex];
	pages->data_size = data_size;
}

static inline
unsigned long subbuffer_get_read_data_size(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_backend *bufb)
{
	struct lib_ring_buffer_backend_pages *pages;
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	pages = bufb->array[sb_bindex];
	return pages->data_size;
}

static inline
unsigned long subbuffer_get_data_size(
				const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_backend *bufb,
				unsigned long idx)
{
	struct lib_ring_buffer_backend_pages *pages;
	unsigned long sb_bindex;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_wsb[idx].id);
	pages = bufb->array[sb_bindex];
	return pages->data_size;
}

static inline
void subbuffer_inc_packet_count(const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_backend *bufb,
				unsigned long idx)
{
	bufb->buf_cnt[idx].seq_cnt++;
}

/**
 * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
 *                               writer.
 */
static inline
void lib_ring_buffer_clear_noref(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer_backend *bufb,
				 unsigned long idx)
{
	unsigned long id, new_id;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Performing a volatile access to read the sb_pages, because we want
	 * to read a coherent version of the pointer and the associated noref
	 * flag.
	 */
	id = READ_ONCE(bufb->buf_wsb[idx].id);
	for (;;) {
		/* This check is called on the fast path for each record. */
		if (likely(!subbuffer_id_is_noref(config, id))) {
			/*
			 * The store-after-load dependency ordering the writes
			 * to the subbuffer after the load and test of the
			 * noref flag matches the memory barrier implied by
			 * the cmpxchg() in update_read_sb_index().
			 */
			return;	/* Already writing to this buffer */
		}
		new_id = id;
		subbuffer_id_clear_noref(config, &new_id);
		new_id = cmpxchg(&bufb->buf_wsb[idx].id, id, new_id);
		if (likely(new_id == id))
			break;
		id = new_id;
	}
}

/**
 * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
 *                                    called by writer.
 */
static inline
void lib_ring_buffer_set_noref_offset(const struct lib_ring_buffer_config *config,
				      struct lib_ring_buffer_backend *bufb,
				      unsigned long idx, unsigned long offset)
{
	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Because ring_buffer_set_noref() is only called by a single thread
	 * (the one which updated the cc_sb value), there are no concurrent
	 * updates to take care of: other writers have not updated cc_sb, so
	 * they cannot set the noref flag, and concurrent readers cannot modify
	 * the pointer because the noref flag is not set yet.
	 * The smp_wmb() in ring_buffer_commit() takes care of ordering writes
	 * to the subbuffer before this set noref operation.
	 * subbuffer_set_noref() uses a volatile store to deal with concurrent
	 * readers of the noref flag.
	 */
	CHAN_WARN_ON(bufb->chan,
		     subbuffer_id_is_noref(config, bufb->buf_wsb[idx].id));
	/*
	 * Memory barrier that ensures counter stores are ordered before set
	 * noref and offset.
	 */
	smp_mb();
	subbuffer_id_set_noref_offset(config, &bufb->buf_wsb[idx].id, offset);
}

/**
 * update_read_sb_index - Read-side subbuffer index update.
 */
static inline
int update_read_sb_index(const struct lib_ring_buffer_config *config,
			 struct lib_ring_buffer_backend *bufb,
			 struct channel_backend *chanb,
			 unsigned long consumed_idx,
			 unsigned long consumed_count)
{
	unsigned long old_id, new_id;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		/*
		 * Exchange the target writer subbuffer with our own unused
		 * subbuffer. No need to use READ_ONCE() here to read the
		 * old_wpage, because the value read will be confirmed by the
		 * following cmpxchg().
		 */
		old_id = bufb->buf_wsb[consumed_idx].id;
		if (unlikely(!subbuffer_id_is_noref(config, old_id)))
			return -EAGAIN;
		/*
		 * Make sure the offset count we are expecting matches the one
		 * indicated by the writer.
		 */
		if (unlikely(!subbuffer_id_compare_offset(config, old_id,
							  consumed_count)))
			return -EAGAIN;
		CHAN_WARN_ON(bufb->chan,
			     !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
		subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
					      consumed_count);
		new_id = cmpxchg(&bufb->buf_wsb[consumed_idx].id, old_id,
				 bufb->buf_rsb.id);
		if (unlikely(old_id != new_id))
			return -EAGAIN;
		bufb->buf_rsb.id = new_id;
	} else {
		/* No page exchange, use the writer page directly */
		bufb->buf_rsb.id = bufb->buf_wsb[consumed_idx].id;
	}
	return 0;
}
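
/*
 * Consumer-side sketch (hypothetical, simplified; not the actual frontend
 * code): a get_subbuf-style path derives the subbuffer index and expected
 * offset count from its consumed position, then attempts the exchange,
 * treating -EAGAIN as "the writer still owns this subbuffer". subbuf_index()
 * and buf_trunc() are assumed to come from the frontend headers.
 */
#if 0
	consumed_idx = subbuf_index(consumed, chan);
	consumed_count = buf_trunc(consumed, chan)
			 >> chan->backend.subbuf_size_order;
	if (update_read_sb_index(config, &buf->backend, &chan->backend,
				 consumed_idx, consumed_count))
		return -EAGAIN;	/* Retry after the writer delivers. */
#endif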

static inline __attribute__((always_inline))
void lttng_inline_memcpy(void *dest, const void *src,
			 unsigned long len)
{
	switch (len) {
	case 1:
		*(uint8_t *) dest = *(const uint8_t *) src;
		break;
	case 2:
		*(uint16_t *) dest = *(const uint16_t *) src;
		break;
	case 4:
		*(uint32_t *) dest = *(const uint32_t *) src;
		break;
	case 8:
		*(uint64_t *) dest = *(const uint64_t *) src;
		break;
	default:
		inline_memcpy(dest, src, len);
	}
}

/*
 * Use the architecture-specific memcpy implementation for constant-sized
 * inputs, but rely on an inline memcpy when the length is not statically
 * known: a function call to memcpy is just way too expensive for a fast path.
 */
#define lib_ring_buffer_do_copy(config, dest, src, len)		\
do {								\
	size_t __len = (len);					\
	if (__builtin_constant_p(len))				\
		memcpy(dest, src, __len);			\
	else							\
		lttng_inline_memcpy(dest, src, __len);		\
} while (0)
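
/*
 * Usage sketch: with a compile-time constant length, the copy resolves to the
 * (inlinable) architecture memcpy; with a runtime length, it goes through
 * lttng_inline_memcpy(). The names dest, src and reclen are placeholders.
 */
#if 0
	lib_ring_buffer_do_copy(config, dest, src, sizeof(uint64_t)); /* constant */
	lib_ring_buffer_do_copy(config, dest, src, reclen);	      /* runtime */
#endif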

/*
 * We use __copy_from_user_inatomic to copy userspace data since we already
 * did the access_ok for the whole range.
 *
 * Return 0 if OK, nonzero on error.
 */
static inline
unsigned long lib_ring_buffer_do_copy_from_user_inatomic(void *dest,
							 const void __user *src,
							 unsigned long len)
{
	return __copy_from_user_inatomic(dest, src, len);
}
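
/*
 * Caller-side sketch (hypothetical, simplified): the user range is expected
 * to have been validated with access_ok() beforehand, and the atomic copy is
 * performed with page faults disabled, with a fallback (placeholder label
 * below) when the copy cannot complete atomically:
 */
#if 0
	if (!access_ok(src, len))
		goto fill_zero;		/* placeholder fallback */
	pagefault_disable();
	ret = lib_ring_buffer_do_copy_from_user_inatomic(dest, src, len);
	pagefault_enable();
	if (ret)
		goto fill_zero;		/* nonzero: bytes left uncopied */
#endif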

/*
 * Write len bytes of value c to dest.
 */
static inline
void lib_ring_buffer_do_memset(char *dest, int c,
			       unsigned long len)
{
	unsigned long i;

	for (i = 0; i < len; i++)
		dest[i] = c;
}

#endif /* _LIB_RING_BUFFER_BACKEND_INTERNAL_H */