Remove handle field from ring buffer context
[lttng-ust.git] libringbuffer/backend_internal.h
/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring buffer backend (internal helpers).
 */

#ifndef _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <urcu/compiler.h>

#include <lttng/ringbuffer-context.h>
#include "ringbuffer-config.h"
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"

/* Ring buffer backend API presented to the frontend */

/* Ring buffer and channel backend create/free */

__attribute__((visibility("hidden")))
int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct channel_backend *chan,
		int cpu,
		struct lttng_ust_shm_handle *handle,
		struct shm_object *shmobj);

__attribute__((visibility("hidden")))
void channel_backend_unregister_notifiers(struct channel_backend *chanb);

__attribute__((visibility("hidden")))
void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb);

__attribute__((visibility("hidden")))
int channel_backend_init(struct channel_backend *chanb,
		const char *name,
		const struct lttng_ust_lib_ring_buffer_config *config,
		size_t subbuf_size,
		size_t num_subbuf, struct lttng_ust_shm_handle *handle,
		const int *stream_fds);

__attribute__((visibility("hidden")))
void channel_backend_free(struct channel_backend *chanb,
		struct lttng_ust_shm_handle *handle);

__attribute__((visibility("hidden")))
void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct lttng_ust_shm_handle *handle);

__attribute__((visibility("hidden")))
void channel_backend_reset(struct channel_backend *chanb);

__attribute__((visibility("hidden")))
int lib_ring_buffer_backend_init(void);

__attribute__((visibility("hidden")))
void lib_ring_buffer_backend_exit(void);

__attribute__((visibility("hidden")))
extern void _lib_ring_buffer_write(struct lttng_ust_lib_ring_buffer_backend *bufb,
		size_t offset, const void *src, size_t len,
		ssize_t pagecpy);

/*
 * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
 * exchanged atomically.
 *
 * Top half of the word, except its lowest bit, belongs to "offset", which is
 * used to count the produced subbuffers. For overwrite mode, this provides the
 * consumer with the ability to read subbuffers in order, handling the
 * situation where producers would write up to 2^15 subbuffers (or 2^31 for
 * 64-bit systems) concurrently with a single execution of get_subbuf (between
 * offset sampling and subbuffer ID exchange).
 */

#define HALF_ULONG_BITS		(CAA_BITS_PER_LONG >> 1)

#define SB_ID_OFFSET_SHIFT	(HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT	(1UL << SB_ID_OFFSET_SHIFT)
#define SB_ID_OFFSET_MASK	(~(SB_ID_OFFSET_COUNT - 1))
/*
 * Lowest bit of top word half belongs to noref. Used only for overwrite mode.
 */
#define SB_ID_NOREF_SHIFT	(SB_ID_OFFSET_SHIFT - 1)
#define SB_ID_NOREF_COUNT	(1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_NOREF_MASK	SB_ID_NOREF_COUNT
/*
 * In overwrite mode: lowest half of word is used for index.
 * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
 * In producer-consumer mode: whole word used for index.
 */
#define SB_ID_INDEX_SHIFT	0
#define SB_ID_INDEX_COUNT	(1UL << SB_ID_INDEX_SHIFT)
#define SB_ID_INDEX_MASK	(SB_ID_NOREF_COUNT - 1)
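
/*
 * Layout illustration derived from the macros above. On a 64-bit
 * architecture (HALF_ULONG_BITS == 32), an overwrite-mode subbuffer ID is:
 *
 *	bits 33..63	offset	(31 bits of produced-subbuffer count)
 *	bit  32		noref	(SB_ID_NOREF_MASK)
 *	bits 0..31	index	(SB_ID_INDEX_MASK)
 *
 * On 32-bit, bits 17..31 hold the offset (15 bits), bit 16 the noref flag,
 * and bits 0..15 the index, hence the 2^16 subbuffer limit.
 */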

/*
 * Construct the subbuffer id from offset, index and noref. Use only the index
 * for producer-consumer mode (offset and noref are only used in overwrite
 * mode).
 */
static inline
unsigned long subbuffer_id(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long offset, unsigned long noref,
		unsigned long index)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (offset << SB_ID_OFFSET_SHIFT)
			| (noref << SB_ID_NOREF_SHIFT)
			| index;
	else
		return index;
}
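
/*
 * For example, backend initialization can build the initial ID of writer
 * subbuffer i with a zero offset and the noref flag set:
 *
 *	wsb[i].id = subbuffer_id(config, 0, 1, i);
 *
 * (Illustrative sketch; the actual initialization lives in the backend
 * allocation code.)
 */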

/*
 * Compare offset with the offset contained within id. Return 1 if the offset
 * bits are identical, else 0.
 */
static inline
int subbuffer_id_compare_offset(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long id, unsigned long offset)
{
	return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}

static inline
unsigned long subbuffer_id_get_index(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return id & SB_ID_INDEX_MASK;
	else
		return id;
}

static inline
unsigned long subbuffer_id_is_noref(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return !!(id & SB_ID_NOREF_MASK);
	else
		return 1;
}

/*
 * Only used by the reader, on a subbuffer ID it has exclusive access to.
 * No volatile access needed.
 */
static inline
void subbuffer_id_set_noref(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id |= SB_ID_NOREF_MASK;
}

static inline
void subbuffer_id_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long *id, unsigned long offset)
{
	unsigned long tmp;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		tmp = *id;
		tmp &= ~SB_ID_OFFSET_MASK;
		tmp |= offset << SB_ID_OFFSET_SHIFT;
		tmp |= SB_ID_NOREF_MASK;
		/* Volatile store, read concurrently by readers. */
		CMM_ACCESS_ONCE(*id) = tmp;
	}
}

/* No volatile access, since already used locally */
static inline
void subbuffer_id_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id &= ~SB_ID_NOREF_MASK;
}

/*
 * For overwrite mode, cap the number of subbuffers per buffer to:
 *   2^16 on 32-bit architectures
 *   2^32 on 64-bit architectures
 * This is required to fit in the index part of the ID. Return 0 on success,
 * -EPERM on failure.
 */
static inline
int subbuffer_id_check_index(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long num_subbuf)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
	else
		return 0;
}

static inline
int lib_ring_buffer_backend_get_pages(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_lib_ring_buffer_backend_pages **backend_pages)
{
	struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	struct lttng_ust_shm_handle *handle = ctx->chan->handle;
	size_t sbidx;
	size_t offset = ctx->buf_offset;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	unsigned long sb_bindex, id;
	struct lttng_ust_lib_ring_buffer_backend_pages *_backend_pages;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	wsb = shmp_index(handle, bufb->buf_wsb, sbidx);
	if (caa_unlikely(!wsb))
		return -1;
	id = wsb->id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (caa_unlikely(!rpages))
		return -1;
	CHAN_WARN_ON(ctx->chan,
		     config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	_backend_pages = shmp(handle, rpages->shmp);
	if (caa_unlikely(!_backend_pages))
		return -1;
	*backend_pages = _backend_pages;
	return 0;
}
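
/*
 * Note: the lookup above relies on buf_size being a power of two:
 * "offset & (buf_size - 1)" wraps the offset within the buffer, and shifting
 * by subbuf_size_order selects the subbuffer. For example, with a 4 kB buffer
 * split into four 1 kB subbuffers (subbuf_size_order = 10), offset 5120 wraps
 * to 1024, which maps to subbuffer index 1.
 */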

/* Get backend pages from cache. */
static inline
struct lttng_ust_lib_ring_buffer_backend_pages *
lib_ring_buffer_get_backend_pages_from_ctx(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	return ctx->backend_pages;
}

/*
 * The ring buffer can count events recorded and overwritten per buffer, but
 * this counting is disabled by default due to its performance overhead.
 * Build with -DLTTNG_RING_BUFFER_COUNT_EVENTS to enable it.
 */
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
		const struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	if (caa_unlikely(!backend_pages)) {
		if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
			return;
	}
	v_inc(config, &backend_pages->records_commit);
}
#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
		const struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, struct lttng_ust_shm_handle *handle)
{
}
#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */

/*
 * Reader has exclusive subbuffer access for record consumption. No need to
 * perform the decrement atomically.
 */
static inline
void subbuffer_consume_record(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_channel *chan;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	chan = shmp(handle, bufb->chan);
	if (!chan)
		return;
	pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
	if (!pages_shmp)
		return;
	backend_pages = shmp(handle, pages_shmp->shmp);
	if (!backend_pages)
		return;
	CHAN_WARN_ON(chan, !v_read(config, &backend_pages->records_unread));
	/* Non-atomic decrement protected by exclusive subbuffer access */
	_v_dec(config, &backend_pages->records_unread);
	v_inc(config, &bufb->records_read);
}

static inline
unsigned long subbuffer_get_records_count(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	return v_read(config, &backend_pages->records_commit);
}

/*
 * Must be executed at subbuffer delivery when the writer has _exclusive_
 * subbuffer access. See lib_ring_buffer_check_deliver() for details.
 * subbuffer_get_records_count() must be called to get the records count
 * before this function, because this function resets the records_commit
 * count.
 */
static inline
unsigned long subbuffer_count_records_overrun(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long overruns, sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	overruns = v_read(config, &backend_pages->records_unread);
	v_set(config, &backend_pages->records_unread,
	      v_read(config, &backend_pages->records_commit));
	v_set(config, &backend_pages->records_commit, 0);

	return overruns;
}

static inline
void subbuffer_set_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		unsigned long data_size,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return;
	backend_pages->data_size = data_size;
}

static inline
unsigned long subbuffer_get_read_data_size(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
	if (!pages_shmp)
		return 0;
	backend_pages = shmp(handle, pages_shmp->shmp);
	if (!backend_pages)
		return 0;
	return backend_pages->data_size;
}

static inline
unsigned long subbuffer_get_data_size(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	return backend_pages->data_size;
}

static inline
void subbuffer_inc_packet_count(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_counts *counts;

	counts = shmp_index(handle, bufb->buf_cnt, idx);
	if (!counts)
		return;
	counts->seq_cnt++;
}

/**
 * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
 *                               writer.
 */
static inline
void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long id, new_id;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Perform a volatile read of the subbuffer id, because we want a
	 * coherent view of the id and of its associated noref flag.
	 */
	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	id = CMM_ACCESS_ONCE(wsb->id);
	for (;;) {
		/* This check is called on the fast path for each record. */
		if (caa_likely(!subbuffer_id_is_noref(config, id))) {
			/*
			 * Store-after-load ordering of the writes to the
			 * subbuffer (after the load and test of the noref
			 * flag) pairs with the memory barrier implied by the
			 * cmpxchg() in update_read_sb_index().
			 */
			return;	/* Already writing to this buffer */
		}
		new_id = id;
		subbuffer_id_clear_noref(config, &new_id);
		new_id = uatomic_cmpxchg(&wsb->id, id, new_id);
		if (caa_likely(new_id == id))
			break;
		id = new_id;
	}
}

/**
 * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
 *                                    called by writer.
 */
static inline
void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, unsigned long offset,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_channel *chan;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	/*
	 * Because lib_ring_buffer_set_noref_offset() is only called by a
	 * single thread (the one which updated the cc_sb value), there are
	 * no concurrent updates to take care of: other writers have not
	 * updated cc_sb, so they cannot set the noref flag, and concurrent
	 * readers cannot modify the pointer because the noref flag is not
	 * set yet.
	 * The memory barrier in lib_ring_buffer_commit() takes care of
	 * ordering writes to the subbuffer before this set-noref operation.
	 * subbuffer_id_set_noref_offset() uses a volatile store to deal with
	 * concurrent readers of the noref flag.
	 */
	chan = shmp(handle, bufb->chan);
	if (!chan)
		return;
	CHAN_WARN_ON(chan, subbuffer_id_is_noref(config, wsb->id));
	/*
	 * Memory barrier that ensures counter stores are ordered before set
	 * noref and offset.
	 */
	cmm_smp_mb();
	subbuffer_id_set_noref_offset(config, &wsb->id, offset);
}

/**
 * update_read_sb_index - Read-side subbuffer index update.
 */
static inline
int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct channel_backend *chanb,
		unsigned long consumed_idx,
		unsigned long consumed_count,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	unsigned long old_id, new_id;

	wsb = shmp_index(handle, bufb->buf_wsb, consumed_idx);
	if (caa_unlikely(!wsb))
		return -EPERM;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		struct lttng_ust_lib_ring_buffer_channel *chan;

		/*
		 * Exchange the target writer subbuffer with our own unused
		 * subbuffer. No need to use CMM_ACCESS_ONCE() here to read
		 * old_id, because the value read will be confirmed by the
		 * following cmpxchg().
		 */
		old_id = wsb->id;
		if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
			return -EAGAIN;
		/*
		 * Make sure the offset count we are expecting matches the one
		 * indicated by the writer.
		 */
		if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
							      consumed_count)))
			return -EAGAIN;
		chan = shmp(handle, bufb->chan);
		if (caa_unlikely(!chan))
			return -EPERM;
		CHAN_WARN_ON(chan, !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
		subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
					      consumed_count);
		new_id = uatomic_cmpxchg(&wsb->id, old_id, bufb->buf_rsb.id);
		if (caa_unlikely(old_id != new_id))
			return -EAGAIN;
		bufb->buf_rsb.id = new_id;
	} else {
		/* No page exchange, use the writer page directly */
		bufb->buf_rsb.id = wsb->id;
	}
	return 0;
}
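
/*
 * In overwrite mode, the exchange above swaps subbuffers: the reader hands
 * its spare subbuffer (buf_rsb, marked noref and tagged with the consumed
 * offset) to the writer's slot, and takes exclusive ownership of the
 * subbuffer that was at consumed_idx. A consumer-side caller would retry on
 * -EAGAIN, e.g. (illustrative sketch):
 *
 *	if (update_read_sb_index(config, &buf->backend, &chan->backend,
 *			consumed_idx, consumed_count, handle))
 *		goto retry;	// writer raced us; re-sample the consumed count
 */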

#ifndef inline_memcpy
#define inline_memcpy(dest, src, n)	memcpy(dest, src, n)
#endif

static inline __attribute__((always_inline))
void lttng_inline_memcpy(void *dest, const void *src,
		unsigned long len)
{
	switch (len) {
	case 1:
		*(uint8_t *) dest = *(const uint8_t *) src;
		break;
	case 2:
		*(uint16_t *) dest = *(const uint16_t *) src;
		break;
	case 4:
		*(uint32_t *) dest = *(const uint32_t *) src;
		break;
	case 8:
		*(uint64_t *) dest = *(const uint64_t *) src;
		break;
	default:
		inline_memcpy(dest, src, len);
	}
}

/*
 * Use the architecture-specific memcpy implementation for constant-sized
 * inputs (the compiler can expand those inline), but rely on
 * lttng_inline_memcpy() when the length is not statically known: a function
 * call to memcpy() is just way too expensive for a fast path.
 */
#define lib_ring_buffer_do_copy(config, dest, src, len)		\
do {								\
	size_t __len = (len);					\
	if (__builtin_constant_p(len))				\
		memcpy(dest, src, __len);			\
	else							\
		lttng_inline_memcpy(dest, src, __len);		\
} while (0)
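
/*
 * For example, a fixed-size event header hits the constant-folded memcpy()
 * branch and compiles down to a few inline stores, while a runtime-sized
 * payload goes through lttng_inline_memcpy() (names illustrative):
 *
 *	lib_ring_buffer_do_copy(config, p, &header, sizeof(header));
 *	lib_ring_buffer_do_copy(config, p + sizeof(header), payload, payload_len);
 */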

/*
 * Fill len bytes of dest with the byte value c.
 */
static inline
void lib_ring_buffer_do_memset(char *dest, char c, unsigned long len)
{
	unsigned long i;

	for (i = 0; i < len; i++)
		dest[i] = c;
}

/* Architecture-agnostic fls (find last set bit) implementation. */

static inline int lttng_ust_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		/* No need to bit shift on last operation */
		r -= 1;
	}
	return r;
}
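
/*
 * lttng_ust_fls() returns the 1-based position of the most significant set
 * bit, or 0 if x is 0. For example:
 *
 *	lttng_ust_fls(0)          == 0
 *	lttng_ust_fls(1)          == 1
 *	lttng_ust_fls(8)          == 4
 *	lttng_ust_fls(0x80000000) == 32
 */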

static inline int get_count_order(unsigned int count)
{
	int order;

	order = lttng_ust_fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
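
/*
 * get_count_order() returns ceil(log2(count)): the order of the smallest
 * power of two >= count. For example:
 *
 *	get_count_order(1)  == 0
 *	get_count_order(8)  == 3
 *	get_count_order(12) == 4	(12 rounds up to 16 == 2^4)
 */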

#endif /* _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H */