#ifndef _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H

/*
 * libringbuffer/backend_internal.h
 *
 * Ring buffer backend (internal helpers).
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <unistd.h>
#include <urcu/compiler.h>

#include <lttng/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"

/* Ring buffer backend API presented to the frontend */

/* Ring buffer and channel backend create/free */

int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct channel_backend *chan, int cpu,
		struct lttng_ust_shm_handle *handle,
		struct shm_object *shmobj);
void channel_backend_unregister_notifiers(struct channel_backend *chanb);
void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb);
int channel_backend_init(struct channel_backend *chanb,
		const char *name,
		const struct lttng_ust_lib_ring_buffer_config *config,
		size_t subbuf_size,
		size_t num_subbuf, struct lttng_ust_shm_handle *handle,
		const int *stream_fds);
void channel_backend_free(struct channel_backend *chanb,
		struct lttng_ust_shm_handle *handle);

void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct lttng_ust_shm_handle *handle);
void channel_backend_reset(struct channel_backend *chanb);

int lib_ring_buffer_backend_init(void);
void lib_ring_buffer_backend_exit(void);

extern void _lib_ring_buffer_write(struct lttng_ust_lib_ring_buffer_backend *bufb,
		size_t offset, const void *src, size_t len,
		ssize_t pagecpy);

/*
 * Subbuffer ID bits for overwrite mode. They need to fit within a single word
 * so the ID can be exchanged atomically.
 *
 * The top half of the word, except its lowest bit, holds the "offset", which
 * keeps count of the produced subbuffers. In overwrite mode, this gives the
 * consumer the ability to read subbuffers in order, handling the situation
 * where producers write up to 2^15 buffers (or 2^31 on 64-bit systems)
 * concurrently with a single execution of get_subbuf (between offset sampling
 * and subbuffer ID exchange).
 */

#define HALF_ULONG_BITS		(CAA_BITS_PER_LONG >> 1)

#define SB_ID_OFFSET_SHIFT	(HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT	(1UL << SB_ID_OFFSET_SHIFT)
#define SB_ID_OFFSET_MASK	(~(SB_ID_OFFSET_COUNT - 1))
/*
 * The lowest bit of the top half-word is the noref flag. It is used only in
 * overwrite mode.
 */
#define SB_ID_NOREF_SHIFT	(SB_ID_OFFSET_SHIFT - 1)
#define SB_ID_NOREF_COUNT	(1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_NOREF_MASK	SB_ID_NOREF_COUNT
/*
 * In overwrite mode, the lower half of the word is used for the index.
 * This limits a buffer to 2^16 subbuffers on 32-bit, 2^32 on 64-bit.
 * In producer-consumer mode, the whole word is used for the index.
 */
#define SB_ID_INDEX_SHIFT	0
#define SB_ID_INDEX_COUNT	(1UL << SB_ID_INDEX_SHIFT)
#define SB_ID_INDEX_MASK	(SB_ID_NOREF_COUNT - 1)

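/*
 * For instance, on a 64-bit target (CAA_BITS_PER_LONG == 64) the macros above
 * resolve to: HALF_ULONG_BITS == 32, SB_ID_OFFSET_SHIFT == 33 (offset in bits
 * 33..63), SB_ID_NOREF_SHIFT == 32 (noref flag in bit 32) and
 * SB_ID_INDEX_MASK == 0xffffffff (index in bits 0..31). On a 32-bit target
 * the split becomes bits 17..31 (offset), bit 16 (noref) and bits 0..15
 * (index), which is where the 2^16 subbuffer limit enforced by
 * subbuffer_id_check_index() comes from.
 */
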
/*
 * Construct the subbuffer id from offset, index and noref. Use only the index
 * for producer-consumer mode (offset and noref are only used in overwrite
 * mode).
 */
static inline
unsigned long subbuffer_id(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long offset, unsigned long noref,
		unsigned long index)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (offset << SB_ID_OFFSET_SHIFT)
			| (noref << SB_ID_NOREF_SHIFT)
			| index;
	else
		return index;
}

/*
 * Compare offset with the offset contained within id. Return 1 if the offset
 * bits are identical, else 0.
 */
static inline
int subbuffer_id_compare_offset(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long id, unsigned long offset)
{
	return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
}

static inline
unsigned long subbuffer_id_get_index(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return id & SB_ID_INDEX_MASK;
	else
		return id;
}

static inline
unsigned long subbuffer_id_is_noref(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return !!(id & SB_ID_NOREF_MASK);
	else
		return 1;
}

/*
 * Only used by reader on subbuffer ID it has exclusive access to. No volatile
 * needed.
 */
static inline
void subbuffer_id_set_noref(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id |= SB_ID_NOREF_MASK;
}

static inline
void subbuffer_id_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long *id, unsigned long offset)
{
	unsigned long tmp;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		tmp = *id;
		tmp &= ~SB_ID_OFFSET_MASK;
		tmp |= offset << SB_ID_OFFSET_SHIFT;
		tmp |= SB_ID_NOREF_MASK;
		/* Volatile store, read concurrently by readers. */
		CMM_ACCESS_ONCE(*id) = tmp;
	}
}

/* No volatile access, since already used locally */
static inline
void subbuffer_id_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long *id)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		*id &= ~SB_ID_NOREF_MASK;
}

/*
 * For overwrite mode, cap the number of subbuffers per buffer to:
 *	2^16 on 32-bit architectures
 *	2^32 on 64-bit architectures
 * This is required to fit in the index part of the ID. Return 0 on success,
 * -EPERM on failure.
 */
static inline
int subbuffer_id_check_index(const struct lttng_ust_lib_ring_buffer_config *config,
		unsigned long num_subbuf)
{
	if (config->mode == RING_BUFFER_OVERWRITE)
		return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
	else
		return 0;
}

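/*
 * Map the current write position to the backend pages that back it: compute
 * the subbuffer index from ctx->buf_offset, read the writer-side subbuffer ID
 * for that index, and resolve the matching backend pages through the
 * shared-memory handle. Returns 0 on success, -1 if any shared-memory pointer
 * cannot be resolved.
 */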
static inline
int lib_ring_buffer_backend_get_pages(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_lib_ring_buffer_backend_pages **backend_pages)
{
	struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	size_t sbidx;
	size_t offset = ctx->buf_offset;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	unsigned long sb_bindex, id;
	struct lttng_ust_lib_ring_buffer_backend_pages *_backend_pages;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	wsb = shmp_index(handle, bufb->buf_wsb, sbidx);
	if (caa_unlikely(!wsb))
		return -1;
	id = wsb->id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (caa_unlikely(!rpages))
		return -1;
	CHAN_WARN_ON(ctx->chan,
		config->mode == RING_BUFFER_OVERWRITE
		&& subbuffer_id_is_noref(config, id));
	_backend_pages = shmp(handle, rpages->shmp);
	if (caa_unlikely(!_backend_pages))
		return -1;
	*backend_pages = _backend_pages;
	return 0;
}

/* Get backend pages from cache. */
static inline
struct lttng_ust_lib_ring_buffer_backend_pages *
lib_ring_buffer_get_backend_pages_from_ctx(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
	if (caa_unlikely(ctx->ctx_len
			< sizeof(struct lttng_ust_lib_ring_buffer_ctx)))
		return NULL;
	return ctx->backend_pages;
}

/*
 * The ring buffer can count events recorded and overwritten per buffer,
 * but it is disabled by default due to its performance overhead.
 */
#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
		const struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
	if (caa_unlikely(!backend_pages)) {
		if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
			return;
	}
	v_inc(config, &backend_pages->records_commit);
}
#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
static inline
void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
		const struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, struct lttng_ust_shm_handle *handle)
{
}
#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
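/*
 * Note: the per-subbuffer record counter above is compiled in only when
 * LTTNG_RING_BUFFER_COUNT_EVENTS is defined at build time (for example by
 * adding -DLTTNG_RING_BUFFER_COUNT_EVENTS to CFLAGS), at the cost of one
 * extra counter increment on the write fast path.
 */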

/*
 * Reader has exclusive subbuffer access for record consumption. No need to
 * perform the decrement atomically.
 */
static inline
void subbuffer_consume_record(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct channel *chan;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	chan = shmp(handle, bufb->chan);
	if (!chan)
		return;
	pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
	if (!pages_shmp)
		return;
	backend_pages = shmp(handle, pages_shmp->shmp);
	if (!backend_pages)
		return;
	CHAN_WARN_ON(chan, !v_read(config, &backend_pages->records_unread));
	/* Non-atomic decrement protected by exclusive subbuffer access */
	_v_dec(config, &backend_pages->records_unread);
	v_inc(config, &bufb->records_read);
}

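/*
 * Return the number of records committed to the subbuffer currently backing
 * slot idx, or 0 if the shared-memory pointers cannot be resolved.
 */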
static inline
unsigned long subbuffer_get_records_count(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	return v_read(config, &backend_pages->records_commit);
}

/*
 * Must be executed at subbuffer delivery when the writer has _exclusive_
 * subbuffer access. See lib_ring_buffer_check_deliver() for details.
 * subbuffer_get_records_count() must be called to get the records count
 * before this function, because it resets the records_commit count.
 */
static inline
unsigned long subbuffer_count_records_overrun(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long overruns, sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	overruns = v_read(config, &backend_pages->records_unread);
	v_set(config, &backend_pages->records_unread,
		v_read(config, &backend_pages->records_commit));
	v_set(config, &backend_pages->records_commit, 0);

	return overruns;
}

static inline
void subbuffer_set_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		unsigned long data_size,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return;
	backend_pages->data_size = data_size;
}

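/*
 * Return the data size of the subbuffer currently held by the reader
 * (buf_rsb), or 0 if the shared-memory pointers cannot be resolved.
 */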
static inline
unsigned long subbuffer_get_read_data_size(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
	pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
	if (!pages_shmp)
		return 0;
	backend_pages = shmp(handle, pages_shmp->shmp);
	if (!backend_pages)
		return 0;
	return backend_pages->data_size;
}

static inline
unsigned long subbuffer_get_data_size(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long sb_bindex;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return 0;
	sb_bindex = subbuffer_id_get_index(config, wsb->id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (!rpages)
		return 0;
	backend_pages = shmp(handle, rpages->shmp);
	if (!backend_pages)
		return 0;
	return backend_pages->data_size;
}

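/*
 * Increment the packet (sequence) counter of the subbuffer at slot idx.
 * Silently returns if the counter array cannot be resolved from shared
 * memory.
 */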
static inline
void subbuffer_inc_packet_count(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_counts *counts;

	counts = shmp_index(handle, bufb->buf_cnt, idx);
	if (!counts)
		return;
	counts->seq_cnt++;
}

/**
 * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
 * writer.
 */
static inline
void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long id, new_id;
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Perform a volatile access to read the subbuffer ID, because we want
	 * to read a coherent version of the pointer and the associated noref
	 * flag.
	 */
	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	id = CMM_ACCESS_ONCE(wsb->id);
	for (;;) {
		/* This check is called on the fast path for each record. */
		if (caa_likely(!subbuffer_id_is_noref(config, id))) {
			/*
			 * The store-after-load dependency, which orders writes
			 * to the subbuffer after the load and test of the
			 * noref flag, matches the memory barrier implied by
			 * the cmpxchg() in update_read_sb_index().
			 */
			return;	/* Already writing to this buffer */
		}
		new_id = id;
		subbuffer_id_clear_noref(config, &new_id);
		new_id = uatomic_cmpxchg(&wsb->id, id, new_id);
		if (caa_likely(new_id == id))
			break;
		id = new_id;
	}
}

/**
 * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
 * called by writer.
 */
static inline
void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		unsigned long idx, unsigned long offset,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	struct channel *chan;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	wsb = shmp_index(handle, bufb->buf_wsb, idx);
	if (!wsb)
		return;
	/*
	 * Because ring_buffer_set_noref() is only called by a single thread
	 * (the one which updated the cc_sb value), there are no concurrent
	 * updates to take care of: other writers have not updated cc_sb, so
	 * they cannot set the noref flag, and concurrent readers cannot modify
	 * the pointer because the noref flag is not set yet.
	 * The smp_wmb() in ring_buffer_commit() takes care of ordering writes
	 * to the subbuffer before this set noref operation.
	 * subbuffer_set_noref() uses a volatile store to deal with concurrent
	 * readers of the noref flag.
	 */
	chan = shmp(handle, bufb->chan);
	if (!chan)
		return;
	CHAN_WARN_ON(chan, subbuffer_id_is_noref(config, wsb->id));
	/*
	 * Memory barrier that ensures counter stores are ordered before set
	 * noref and offset.
	 */
	cmm_smp_mb();
	subbuffer_id_set_noref_offset(config, &wsb->id, offset);
}

/**
 * update_read_sb_index - Read-side subbuffer index update.
 */
static inline
int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_backend *bufb,
		struct channel_backend *chanb,
		unsigned long consumed_idx,
		unsigned long consumed_count,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
	unsigned long old_id, new_id;

	wsb = shmp_index(handle, bufb->buf_wsb, consumed_idx);
	if (caa_unlikely(!wsb))
		return -EPERM;

	if (config->mode == RING_BUFFER_OVERWRITE) {
		struct channel *chan;

		/*
		 * Exchange the target writer subbuffer with our own unused
		 * subbuffer. No need to use CMM_ACCESS_ONCE() here to read the
		 * old_wpage, because the value read will be confirmed by the
		 * following cmpxchg().
		 */
		old_id = wsb->id;
		if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
			return -EAGAIN;
		/*
		 * Make sure the offset count we are expecting matches the one
		 * indicated by the writer.
		 */
		if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
				consumed_count)))
			return -EAGAIN;
		chan = shmp(handle, bufb->chan);
		if (caa_unlikely(!chan))
			return -EPERM;
		CHAN_WARN_ON(chan, !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
		subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
				consumed_count);
		new_id = uatomic_cmpxchg(&wsb->id, old_id, bufb->buf_rsb.id);
		if (caa_unlikely(old_id != new_id))
			return -EAGAIN;
		bufb->buf_rsb.id = new_id;
	} else {
		/* No page exchange, use the writer page directly */
		bufb->buf_rsb.id = wsb->id;
	}
	return 0;
}

#ifndef inline_memcpy
#define inline_memcpy(dest, src, n) memcpy(dest, src, n)
#endif

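/*
 * Copy small power-of-two sizes (1, 2, 4 and 8 bytes) with a single typed
 * store, and fall back to inline_memcpy() for any other length.
 */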
static inline __attribute__((always_inline))
void lttng_inline_memcpy(void *dest, const void *src,
		unsigned long len)
{
	switch (len) {
	case 1:
		*(uint8_t *) dest = *(const uint8_t *) src;
		break;
	case 2:
		*(uint16_t *) dest = *(const uint16_t *) src;
		break;
	case 4:
		*(uint32_t *) dest = *(const uint32_t *) src;
		break;
	case 8:
		*(uint64_t *) dest = *(const uint64_t *) src;
		break;
	default:
		inline_memcpy(dest, src, len);
	}
}

/*
 * Use the architecture-specific memcpy implementation for constant-sized
 * inputs, but fall back to the inline copy above when the length is not
 * statically known: an actual function call to memcpy() is far too expensive
 * for the fast path.
 */
#define lib_ring_buffer_do_copy(config, dest, src, len)		\
do {								\
	size_t __len = (len);					\
	if (__builtin_constant_p(len))				\
		memcpy(dest, src, __len);			\
	else							\
		lttng_inline_memcpy(dest, src, __len);		\
} while (0)
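/*
 * For example, copying a fixed-size field (here a hypothetical 32-bit value
 * "val"):
 *	lib_ring_buffer_do_copy(config, dest, &val, sizeof(uint32_t));
 * resolves to a constant-size memcpy() that the compiler can expand inline,
 * whereas a length computed at run time goes through lttng_inline_memcpy().
 */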

/*
 * Fill len bytes of dest with the byte c.
 */
static inline
void lib_ring_buffer_do_memset(char *dest, int c, unsigned long len)
{
	unsigned long i;

	for (i = 0; i < len; i++)
		dest[i] = c;
}

/* arch-agnostic implementation */

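/*
 * Find-last-set: returns the 1-based position of the most significant set
 * bit, or 0 if no bit is set. For example, lttng_ust_fls(0) == 0,
 * lttng_ust_fls(1) == 1, lttng_ust_fls(0x90) == 8 and
 * lttng_ust_fls(0x80000000U) == 32.
 */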
static inline int lttng_ust_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		/* No need to bit shift on last operation */
		r -= 1;
	}
	return r;
}

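/*
 * Order (log2, rounded up) of the smallest power of two greater than or equal
 * to count. For example, get_count_order(1) == 0, get_count_order(8) == 3 and
 * get_count_order(9) == 4.
 */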
static inline int get_count_order(unsigned int count)
{
	int order;

	order = lttng_ust_fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

#endif /* _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H */