#ifndef _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
#define _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H

/*
 * libringbuffer/backend_internal.h
 *
 * Ring buffer backend (internal helpers).
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <errno.h>		/* -EPERM/-EAGAIN return values below */
#include <string.h>		/* memcpy() */
#include <unistd.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>	/* uatomic_cmpxchg() */

#include <lttng/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"

/* Ring buffer backend API presented to the frontend */

/* Ring buffer and channel backend create/free */

int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct channel_backend *chan, int cpu,
				   struct lttng_ust_shm_handle *handle,
				   struct shm_object *shmobj);
void channel_backend_unregister_notifiers(struct channel_backend *chanb);
void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb);
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lttng_ust_lib_ring_buffer_config *config,
			 size_t subbuf_size,
			 size_t num_subbuf, struct lttng_ust_shm_handle *handle,
			 const int *stream_fds);
void channel_backend_free(struct channel_backend *chanb,
			  struct lttng_ust_shm_handle *handle);

void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct lttng_ust_shm_handle *handle);
void channel_backend_reset(struct channel_backend *chanb);

int lib_ring_buffer_backend_init(void);
void lib_ring_buffer_backend_exit(void);

extern void _lib_ring_buffer_write(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   size_t offset, const void *src, size_t len,
				   ssize_t pagecpy);

/*
 * Subbuffer ID bits for overwrite mode. The ID needs to fit within a single
 * word so that it can be exchanged atomically.
 *
 * The top half of the word, except its lowest bit, holds the "offset", which
 * is used to count the produced buffers. In overwrite mode, this provides the
 * consumer with the capacity to read subbuffers in order, handling the
 * situation where producers would write up to 2^15 buffers (or 2^31 on 64-bit
 * systems) concurrently with a single execution of get_subbuf (between offset
 * sampling and subbuffer ID exchange).
 */

#define HALF_ULONG_BITS		(CAA_BITS_PER_LONG >> 1)

#define SB_ID_OFFSET_SHIFT	(HALF_ULONG_BITS + 1)
#define SB_ID_OFFSET_COUNT	(1UL << SB_ID_OFFSET_SHIFT)
#define SB_ID_OFFSET_MASK	(~(SB_ID_OFFSET_COUNT - 1))
/*
 * The lowest bit of the top half of the word holds the "noref" flag. Used
 * only in overwrite mode.
 */
#define SB_ID_NOREF_SHIFT	(SB_ID_OFFSET_SHIFT - 1)
#define SB_ID_NOREF_COUNT	(1UL << SB_ID_NOREF_SHIFT)
#define SB_ID_NOREF_MASK	SB_ID_NOREF_COUNT
/*
 * In overwrite mode, the lower half of the word holds the subbuffer index:
 * this limits buffers to 2^16 subbuffers on 32-bit, 2^32 on 64-bit.
 * In producer-consumer mode, the whole word holds the index.
 */
#define SB_ID_INDEX_SHIFT	0
#define SB_ID_INDEX_COUNT	(1UL << SB_ID_INDEX_SHIFT)
#define SB_ID_INDEX_MASK	(SB_ID_NOREF_COUNT - 1)
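
/*
 * Worked example of the resulting layout (derived from the macros above,
 * for illustration only): with CAA_BITS_PER_LONG == 64, HALF_ULONG_BITS
 * is 32, so SB_ID_OFFSET_SHIFT == 33 and SB_ID_NOREF_SHIFT == 32:
 *
 *	bits 63..33	offset (31 bits)
 *	bit  32		noref flag
 *	bits 31..0	index (32 bits)
 *
 * On 32-bit: bits 31..17 offset, bit 16 noref, bits 15..0 index.
 */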
95 | ||
96 | /* | |
97 | * Construct the subbuffer id from offset, index and noref. Use only the index | |
98 | * for producer-consumer mode (offset and noref are only used in overwrite | |
99 | * mode). | |
100 | */ | |
101 | static inline | |
4cfec15c | 102 | unsigned long subbuffer_id(const struct lttng_ust_lib_ring_buffer_config *config, |
852c2936 MD |
103 | unsigned long offset, unsigned long noref, |
104 | unsigned long index) | |
105 | { | |
106 | if (config->mode == RING_BUFFER_OVERWRITE) | |
107 | return (offset << SB_ID_OFFSET_SHIFT) | |
108 | | (noref << SB_ID_NOREF_SHIFT) | |
109 | | index; | |
110 | else | |
111 | return index; | |
112 | } | |
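
/*
 * For instance (illustrative; see the backend initialization code), each
 * writer-side subbuffer table entry can be seeded as
 * subbuffer_id(config, 0, 1, i): offset 0, noref flag set, index i.
 */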
113 | ||
114 | /* | |
115 | * Compare offset with the offset contained within id. Return 1 if the offset | |
116 | * bits are identical, else 0. | |
117 | */ | |
118 | static inline | |
4cfec15c | 119 | int subbuffer_id_compare_offset(const struct lttng_ust_lib_ring_buffer_config *config, |
852c2936 MD |
120 | unsigned long id, unsigned long offset) |
121 | { | |
122 | return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT); | |
123 | } | |
124 | ||
125 | static inline | |
4cfec15c | 126 | unsigned long subbuffer_id_get_index(const struct lttng_ust_lib_ring_buffer_config *config, |
852c2936 MD |
127 | unsigned long id) |
128 | { | |
129 | if (config->mode == RING_BUFFER_OVERWRITE) | |
130 | return id & SB_ID_INDEX_MASK; | |
131 | else | |
132 | return id; | |
133 | } | |
134 | ||
135 | static inline | |
4cfec15c | 136 | unsigned long subbuffer_id_is_noref(const struct lttng_ust_lib_ring_buffer_config *config, |
852c2936 MD |
137 | unsigned long id) |
138 | { | |
139 | if (config->mode == RING_BUFFER_OVERWRITE) | |
140 | return !!(id & SB_ID_NOREF_MASK); | |
141 | else | |
142 | return 1; | |
143 | } | |
144 | ||
145 | /* | |
146 | * Only used by reader on subbuffer ID it has exclusive access to. No volatile | |
147 | * needed. | |
148 | */ | |
149 | static inline | |
4cfec15c | 150 | void subbuffer_id_set_noref(const struct lttng_ust_lib_ring_buffer_config *config, |
852c2936 MD |
151 | unsigned long *id) |
152 | { | |
153 | if (config->mode == RING_BUFFER_OVERWRITE) | |
154 | *id |= SB_ID_NOREF_MASK; | |
155 | } | |
156 | ||
157 | static inline | |
4cfec15c | 158 | void subbuffer_id_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config, |
852c2936 MD |
159 | unsigned long *id, unsigned long offset) |
160 | { | |
161 | unsigned long tmp; | |
162 | ||
163 | if (config->mode == RING_BUFFER_OVERWRITE) { | |
164 | tmp = *id; | |
165 | tmp &= ~SB_ID_OFFSET_MASK; | |
166 | tmp |= offset << SB_ID_OFFSET_SHIFT; | |
167 | tmp |= SB_ID_NOREF_MASK; | |
168 | /* Volatile store, read concurrently by readers. */ | |
14641deb | 169 | CMM_ACCESS_ONCE(*id) = tmp; |
852c2936 MD |
170 | } |
171 | } | |
172 | ||
173 | /* No volatile access, since already used locally */ | |
174 | static inline | |
4cfec15c | 175 | void subbuffer_id_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config, |
852c2936 MD |
176 | unsigned long *id) |
177 | { | |
178 | if (config->mode == RING_BUFFER_OVERWRITE) | |
179 | *id &= ~SB_ID_NOREF_MASK; | |
180 | } | |
181 | ||
182 | /* | |
183 | * For overwrite mode, cap the number of subbuffers per buffer to: | |
184 | * 2^16 on 32-bit architectures | |
185 | * 2^32 on 64-bit architectures | |
186 | * This is required to fit in the index part of the ID. Return 0 on success, | |
187 | * -EPERM on failure. | |
188 | */ | |
189 | static inline | |
4cfec15c | 190 | int subbuffer_id_check_index(const struct lttng_ust_lib_ring_buffer_config *config, |
852c2936 MD |
191 | unsigned long num_subbuf) |
192 | { | |
193 | if (config->mode == RING_BUFFER_OVERWRITE) | |
194 | return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0; | |
195 | else | |
196 | return 0; | |
197 | } | |
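
/*
 * Example (illustrative): on a 32-bit architecture in overwrite mode,
 * num_subbuf == 1UL << 16 passes the check (indexes 0 .. 2^16 - 1 fit in
 * the 16-bit index field), while num_subbuf == 1UL << 17 returns -EPERM.
 */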
198 | ||
199 | static inline | |
4cfec15c MD |
200 | void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config, |
201 | struct lttng_ust_lib_ring_buffer_backend *bufb, | |
38fae1d3 | 202 | unsigned long idx, struct lttng_ust_shm_handle *handle) |
852c2936 MD |
203 | { |
204 | unsigned long sb_bindex; | |
205 | ||
	sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id);
	v_inc(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_commit);
}
209 | ||
210 | /* | |
211 | * Reader has exclusive subbuffer access for record consumption. No need to | |
212 | * perform the decrement atomically. | |
213 | */ | |
214 | static inline | |
4cfec15c MD |
215 | void subbuffer_consume_record(const struct lttng_ust_lib_ring_buffer_config *config, |
216 | struct lttng_ust_lib_ring_buffer_backend *bufb, | |
38fae1d3 | 217 | struct lttng_ust_shm_handle *handle) |
852c2936 MD |
218 | { |
219 | unsigned long sb_bindex; | |
220 | ||
221 | sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id); | |
1d498196 | 222 | CHAN_WARN_ON(shmp(handle, bufb->chan), |
4746ae29 | 223 | !v_read(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_unread)); |
852c2936 | 224 | /* Non-atomic decrement protected by exclusive subbuffer access */ |
4746ae29 | 225 | _v_dec(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_unread); |
852c2936 MD |
226 | v_inc(config, &bufb->records_read); |
227 | } | |
228 | ||
229 | static inline | |
230 | unsigned long subbuffer_get_records_count( | |
4cfec15c MD |
231 | const struct lttng_ust_lib_ring_buffer_config *config, |
232 | struct lttng_ust_lib_ring_buffer_backend *bufb, | |
1d498196 | 233 | unsigned long idx, |
38fae1d3 | 234 | struct lttng_ust_shm_handle *handle) |
852c2936 MD |
235 | { |
236 | unsigned long sb_bindex; | |
237 | ||
4746ae29 MD |
238 | sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id); |
239 | return v_read(config, &shmp(handle, shmp_index(handle, bufb->array, sb_bindex)->shmp)->records_commit); | |
852c2936 MD |
240 | } |
241 | ||
242 | /* | |
243 | * Must be executed at subbuffer delivery when the writer has _exclusive_ | |
3d1aec25 MD |
244 | * subbuffer access. See lib_ring_buffer_check_deliver() for details. |
245 | * lib_ring_buffer_get_records_count() must be called to get the records | |
246 | * count before this function, because it resets the records_commit | |
247 | * count. | |
852c2936 MD |
248 | */ |
249 | static inline | |
250 | unsigned long subbuffer_count_records_overrun( | |
4cfec15c MD |
251 | const struct lttng_ust_lib_ring_buffer_config *config, |
252 | struct lttng_ust_lib_ring_buffer_backend *bufb, | |
1d498196 | 253 | unsigned long idx, |
38fae1d3 | 254 | struct lttng_ust_shm_handle *handle) |
852c2936 | 255 | { |
4cfec15c | 256 | struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages; |
852c2936 MD |
257 | unsigned long overruns, sb_bindex; |
258 | ||
4746ae29 MD |
259 | sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id); |
260 | pages = shmp_index(handle, bufb->array, sb_bindex); | |
1d498196 MD |
261 | overruns = v_read(config, &shmp(handle, pages->shmp)->records_unread); |
262 | v_set(config, &shmp(handle, pages->shmp)->records_unread, | |
263 | v_read(config, &shmp(handle, pages->shmp)->records_commit)); | |
264 | v_set(config, &shmp(handle, pages->shmp)->records_commit, 0); | |
852c2936 MD |
265 | |
266 | return overruns; | |
267 | } | |
268 | ||
269 | static inline | |
4cfec15c MD |
270 | void subbuffer_set_data_size(const struct lttng_ust_lib_ring_buffer_config *config, |
271 | struct lttng_ust_lib_ring_buffer_backend *bufb, | |
852c2936 | 272 | unsigned long idx, |
1d498196 | 273 | unsigned long data_size, |
38fae1d3 | 274 | struct lttng_ust_shm_handle *handle) |
852c2936 | 275 | { |
4cfec15c | 276 | struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages; |
852c2936 MD |
277 | unsigned long sb_bindex; |
278 | ||
4746ae29 MD |
279 | sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id); |
280 | pages = shmp_index(handle, bufb->array, sb_bindex); | |
1d498196 | 281 | shmp(handle, pages->shmp)->data_size = data_size; |
852c2936 MD |
282 | } |
283 | ||
284 | static inline | |
285 | unsigned long subbuffer_get_read_data_size( | |
4cfec15c MD |
286 | const struct lttng_ust_lib_ring_buffer_config *config, |
287 | struct lttng_ust_lib_ring_buffer_backend *bufb, | |
38fae1d3 | 288 | struct lttng_ust_shm_handle *handle) |
852c2936 | 289 | { |
4cfec15c | 290 | struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages; |
852c2936 MD |
291 | unsigned long sb_bindex; |
292 | ||
293 | sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id); | |
4746ae29 | 294 | pages = shmp_index(handle, bufb->array, sb_bindex); |
1d498196 | 295 | return shmp(handle, pages->shmp)->data_size; |
852c2936 MD |
296 | } |
297 | ||
298 | static inline | |
299 | unsigned long subbuffer_get_data_size( | |
4cfec15c MD |
300 | const struct lttng_ust_lib_ring_buffer_config *config, |
301 | struct lttng_ust_lib_ring_buffer_backend *bufb, | |
1d498196 | 302 | unsigned long idx, |
38fae1d3 | 303 | struct lttng_ust_shm_handle *handle) |
852c2936 | 304 | { |
4cfec15c | 305 | struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages; |
852c2936 MD |
306 | unsigned long sb_bindex; |
307 | ||
4746ae29 MD |
308 | sb_bindex = subbuffer_id_get_index(config, shmp_index(handle, bufb->buf_wsb, idx)->id); |
309 | pages = shmp_index(handle, bufb->array, sb_bindex); | |
1d498196 | 310 | return shmp(handle, pages->shmp)->data_size; |
852c2936 MD |
311 | } |
312 | ||
1ff31389 JD |
313 | static inline |
314 | void subbuffer_inc_packet_count(const struct lttng_ust_lib_ring_buffer_config *config, | |
315 | struct lttng_ust_lib_ring_buffer_backend *bufb, | |
316 | unsigned long idx, struct lttng_ust_shm_handle *handle) | |
317 | { | |
318 | shmp_index(handle, bufb->buf_cnt, idx)->seq_cnt++; | |
319 | } | |
320 | ||
/**
 * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
 *                               writer.
 */
static inline
void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
				 struct lttng_ust_lib_ring_buffer_backend *bufb,
				 unsigned long idx,
				 struct lttng_ust_shm_handle *handle)
{
	unsigned long id, new_id;

	if (config->mode != RING_BUFFER_OVERWRITE)
		return;

	/*
	 * Perform a volatile access to read the sb_pages, because we want to
	 * read a coherent version of the pointer and the associated noref
	 * flag.
	 */
	id = CMM_ACCESS_ONCE(shmp_index(handle, bufb->buf_wsb, idx)->id);
	for (;;) {
		/* This check is called on the fast path for each record. */
		if (caa_likely(!subbuffer_id_is_noref(config, id))) {
			/*
			 * The store-after-load dependency ordering the writes
			 * to the subbuffer after the load and test of the
			 * noref flag matches the memory barrier implied by
			 * the cmpxchg() in update_read_sb_index().
			 */
			return;	/* Already writing to this buffer */
		}
		new_id = id;
		subbuffer_id_clear_noref(config, &new_id);
		new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, idx)->id, id, new_id);
		if (caa_likely(new_id == id))
			break;
		id = new_id;
	}
}
360 | ||
361 | /** | |
362 | * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset, | |
363 | * called by writer. | |
364 | */ | |
365 | static inline | |
4cfec15c MD |
366 | void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config, |
367 | struct lttng_ust_lib_ring_buffer_backend *bufb, | |
1d498196 | 368 | unsigned long idx, unsigned long offset, |
38fae1d3 | 369 | struct lttng_ust_shm_handle *handle) |
852c2936 MD |
370 | { |
371 | if (config->mode != RING_BUFFER_OVERWRITE) | |
372 | return; | |
373 | ||
374 | /* | |
375 | * Because ring_buffer_set_noref() is only called by a single thread | |
376 | * (the one which updated the cc_sb value), there are no concurrent | |
377 | * updates to take care of: other writers have not updated cc_sb, so | |
378 | * they cannot set the noref flag, and concurrent readers cannot modify | |
379 | * the pointer because the noref flag is not set yet. | |
380 | * The smp_wmb() in ring_buffer_commit() takes care of ordering writes | |
381 | * to the subbuffer before this set noref operation. | |
382 | * subbuffer_set_noref() uses a volatile store to deal with concurrent | |
383 | * readers of the noref flag. | |
384 | */ | |
1d498196 | 385 | CHAN_WARN_ON(shmp(handle, bufb->chan), |
4746ae29 | 386 | subbuffer_id_is_noref(config, shmp_index(handle, bufb->buf_wsb, idx)->id)); |
852c2936 MD |
387 | /* |
388 | * Memory barrier that ensures counter stores are ordered before set | |
389 | * noref and offset. | |
390 | */ | |
14641deb | 391 | cmm_smp_mb(); |
4746ae29 | 392 | subbuffer_id_set_noref_offset(config, &shmp_index(handle, bufb->buf_wsb, idx)->id, offset); |
852c2936 MD |
393 | } |
394 | ||
395 | /** | |
396 | * update_read_sb_index - Read-side subbuffer index update. | |
397 | */ | |
398 | static inline | |
4cfec15c MD |
399 | int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config, |
400 | struct lttng_ust_lib_ring_buffer_backend *bufb, | |
852c2936 MD |
401 | struct channel_backend *chanb, |
402 | unsigned long consumed_idx, | |
1d498196 | 403 | unsigned long consumed_count, |
38fae1d3 | 404 | struct lttng_ust_shm_handle *handle) |
852c2936 MD |
405 | { |
406 | unsigned long old_id, new_id; | |
407 | ||
408 | if (config->mode == RING_BUFFER_OVERWRITE) { | |
409 | /* | |
410 | * Exchange the target writer subbuffer with our own unused | |
14641deb | 411 | * subbuffer. No need to use CMM_ACCESS_ONCE() here to read the |
852c2936 MD |
412 | * old_wpage, because the value read will be confirmed by the |
413 | * following cmpxchg(). | |
414 | */ | |
4746ae29 | 415 | old_id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id; |
b5a3dfa5 | 416 | if (caa_unlikely(!subbuffer_id_is_noref(config, old_id))) |
852c2936 MD |
417 | return -EAGAIN; |
418 | /* | |
419 | * Make sure the offset count we are expecting matches the one | |
420 | * indicated by the writer. | |
421 | */ | |
b5a3dfa5 | 422 | if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id, |
852c2936 MD |
423 | consumed_count))) |
424 | return -EAGAIN; | |
1d498196 | 425 | CHAN_WARN_ON(shmp(handle, bufb->chan), |
852c2936 MD |
426 | !subbuffer_id_is_noref(config, bufb->buf_rsb.id)); |
427 | subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id, | |
428 | consumed_count); | |
4746ae29 | 429 | new_id = uatomic_cmpxchg(&shmp_index(handle, bufb->buf_wsb, consumed_idx)->id, old_id, |
852c2936 | 430 | bufb->buf_rsb.id); |
b5a3dfa5 | 431 | if (caa_unlikely(old_id != new_id)) |
852c2936 MD |
432 | return -EAGAIN; |
433 | bufb->buf_rsb.id = new_id; | |
434 | } else { | |
435 | /* No page exchange, use the writer page directly */ | |
4746ae29 | 436 | bufb->buf_rsb.id = shmp_index(handle, bufb->buf_wsb, consumed_idx)->id; |
852c2936 MD |
437 | } |
438 | return 0; | |
439 | } | |
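
/*
 * Illustrative consumer-side flow (hypothetical caller, simplified): a
 * get_subbuf implementation retries or bails out when a concurrent writer
 * causes the exchange to fail:
 *
 *	if (update_read_sb_index(config, bufb, chanb, consumed_idx,
 *				 consumed_count, handle))
 *		return -EAGAIN;
 *
 * On success, bufb->buf_rsb references the subbuffer to read.
 */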
440 | ||
0d4aa2df MD |
441 | #ifndef inline_memcpy |
442 | #define inline_memcpy(dest, src, n) memcpy(dest, src, n) | |
443 | #endif | |
444 | ||
852c2936 MD |
445 | /* |
446 | * Use the architecture-specific memcpy implementation for constant-sized | |
447 | * inputs, but rely on an inline memcpy for length statically unknown. | |
448 | * The function call to memcpy is just way too expensive for a fast path. | |
449 | */ | |
450 | #define lib_ring_buffer_do_copy(config, dest, src, len) \ | |
451 | do { \ | |
452 | size_t __len = (len); \ | |
453 | if (__builtin_constant_p(len)) \ | |
454 | memcpy(dest, src, __len); \ | |
455 | else \ | |
456 | inline_memcpy(dest, src, __len); \ | |
457 | } while (0) | |
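
/*
 * For example (illustrative only), a caller copying a fixed-size event
 * header followed by a variable-length payload exercises both branches:
 *
 *	lib_ring_buffer_do_copy(config, dest, &header, sizeof(header));
 *	lib_ring_buffer_do_copy(config, dest + sizeof(header), payload, len);
 */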
458 | ||
a44c74d9 MD |
459 | /* |
460 | * write len bytes to dest with c | |
461 | */ | |
462 | static inline | |
463 | void lib_ring_buffer_do_memset(char *dest, int c, unsigned long len) | |
464 | { | |
465 | unsigned long i; | |
466 | ||
467 | for (i = 0; i < len; i++) | |
468 | dest[i] = c; | |
469 | } | |
470 | ||
/* arch-agnostic implementation */

static inline int lttng_ust_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		/* No need to bit shift on last operation */
		r -= 1;
	}
	return r;
}
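
/*
 * lttng_ust_fls() returns the 1-based position of the most significant set
 * bit, or 0 when no bit is set: lttng_ust_fls(0) == 0, lttng_ust_fls(1) == 1,
 * lttng_ust_fls(0x80000000U) == 32.
 */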
501 | ||
502 | static inline int get_count_order(unsigned int count) | |
503 | { | |
504 | int order; | |
505 | ||
bfd26582 | 506 | order = lttng_ust_fls(count) - 1; |
b728d87e MD |
507 | if (count & (count - 1)) |
508 | order++; | |
509 | return order; | |
510 | } | |
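
/*
 * get_count_order() computes ceil(log2(count)) for count >= 1, e.g.
 * get_count_order(8) == 3 and get_count_order(9) == 4; count == 0 yields -1.
 */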
511 | ||
e92f3e28 | 512 | #endif /* _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H */ |