Implement file-backed ring buffer
libringbuffer/ring_buffer_backend.c
/*
 * ring_buffer_backend.c
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _GNU_SOURCE
#include <urcu/arch.h>
#include <limits.h>

#include <lttng/ringbuffer-config.h>
#include "vatomic.h"
#include "backend.h"
#include "frontend.h"
#include "smp.h"
#include "shm.h"

#define UINT_MAX_STR_LEN	11	/* includes \0 */
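
/*
 * Each buffer backend lives in a single shared memory object, laid out
 * in allocation order: the array of backend-pages references, the
 * page-aligned memory map holding the subbuffer data, the backend pages
 * structures themselves, and the write-side subbuffer table.
 * channel_backend_init() below sizes the shm object with the same
 * sequence of alignments.
 */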
/**
 * lib_ring_buffer_backend_allocate - allocate a channel buffer
 * @config: ring buffer instance configuration
 * @bufb: buffer backend to allocate into
 * @size: total size of the buffer
 * @num_subbuf: number of subbuffers
 * @extra_reader_sb: nonzero if an extra subbuffer is needed for the reader
 * @handle: shared memory handle
 * @shmobj: shared memory object backing this buffer
 */
static
int lib_ring_buffer_backend_allocate(const struct lttng_ust_lib_ring_buffer_config *config,
				     struct lttng_ust_lib_ring_buffer_backend *bufb,
				     size_t size, size_t num_subbuf,
				     int extra_reader_sb,
				     struct lttng_ust_shm_handle *handle,
				     struct shm_object *shmobj)
{
	struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
	unsigned long subbuf_size, mmap_offset = 0;
	unsigned long num_subbuf_alloc;
	unsigned long i;

	subbuf_size = chanb->subbuf_size;
	num_subbuf_alloc = num_subbuf;
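	/*
	 * In overwrite (flight recorder) mode, the extra subbuffer is
	 * owned by the reader: to consume data, the reader exchanges its
	 * subbuffer with the one it wants to read, so the writer never
	 * blocks waiting for the reader.
	 */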
	if (extra_reader_sb)
		num_subbuf_alloc++;
	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
	set_shmp(bufb->array, zalloc_shm(shmobj,
			sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc));
	if (caa_unlikely(!shmp(handle, bufb->array)))
		goto array_error;

	/*
	 * This is the largest element (the buffer pages) which needs to
	 * be aligned on PAGE_SIZE.
	 */
	align_shm(shmobj, PAGE_SIZE);
	set_shmp(bufb->memory_map, zalloc_shm(shmobj,
			subbuf_size * num_subbuf_alloc));
	if (caa_unlikely(!shmp(handle, bufb->memory_map)))
		goto memory_map_error;

	/* Allocate backend pages array elements */
	for (i = 0; i < num_subbuf_alloc; i++) {
		align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
		set_shmp(shmp_index(handle, bufb->array, i)->shmp,
			zalloc_shm(shmobj,
				sizeof(struct lttng_ust_lib_ring_buffer_backend_pages)));
		if (!shmp(handle, shmp_index(handle, bufb->array, i)->shmp))
			goto free_array;
	}

	/* Allocate write-side subbuffer table */
	align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
	set_shmp(bufb->buf_wsb, zalloc_shm(shmobj,
			sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer)
			* num_subbuf));
	if (caa_unlikely(!shmp(handle, bufb->buf_wsb)))
		goto free_array;
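	/*
	 * A subbuffer id packs the subbuffer index together with a
	 * "noref" flag (and, in overwrite mode, an offset count); each
	 * write-side table slot initially refers to the subbuffer of
	 * the same index.
	 */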
	for (i = 0; i < num_subbuf; i++)
		shmp_index(handle, bufb->buf_wsb, i)->id = subbuffer_id(config, 0, 1, i);

	/* Assign read-side subbuffer table */
	if (extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
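	/*
	 * Pointers into the shared memory object are stored as shm_ref
	 * (object table index, offset) pairs rather than raw addresses,
	 * so they stay valid in every process that maps the object,
	 * whatever the mapping address.
	 */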
	/* Assign pages to page index */
	for (i = 0; i < num_subbuf_alloc; i++) {
		struct shm_ref ref;

		ref.index = bufb->memory_map._ref.index;
		ref.offset = bufb->memory_map._ref.offset;
		ref.offset += i * subbuf_size;

		set_shmp(shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->p,
			 ref);
		if (config->output == RING_BUFFER_MMAP) {
			shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->mmap_offset = mmap_offset;
			mmap_offset += subbuf_size;
		}
	}
	return 0;

free_array:
	/* bufb->array[i] will be freed by shm teardown */
memory_map_error:
	/* bufb->array will be freed by shm teardown */
array_error:
	return -ENOMEM;
}

int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct channel_backend *chanb, int cpu,
				   struct lttng_ust_shm_handle *handle,
				   struct shm_object *shmobj)
{
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;

	set_shmp(bufb->chan, handle->chan._ref);
	bufb->cpu = cpu;

	return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
						chanb->num_subbuf,
						chanb->extra_reader_sb,
						handle, shmobj);
}

void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   struct lttng_ust_shm_handle *handle)
{
	struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
	unsigned long num_subbuf_alloc;
	unsigned int i;

	num_subbuf_alloc = chanb->num_subbuf;
	if (chanb->extra_reader_sb)
		num_subbuf_alloc++;

	for (i = 0; i < chanb->num_subbuf; i++)
		shmp_index(handle, bufb->buf_wsb, i)->id = subbuffer_id(config, 0, 1, i);
	if (chanb->extra_reader_sb)
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
						num_subbuf_alloc - 1);
	else
		bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);

	for (i = 0; i < num_subbuf_alloc; i++) {
		/* Don't reset mmap_offset */
		v_set(config, &shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->records_commit, 0);
		v_set(config, &shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->records_unread, 0);
		shmp(handle, shmp_index(handle, bufb->array, i)->shmp)->data_size = 0;
		/* Don't reset backend page and virt addresses */
	}
	/* Don't reset num_pages_per_subbuf, cpu, allocated */
	v_set(config, &bufb->records_read, 0);
}

/*
 * The frontend is responsible for also calling ring_buffer_backend_reset for
 * each buffer when calling channel_backend_reset.
 */
void channel_backend_reset(struct channel_backend *chanb)
{
	struct channel *chan = caa_container_of(chanb, struct channel, backend);
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;

	/*
	 * Don't reset buf_size, subbuf_size, subbuf_size_order,
	 * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
	 * priv, notifiers, config, cpumask and name.
	 */
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
}

/**
 * channel_backend_init - initialize a channel backend
 * @chanb: channel backend
 * @name: channel name
 * @config: client ring buffer configuration
 * @subbuf_size: size of sub-buffers (>= PAGE_SIZE, power of 2)
 * @num_subbuf: number of sub-buffers (power of 2)
 * @handle: shared memory handle
 * @shm_path: shared memory files path
 *
 * Returns 0 on success, a negative error code otherwise.
 *
 * Creates per-cpu channel buffers using the sizes and attributes
 * specified. When @shm_path is non-NULL, each per-cpu buffer is backed
 * by a file at that path with the cpu number appended.
 */
int channel_backend_init(struct channel_backend *chanb,
			 const char *name,
			 const struct lttng_ust_lib_ring_buffer_config *config,
			 size_t subbuf_size, size_t num_subbuf,
			 struct lttng_ust_shm_handle *handle,
			 const char *shm_path)
{
	struct channel *chan = caa_container_of(chanb, struct channel, backend);
	unsigned int i;
	int ret;
	size_t shmsize = 0, num_subbuf_alloc;

	if (!name)
		return -EPERM;

	/* Check that the subbuffer size is at least a page. */
	if (subbuf_size < PAGE_SIZE)
		return -EINVAL;

	/*
	 * Make sure the number of subbuffers and the subbuffer size are
	 * powers of 2, and nonzero.
	 */
	if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
		return -EINVAL;
	if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
		return -EINVAL;
	/*
	 * Overwrite mode buffers require at least 2 subbuffers per
	 * buffer: the reader needs to grab one subbuffer while the
	 * writer keeps writing into another.
	 */
	if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
		return -EINVAL;

	ret = subbuffer_id_check_index(config, num_subbuf);
	if (ret)
		return ret;

	chanb->buf_size = num_subbuf * subbuf_size;
	chanb->subbuf_size = subbuf_size;
	chanb->buf_size_order = get_count_order(chanb->buf_size);
	chanb->subbuf_size_order = get_count_order(subbuf_size);
	chanb->num_subbuf_order = get_count_order(num_subbuf);
	chanb->extra_reader_sb =
		(config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
	chanb->num_subbuf = num_subbuf;
	strncpy(chanb->name, name, NAME_MAX);
	chanb->name[NAME_MAX - 1] = '\0';
	memcpy(&chanb->config, config, sizeof(*config));

	/* Per-cpu buffer size: control (prior to backend) */
	shmsize = offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer);

	/* Per-cpu buffer size: backend */
	/* num_subbuf + 1 is the worst case */
	num_subbuf_alloc = num_subbuf + 1;
	shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc;
	shmsize += offset_align(shmsize, PAGE_SIZE);
	shmsize += subbuf_size * num_subbuf_alloc;
	shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages) * num_subbuf_alloc;
	shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
	shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer) * num_subbuf;
	/* Per-cpu buffer size: control (after backend) */
	shmsize += offset_align(shmsize, __alignof__(struct commit_counters_hot));
	shmsize += sizeof(struct commit_counters_hot) * num_subbuf;
	shmsize += offset_align(shmsize, __alignof__(struct commit_counters_cold));
	shmsize += sizeof(struct commit_counters_cold) * num_subbuf;
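	/*
	 * The sizing above must mirror, alignment for alignment, the
	 * allocations done in lib_ring_buffer_backend_allocate() and in
	 * the frontend's lib_ring_buffer_create(); any new per-buffer
	 * field must be accounted for in both places.
	 */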

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		struct lttng_ust_lib_ring_buffer *buf;
		/*
		 * We need to allocate for all possible cpus.
		 */
		for_each_possible_cpu(i) {
			struct shm_object *shmobj;
			char shm_buf_path[PATH_MAX];

			if (shm_path) {
				char cpu_nr[UINT_MAX_STR_LEN];	/* unsigned int max len */

				strncpy(shm_buf_path, shm_path, PATH_MAX);
				shm_buf_path[PATH_MAX - 1] = '\0';
				ret = snprintf(cpu_nr, UINT_MAX_STR_LEN, "%u", i);
				if (ret < 0)
					goto end;
				strncat(shm_buf_path, cpu_nr,
					PATH_MAX - strlen(shm_buf_path) - 1);
			}
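			/*
			 * e.g. shm_path "/tmp/chan" for cpu 3 yields the
			 * backing file path "/tmp/chan3" (path given here
			 * only as an illustration).
			 */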
			shmobj = shm_object_table_alloc(handle->table, shmsize,
					SHM_OBJECT_SHM, shm_path ? shm_buf_path : NULL);
			if (!shmobj)
				goto end;
			align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
			set_shmp(chanb->buf[i].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
			buf = shmp(handle, chanb->buf[i].shmp);
			if (!buf)
				goto end;
			set_shmp(buf->self, chanb->buf[i].shmp._ref);
			ret = lib_ring_buffer_create(buf, chanb, i,
						     handle, shmobj);
			if (ret)
				goto free_bufs;	/* cpu hotplug locked */
		}
	} else {
		struct shm_object *shmobj;
		struct lttng_ust_lib_ring_buffer *buf;

		shmobj = shm_object_table_alloc(handle->table, shmsize,
				SHM_OBJECT_SHM, shm_path);
		if (!shmobj)
			goto end;
		align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
		set_shmp(chanb->buf[0].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
		buf = shmp(handle, chanb->buf[0].shmp);
		if (!buf)
			goto end;
		set_shmp(buf->self, chanb->buf[0].shmp._ref);
		ret = lib_ring_buffer_create(buf, chanb, -1,
					     handle, shmobj);
		if (ret)
			goto free_bufs;
	}
	chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);

	return 0;

free_bufs:
	/* We only free the buffer data upon shm teardown */
end:
	return -ENOMEM;
}
/**
 * channel_backend_free - destroy the channel backend
 * @chanb: the channel backend
 * @handle: shared memory handle
 *
 * Destroys all channel buffers and frees the channel.
 */
void channel_backend_free(struct channel_backend *chanb,
			  struct lttng_ust_shm_handle *handle)
{
	/* SHM teardown takes care of everything */
}

/**
 * lib_ring_buffer_read - read data from the ring buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : length to copy to destination
 * @handle : shared memory handle
 *
 * Should be protected by get_subbuf/put_subbuf.
 * Returns the length copied.
 */
size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
			    void *dest, size_t len, struct lttng_ust_shm_handle *handle)
{
	struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
	ssize_t orig_len;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	unsigned long sb_bindex, id;

	orig_len = len;
	offset &= chanb->buf_size - 1;
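	/*
	 * buf_size and subbuf_size are powers of 2 (enforced in
	 * channel_backend_init), so masking with size - 1 is a cheap
	 * modulo.
	 */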

	if (caa_unlikely(!len))
		return 0;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	/*
	 * Underlying layer should never ask for reads across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	memcpy(dest, shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1)), len);
	return orig_len;
}
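
/*
 * Typical consumer flow (sketch): take a subbuffer for reading with the
 * frontend's lib_ring_buffer_get_subbuf(), copy data out with
 * lib_ring_buffer_read() or lib_ring_buffer_read_cstr(), then release
 * it with lib_ring_buffer_put_subbuf(); see frontend.h for the exact
 * signatures.
 */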

/**
 * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
 * @bufb : buffer backend
 * @offset : offset within the buffer
 * @dest : destination address
 * @len : destination's length
 * @handle : shared memory handle
 *
 * Return string's length, or -EINVAL on error.
 * Should be protected by get_subbuf/put_subbuf.
 * Destination length should be at least 1 to hold '\0'.
 */
int lib_ring_buffer_read_cstr(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
			      void *dest, size_t len, struct lttng_ust_shm_handle *handle)
{
	struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
	ssize_t string_len;
	char *str;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	unsigned long sb_bindex, id;

	if (caa_unlikely(!len))
		return -EINVAL;
	offset &= chanb->buf_size - 1;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	/*
	 * Underlying layer should never ask for reads across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	str = shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1));
	/* Leave room in dest for the terminating '\0'. */
	string_len = strnlen(str, len - 1);
	if (dest) {
		memcpy(dest, str, string_len);
		((char *)dest)[string_len] = '\0';
	}
	return string_len;
}

/**
 * lib_ring_buffer_read_offset_address - get address of a buffer location
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 * @handle : shared memory handle
 *
 * Return the address where a given offset is located (for read).
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to read/write directly
 * from/to this address, as long as the read/write is never bigger than
 * a page size.
 */
void *lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
					  size_t offset,
					  struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	id = bufb->buf_rsb.id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1));
}
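
/*
 * Unlike the read-side variant above, which always resolves through the
 * reader-owned subbuffer (buf_rsb), the function below looks the offset
 * up in the write-side subbuffer table (buf_wsb), so it returns an
 * address in whichever subbuffer the writer currently maps there.
 */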

/**
 * lib_ring_buffer_offset_address - get address of a location within the buffer
 * @bufb : buffer backend
 * @offset : offset within the buffer.
 * @handle : shared memory handle
 *
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's always at the beginning of a page, it's safe to write directly to this
 * address, as long as the write is never bigger than a page size.
 */
void *lib_ring_buffer_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
				     size_t offset,
				     struct lttng_ust_shm_handle *handle)
{
	size_t sbidx;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	struct channel_backend *chanb = &shmp(handle, bufb->chan)->backend;
	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
	unsigned long sb_bindex, id;

	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	id = shmp_index(handle, bufb->buf_wsb, sbidx)->id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	return shmp_index(handle, shmp(handle, rpages->shmp)->p, offset & (chanb->subbuf_size - 1));
}