#ifndef _LTTNG_RING_BUFFER_BACKEND_H
#define _LTTNG_RING_BUFFER_BACKEND_H

/*
 * libringbuffer/backend.h
 *
 * Ring buffer backend (API).
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Credits to Steven Rostedt for proposing to use an extra-subbuffer owned by
 * the reader in flight recorder mode.
 */

#include <unistd.h>

/* Internal helpers */
#include "backend_internal.h"
#include "frontend_internal.h"

/* Ring buffer backend API */

/* Ring buffer backend access (read/write) */

extern size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb,
				   size_t offset, void *dest, size_t len,
				   struct lttng_ust_shm_handle *handle);

extern int lib_ring_buffer_read_cstr(struct lttng_ust_lib_ring_buffer_backend *bufb,
				     size_t offset, void *dest, size_t len,
				     struct lttng_ust_shm_handle *handle);
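
/*
 * Usage sketch (hypothetical reader-side snippet; "buf", "handle",
 * "read_offset", "dest" and "len" are assumed to be set up by the caller,
 * typically after acquiring a subbuffer through the frontend):
 *
 *	size_t copied;
 *
 *	copied = lib_ring_buffer_read(&buf->backend, read_offset,
 *				      dest, len, handle);
 *
 * lib_ring_buffer_read() copies up to "len" bytes starting at "offset" into
 * "dest" and returns the number of bytes copied; lib_ring_buffer_read_cstr()
 * is the C-string flavor, stopping at the terminating '\0'.
 */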

/*
 * Return the address where a given offset is located.
 * Should be used to get the current subbuffer header pointer. Given we know
 * it's never on a page boundary, it's safe to write directly to this address,
 * as long as the write is never bigger than a page size.
 */
extern void *
lib_ring_buffer_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
			       size_t offset,
			       struct lttng_ust_shm_handle *handle);
extern void *
lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
				    size_t offset,
				    struct lttng_ust_shm_handle *handle);
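
/*
 * Hypothetical usage sketch (the header type and the "tsc" value are
 * placeholders supplied by the caller, not part of this API):
 *
 *	struct my_subbuf_header *header;
 *
 *	header = lib_ring_buffer_offset_address(&buf->backend,
 *			subbuf_idx * chan->backend.subbuf_size, handle);
 *	header->timestamp_begin = tsc;
 *
 * The direct store is safe because, as noted above, a subbuffer header never
 * sits on a page boundary. lib_ring_buffer_read_offset_address() is the
 * reader-side counterpart, resolving offsets against the reader-owned
 * subbuffer.
 */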

/**
 * lib_ring_buffer_write - write data to a buffer backend
 * @config : ring buffer instance configuration
 * @ctx: ring buffer context. (input arguments only)
 * @src : source pointer to copy from
 * @len : length of data to copy
 *
 * This function copies "len" bytes of data from a source pointer to a buffer
 * backend, at the current context offset. This is more or less a buffer
 * backend-specific memcpy() operation. Calls the slow path (_ring_buffer_write)
 * if copy is crossing a page boundary.
 */
static inline
void lib_ring_buffer_write(const struct lttng_ust_lib_ring_buffer_config *config,
			   struct lttng_ust_lib_ring_buffer_ctx *ctx,
			   const void *src, size_t len)
{
	struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	size_t sbidx;
	size_t offset = ctx->buf_offset;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	unsigned long sb_bindex, id;
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
	void *p;

	if (caa_unlikely(!len))
		return;
	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	id = shmp_index(handle, bufb->buf_wsb, sbidx)->id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	if (caa_unlikely(!rpages))
		return;
	CHAN_WARN_ON(ctx->chan,
		     config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	/*
	 * Underlying layer should never ask for writes across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	backend_pages = shmp(handle, rpages->shmp);
	if (caa_unlikely(!backend_pages))
		return;
	p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
	if (caa_unlikely(!p))
		return;
	lib_ring_buffer_do_copy(config, p, src, len);
	ctx->buf_offset += len;
}
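
/*
 * Usage sketch (hypothetical tracer-side snippet; it assumes the caller has
 * already reserved space, e.g. through the frontend's
 * lib_ring_buffer_reserve(), so that ctx->buf_offset points inside the
 * current subbuffer; "struct my_event" is a placeholder type):
 *
 *	struct my_event payload = { ... };
 *
 *	lib_ring_buffer_write(config, &ctx, &payload, sizeof(payload));
 *
 * Each call advances ctx->buf_offset by "len", so consecutive fields can be
 * written back to back into the same reserved slot.
 */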

/*
 * Copy up to @len string bytes from @src to @dest. Stop whenever a NULL
 * terminating character is found in @src. Returns the number of bytes
 * copied. Does *not* terminate @dest with NULL terminating character.
 */
static inline
size_t lib_ring_buffer_do_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
				 char *dest, const char *src, size_t len)
{
	size_t count;

	for (count = 0; count < len; count++) {
		char c;

		/*
		 * Only read source character once, in case it is
		 * modified concurrently.
		 */
		c = CMM_LOAD_SHARED(src[count]);
		if (!c)
			break;
		lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
	}
	return count;
}
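
/*
 * Illustrative note (not part of the original header): with src = "ab" and
 * len = 4, the loop above copies 'a' and 'b', stops at the '\0' and returns
 * 2. The remaining bytes of @dest are left untouched, which is why
 * lib_ring_buffer_strcpy() below pads and terminates explicitly.
 */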

/**
 * lib_ring_buffer_strcpy - write string data to a buffer backend
 * @config : ring buffer instance configuration
 * @ctx: ring buffer context. (input arguments only)
 * @src : source pointer to copy from
 * @len : length of data to copy
 * @pad : character to use for padding
 *
 * This function copies @len - 1 bytes of string data from a source
 * pointer to a buffer backend, followed by a terminating '\0'
 * character, at the current context offset. This is more or less a
 * buffer backend-specific strncpy() operation. If a terminating '\0'
 * character is found in @src before @len - 1 characters are copied, pad
 * the buffer with @pad characters (e.g. '#').
 */
static inline
void lib_ring_buffer_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
			    struct lttng_ust_lib_ring_buffer_ctx *ctx,
			    const char *src, size_t len, int pad)
{
	struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx->buf->backend;
	struct channel_backend *chanb = &ctx->chan->backend;
	struct lttng_ust_shm_handle *handle = ctx->handle;
	size_t sbidx, count;
	size_t offset = ctx->buf_offset;
	struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
	unsigned long sb_bindex, id;

	if (caa_unlikely(!len))
		return;
	offset &= chanb->buf_size - 1;
	sbidx = offset >> chanb->subbuf_size_order;
	id = shmp_index(handle, bufb->buf_wsb, sbidx)->id;
	sb_bindex = subbuffer_id_get_index(config, id);
	rpages = shmp_index(handle, bufb->array, sb_bindex);
	CHAN_WARN_ON(ctx->chan,
		     config->mode == RING_BUFFER_OVERWRITE
		     && subbuffer_id_is_noref(config, id));
	/*
	 * Underlying layer should never ask for writes across
	 * subbuffers.
	 */
	CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
	count = lib_ring_buffer_do_strcpy(config,
					  shmp_index(handle, shmp(handle, rpages->shmp)->p,
						     offset & (chanb->subbuf_size - 1)),
					  src, len - 1);
	offset += count;
	/* Padding */
	if (caa_unlikely(count < len - 1)) {
		size_t pad_len = len - 1 - count;

		lib_ring_buffer_do_memset(shmp_index(handle, shmp(handle, rpages->shmp)->p,
						     offset & (chanb->subbuf_size - 1)),
					  pad, pad_len);
		offset += pad_len;
	}
	/* Final '\0' */
	lib_ring_buffer_do_memset(shmp_index(handle, shmp(handle, rpages->shmp)->p,
					     offset & (chanb->subbuf_size - 1)),
				  '\0', 1);
	ctx->buf_offset += len;
}
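
/*
 * Usage sketch (hypothetical; assumes "name_len" bytes have already been
 * reserved in the context, as for lib_ring_buffer_write(), and "task_name"
 * is a caller-supplied string):
 *
 *	lib_ring_buffer_strcpy(config, &ctx, task_name, name_len, '#');
 *
 * At most name_len - 1 bytes of task_name end up in the buffer; shorter
 * strings are padded with '#' and the record is always '\0'-terminated, so
 * the consumed size is exactly name_len.
 */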

/*
 * This accessor counts the number of unread records in a buffer.
 * It only provides a consistent value if no reads nor writes are performed
 * concurrently.
 */
static inline
unsigned long lib_ring_buffer_get_records_unread(
		const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
	unsigned long records_unread = 0, sb_bindex;
	unsigned int i;
	struct channel *chan;

	chan = shmp(handle, bufb->chan);
	if (!chan)
		return 0;
	for (i = 0; i < chan->backend.num_subbuf; i++) {
		struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
		struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

		wsb = shmp_index(handle, bufb->buf_wsb, i);
		if (!wsb)
			return 0;
		sb_bindex = subbuffer_id_get_index(config, wsb->id);
		rpages = shmp_index(handle, bufb->array, sb_bindex);
		if (!rpages)
			return 0;
		backend_pages = shmp(handle, rpages->shmp);
		if (!backend_pages)
			return 0;
		records_unread += v_read(config, &backend_pages->records_unread);
	}
	if (config->mode == RING_BUFFER_OVERWRITE) {
		struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
		struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;

		sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
		rpages = shmp_index(handle, bufb->array, sb_bindex);
		if (!rpages)
			return 0;
		backend_pages = shmp(handle, rpages->shmp);
		if (!backend_pages)
			return 0;
		records_unread += v_read(config, &backend_pages->records_unread);
	}
	return records_unread;
}
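
/*
 * Illustrative note (not part of the original header): the accessor above
 * sums the per-subbuffer records_unread counters over every writer-owned
 * subbuffer and, in overwrite mode, also adds the extra reader-owned
 * subbuffer (buf_rsb), since unread records may still be parked there while
 * the reader holds it.
 */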

#endif /* _LTTNG_RING_BUFFER_BACKEND_H */