cleanup: function attribute 'hidden'
[lttng-ust.git] / libringbuffer / backend.h
CommitLineData
/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring buffer backend (API).
 *
 * Credits to Steven Rostedt for proposing to use an extra-subbuffer owned by
 * the reader in flight recorder mode.
 */
11
c0c0989a
MJ
12#ifndef _LTTNG_RING_BUFFER_BACKEND_H
13#define _LTTNG_RING_BUFFER_BACKEND_H
14
b4051ad8 15#include <stddef.h>
14641deb
MD
16#include <unistd.h>
17
852c2936 18/* Internal helpers */
4931a13e
MD
19#include "backend_internal.h"
20#include "frontend_internal.h"
852c2936
MD
21
22/* Ring buffer backend API */
23
24/* Ring buffer backend access (read/write) */
25
4cfec15c 26extern size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb,
1d498196 27 size_t offset, void *dest, size_t len,
1d18d519
MJ
28 struct lttng_ust_shm_handle *handle)
29 __attribute__((visibility("hidden")));
852c2936 30
4cfec15c 31extern int lib_ring_buffer_read_cstr(struct lttng_ust_lib_ring_buffer_backend *bufb,
1d498196 32 size_t offset, void *dest, size_t len,
1d18d519
MJ
33 struct lttng_ust_shm_handle *handle)
34 __attribute__((visibility("hidden")));
852c2936 35
852c2936
MD
36/*
37 * Return the address where a given offset is located.
38 * Should be used to get the current subbuffer header pointer. Given we know
39 * it's never on a page boundary, it's safe to write directly to this address,
40 * as long as the write is never bigger than a page size.
41 */
42extern void *
4cfec15c 43lib_ring_buffer_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
1d498196 44 size_t offset,
1d18d519
MJ
45 struct lttng_ust_shm_handle *handle)
46 __attribute__((visibility("hidden")));
ddabe860 47
852c2936 48extern void *
4cfec15c 49lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
1d498196 50 size_t offset,
1d18d519
MJ
51 struct lttng_ust_shm_handle *handle)
52 __attribute__((visibility("hidden")));
852c2936
MD
53
54/**
55 * lib_ring_buffer_write - write data to a buffer backend
56 * @config : ring buffer instance configuration
57 * @ctx: ring buffer context. (input arguments only)
58 * @src : source pointer to copy from
59 * @len : length of data to copy
60 *
61 * This function copies "len" bytes of data from a source pointer to a buffer
62 * backend, at the current context offset. This is more or less a buffer
63 * backend-specific memcpy() operation. Calls the slow path (_ring_buffer_write)
64 * if copy is crossing a page boundary.
65 */
00d0f8eb 66static inline __attribute__((always_inline))
4cfec15c
MD
67void lib_ring_buffer_write(const struct lttng_ust_lib_ring_buffer_config *config,
68 struct lttng_ust_lib_ring_buffer_ctx *ctx,
852c2936
MD
69 const void *src, size_t len)
70{
8936b6c0
MD
71 struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
72 struct channel_backend *chanb = &ctx_private->chan->backend;
73 struct lttng_ust_shm_handle *handle = ctx_private->chan->handle;
74 size_t offset = ctx_private->buf_offset;
15500a1b
MD
75 struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
76 void *p;
852c2936 77
0bf3c920
MD
78 if (caa_unlikely(!len))
79 return;
a6352fd4
MD
80 /*
81 * Underlying layer should never ask for writes across
82 * subbuffers.
83 */
a3492932
MD
84 CHAN_WARN_ON(chanb, (offset & (chanb->buf_size - 1)) + len > chanb->buf_size);
85 backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
86 if (caa_unlikely(!backend_pages)) {
87 if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
88 return;
89 }
15500a1b
MD
90 p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
91 if (caa_unlikely(!p))
92 return;
93 lib_ring_buffer_do_copy(config, p, src, len);
8936b6c0 94 ctx_private->buf_offset += len;
852c2936
MD
95}
96
a44c74d9
MD
97/*
98 * Copy up to @len string bytes from @src to @dest. Stop whenever a NULL
99 * terminating character is found in @src. Returns the number of bytes
100 * copied. Does *not* terminate @dest with NULL terminating character.
101 */
00d0f8eb 102static inline __attribute__((always_inline))
a44c74d9
MD
103size_t lib_ring_buffer_do_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
104 char *dest, const char *src, size_t len)
105{
106 size_t count;
107
108 for (count = 0; count < len; count++) {
109 char c;
110
111 /*
112 * Only read source character once, in case it is
113 * modified concurrently.
114 */
115 c = CMM_LOAD_SHARED(src[count]);
116 if (!c)
117 break;
118 lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
119 }
120 return count;
121}
122
123/**
124 * lib_ring_buffer_strcpy - write string data to a buffer backend
125 * @config : ring buffer instance configuration
126 * @ctx: ring buffer context. (input arguments only)
127 * @src : source pointer to copy from
128 * @len : length of data to copy
129 * @pad : character to use for padding
130 *
131 * This function copies @len - 1 bytes of string data from a source
132 * pointer to a buffer backend, followed by a terminating '\0'
133 * character, at the current context offset. This is more or less a
134 * buffer backend-specific strncpy() operation. If a terminating '\0'
135 * character is found in @src before @len - 1 characters are copied, pad
136 * the buffer with @pad characters (e.g. '#').
137 */
00d0f8eb 138static inline __attribute__((always_inline))
a44c74d9
MD
139void lib_ring_buffer_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
140 struct lttng_ust_lib_ring_buffer_ctx *ctx,
b4c8bf2f 141 const char *src, size_t len, char pad)
a44c74d9 142{
8936b6c0
MD
143 struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
144 struct channel_backend *chanb = &ctx_private->chan->backend;
145 struct lttng_ust_shm_handle *handle = ctx_private->chan->handle;
a3492932 146 size_t count;
8936b6c0 147 size_t offset = ctx_private->buf_offset;
a3492932
MD
148 struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
149 void *p;
a44c74d9
MD
150
151 if (caa_unlikely(!len))
152 return;
a44c74d9
MD
153 /*
154 * Underlying layer should never ask for writes across
155 * subbuffers.
156 */
a3492932
MD
157 CHAN_WARN_ON(chanb, (offset & (chanb->buf_size - 1)) + len > chanb->buf_size);
158 backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
159 if (caa_unlikely(!backend_pages)) {
160 if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
161 return;
162 }
163 p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
164 if (caa_unlikely(!p))
165 return;
166
167 count = lib_ring_buffer_do_strcpy(config, p, src, len - 1);
a44c74d9
MD
168 offset += count;
169 /* Padding */
170 if (caa_unlikely(count < len - 1)) {
171 size_t pad_len = len - 1 - count;
172
a3492932
MD
173 p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
174 if (caa_unlikely(!p))
175 return;
176 lib_ring_buffer_do_memset(p, pad, pad_len);
a44c74d9
MD
177 offset += pad_len;
178 }
179 /* Final '\0' */
a3492932
MD
180 p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
181 if (caa_unlikely(!p))
182 return;
183 lib_ring_buffer_do_memset(p, '\0', 1);
8936b6c0 184 ctx_private->buf_offset += len;
a44c74d9
MD
185}
186
879f9b0a
MD
187/**
188 * lib_ring_buffer_pstrcpy - write to a buffer backend P-string
189 * @config : ring buffer instance configuration
190 * @ctx: ring buffer context. (input arguments only)
191 * @src : source pointer to copy from
192 * @len : length of data to copy
193 * @pad : character to use for padding
194 *
195 * This function copies up to @len bytes of data from a source pointer
196 * to a Pascal String into the buffer backend. If a terminating '\0'
197 * character is found in @src before @len characters are copied, pad the
198 * buffer with @pad characters (e.g. '\0').
199 *
200 * The length of the pascal strings in the ring buffer is explicit: it
201 * is either the array or sequence length.
202 */
203static inline __attribute__((always_inline))
204void lib_ring_buffer_pstrcpy(const struct lttng_ust_lib_ring_buffer_config *config,
205 struct lttng_ust_lib_ring_buffer_ctx *ctx,
206 const char *src, size_t len, char pad)
207{
8936b6c0
MD
208 struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
209 struct channel_backend *chanb = &ctx_private->chan->backend;
210 struct lttng_ust_shm_handle *handle = ctx_private->chan->handle;
879f9b0a 211 size_t count;
8936b6c0 212 size_t offset = ctx_private->buf_offset;
879f9b0a
MD
213 struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
214 void *p;
215
216 if (caa_unlikely(!len))
217 return;
218 /*
219 * Underlying layer should never ask for writes across
220 * subbuffers.
221 */
222 CHAN_WARN_ON(chanb, (offset & (chanb->buf_size - 1)) + len > chanb->buf_size);
223 backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
224 if (caa_unlikely(!backend_pages)) {
225 if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
226 return;
227 }
228 p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
229 if (caa_unlikely(!p))
230 return;
231
232 count = lib_ring_buffer_do_strcpy(config, p, src, len);
233 offset += count;
234 /* Padding */
235 if (caa_unlikely(count < len)) {
236 size_t pad_len = len - count;
237
238 p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
239 if (caa_unlikely(!p))
240 return;
241 lib_ring_buffer_do_memset(p, pad, pad_len);
242 }
8936b6c0 243 ctx_private->buf_offset += len;
879f9b0a
MD
244}
245
852c2936
MD
246/*
247 * This accessor counts the number of unread records in a buffer.
248 * It only provides a consistent value if no reads not writes are performed
249 * concurrently.
250 */
251static inline
252unsigned long lib_ring_buffer_get_records_unread(
4cfec15c
MD
253 const struct lttng_ust_lib_ring_buffer_config *config,
254 struct lttng_ust_lib_ring_buffer *buf,
38fae1d3 255 struct lttng_ust_shm_handle *handle)
852c2936 256{
4cfec15c 257 struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
15500a1b 258 unsigned long records_unread = 0, sb_bindex;
852c2936 259 unsigned int i;
5198080d 260 struct lttng_ust_lib_ring_buffer_channel *chan;
852c2936 261
15500a1b
MD
262 chan = shmp(handle, bufb->chan);
263 if (!chan)
264 return 0;
265 for (i = 0; i < chan->backend.num_subbuf; i++) {
266 struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
267 struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
268 struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
269
270 wsb = shmp_index(handle, bufb->buf_wsb, i);
271 if (!wsb)
272 return 0;
273 sb_bindex = subbuffer_id_get_index(config, wsb->id);
274 rpages = shmp_index(handle, bufb->array, sb_bindex);
275 if (!rpages)
276 return 0;
277 backend_pages = shmp(handle, rpages->shmp);
278 if (!backend_pages)
279 return 0;
280 records_unread += v_read(config, &backend_pages->records_unread);
852c2936
MD
281 }
282 if (config->mode == RING_BUFFER_OVERWRITE) {
15500a1b
MD
283 struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
284 struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
285
286 sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
287 rpages = shmp_index(handle, bufb->array, sb_bindex);
288 if (!rpages)
289 return 0;
290 backend_pages = shmp(handle, rpages->shmp);
291 if (!backend_pages)
292 return 0;
293 records_unread += v_read(config, &backend_pages->records_unread);
852c2936
MD
294 }
295 return records_unread;
296}
297
e92f3e28 298#endif /* _LTTNG_RING_BUFFER_BACKEND_H */
This page took 0.051604 seconds and 4 git commands to generate.