/* lttng-ust: include/lttng/ringbuffer-config.h */
1 /*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright (C) 2010 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * Ring buffer configuration header. Note: after declaring the standard inline
7 * functions, clients should also include linux/ringbuffer/api.h.
8 */
9
10 #ifndef _LTTNG_RING_BUFFER_CONFIG_H
11 #define _LTTNG_RING_BUFFER_CONFIG_H
12
13 #include <errno.h>
14 #include "lttng/ust-tracer.h"
15 #include <stdint.h>
16 #include <stddef.h>
17 #include <urcu/arch.h>
18 #include <string.h>
19 #include "lttng/align.h"
20 #include <lttng/ust-compiler.h>
21
22 struct lttng_ust_lib_ring_buffer;
23 struct lttng_ust_lib_ring_buffer_channel;
24 struct lttng_ust_lib_ring_buffer_config;
25 struct lttng_ust_lib_ring_buffer_ctx;
26 struct lttng_ust_shm_handle;
27
28 /*
29 * Ring buffer client callbacks. Only used by slow path, never on fast path.
30 * For the fast path, record_header_size(), ring_buffer_clock_read() should be
31 * provided as inline functions too. These may simply return 0 if not used by
32 * the client.
33 */
struct lttng_ust_lib_ring_buffer_client_cb {
	/* Mandatory callbacks */

	/*
	 * Read the client's trace clock.
	 * A static inline version is also required for fast path.
	 */
	uint64_t (*ring_buffer_clock_read) (struct lttng_ust_lib_ring_buffer_channel *chan);
	/*
	 * Compute the record header size for a record reserved at
	 * @offset. *pre_header_padding receives the padding (if any)
	 * inserted before the header for alignment.
	 * A static inline version is also required for fast path.
	 */
	size_t (*record_header_size) (const struct lttng_ust_lib_ring_buffer_config *config,
				struct lttng_ust_lib_ring_buffer_channel *chan,
				size_t offset,
				size_t *pre_header_padding,
				struct lttng_ust_lib_ring_buffer_ctx *ctx,
				void *client_ctx);

	/* Slow path only, at sub-buffer switch */
	/* Size of the header written at the start of each sub-buffer. */
	size_t (*subbuffer_header_size) (void);
	/* Invoked when the writer enters sub-buffer @subbuf_idx. */
	void (*buffer_begin) (struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
			      unsigned int subbuf_idx,
			      struct lttng_ust_shm_handle *handle);
	/*
	 * Invoked when the writer leaves sub-buffer @subbuf_idx;
	 * @data_size is the payload size recorded in that sub-buffer.
	 */
	void (*buffer_end) (struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
			    unsigned int subbuf_idx, unsigned long data_size,
			    struct lttng_ust_shm_handle *handle);

	/* Optional callbacks (can be set to NULL) */

	/* Called at buffer creation/finalize */
	int (*buffer_create) (struct lttng_ust_lib_ring_buffer *buf, void *priv,
			      int cpu, const char *name,
			      struct lttng_ust_shm_handle *handle);
	/*
	 * Clients should guarantee that no new reader handle can be opened
	 * after finalize.
	 */
	void (*buffer_finalize) (struct lttng_ust_lib_ring_buffer *buf,
				 void *priv, int cpu,
				 struct lttng_ust_shm_handle *handle);

	/*
	 * Extract header length, payload length and timestamp from event
	 * record. Used by buffer iterators. Timestamp is only used by channel
	 * iterator.
	 */
	void (*record_get) (const struct lttng_ust_lib_ring_buffer_config *config,
			    struct lttng_ust_lib_ring_buffer_channel *chan,
			    struct lttng_ust_lib_ring_buffer *buf,
			    size_t offset, size_t *header_len,
			    size_t *payload_len, uint64_t *timestamp,
			    struct lttng_ust_shm_handle *handle);
	/*
	 * Offset and size of content size field in client.
	 */
	void (*content_size_field) (const struct lttng_ust_lib_ring_buffer_config *config,
				    size_t *offset, size_t *length);
	/* Offset and size of the packet size field in the client header. */
	void (*packet_size_field) (const struct lttng_ust_lib_ring_buffer_config *config,
				   size_t *offset, size_t *length);
};
88
89 /*
90 * Ring buffer instance configuration.
91 *
92 * Declare as "static const" within the client object to ensure the inline fast
93 * paths can be optimized.
94 *
95 * alloc/sync pairs:
96 *
97 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
98 * Per-cpu buffers with per-cpu synchronization. Tracing must be performed
99 * with preemption disabled (lib_ring_buffer_get_cpu() and
100 * lib_ring_buffer_put_cpu()).
101 *
102 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
103 * Per-cpu buffer with global synchronization. Tracing can be performed with
104 * preemption enabled, statistically stays on the local buffers.
105 *
106 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
107 * Should only be used for buffers belonging to a single thread or protected
108 * by mutual exclusion by the client. Note that periodical sub-buffer switch
109 * should be disabled in this kind of configuration.
110 *
111 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
112 * Global shared buffer with global synchronization.
113 *
114 * wakeup:
115 *
116 * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu deferrable timers to poll the
117 * buffers and wake up readers if data is ready. Mainly useful for tracers which
118 * don't want to call into the wakeup code on the tracing path. Use in
119 * combination with "read_timer_interval" channel_create() argument.
120 *
121 * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
122 * ready to read. Lower latencies before the reader is woken up. Mainly suitable
123 * for drivers.
124 *
125 * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
126 * has the responsibility to perform wakeups.
127 */
128 #define LTTNG_UST_RING_BUFFER_CONFIG_PADDING 20
129
/* Buffer allocation policy (see alloc/sync pairing rules above). */
enum lttng_ust_lib_ring_buffer_alloc_types {
	RING_BUFFER_ALLOC_PER_CPU,	/* one buffer per cpu */
	RING_BUFFER_ALLOC_GLOBAL,	/* single buffer shared by all cpus */
};
134
/* Writer synchronization scope (see alloc/sync pairing rules above). */
enum lttng_ust_lib_ring_buffer_sync_types {
	RING_BUFFER_SYNC_PER_CPU,	/* Wait-free */
	RING_BUFFER_SYNC_GLOBAL,	/* Lock-free */
};
139
/* Behavior when the buffer is full. Values are part of the ABI. */
enum lttng_ust_lib_ring_buffer_mode_types {
	RING_BUFFER_OVERWRITE = 0,	/* Overwrite when buffer full */
	RING_BUFFER_DISCARD = 1,	/* Discard when buffer full */
};
144
/* Output method used by the consumer to read buffer data. */
enum lttng_ust_lib_ring_buffer_output_types {
	RING_BUFFER_SPLICE,
	RING_BUFFER_MMAP,
	RING_BUFFER_READ,	/* TODO */
	RING_BUFFER_ITERATOR,
	RING_BUFFER_NONE,
};
152
/* Memory backend used for buffer storage. */
enum lttng_ust_lib_ring_buffer_backend_types {
	RING_BUFFER_PAGE,
	RING_BUFFER_VMAP,	/* TODO */
	RING_BUFFER_STATIC,	/* TODO */
};
158
/* Whether buffer state must stay readable after a crash (oops). */
enum lttng_ust_lib_ring_buffer_oops_types {
	RING_BUFFER_NO_OOPS_CONSISTENCY,
	RING_BUFFER_OOPS_CONSISTENCY,
};
163
/* Whether an inter-processor-interrupt barrier scheme is used. */
enum lttng_ust_lib_ring_buffer_ipi_types {
	RING_BUFFER_IPI_BARRIER,
	RING_BUFFER_NO_IPI_BARRIER,
};
168
/*
 * Reader wakeup policy (see "wakeup:" discussion above).
 * NOTE(review): RING_BUFFER_WAKEUP_NONE is documented in the header
 * comment above but is not defined in this enum — confirm upstream.
 */
enum lttng_ust_lib_ring_buffer_wakeup_types {
	RING_BUFFER_WAKEUP_BY_TIMER,	/* wake up performed by timer */
	RING_BUFFER_WAKEUP_BY_WRITER,	/*
					 * writer wakes up reader,
					 * not lock-free
					 * (takes spinlock).
					 */
};
177
/* Per-client ring buffer configuration; declare as "static const". */
struct lttng_ust_lib_ring_buffer_config {
	enum lttng_ust_lib_ring_buffer_alloc_types alloc;	/* per-cpu or global allocation */
	enum lttng_ust_lib_ring_buffer_sync_types sync;		/* writer synchronization scope */
	enum lttng_ust_lib_ring_buffer_mode_types mode;		/* overwrite vs discard when full */
	enum lttng_ust_lib_ring_buffer_output_types output;	/* consumer output method */
	enum lttng_ust_lib_ring_buffer_backend_types backend;	/* memory backend */
	enum lttng_ust_lib_ring_buffer_oops_types oops;		/* crash-consistency mode */
	enum lttng_ust_lib_ring_buffer_ipi_types ipi;		/* IPI barrier usage */
	enum lttng_ust_lib_ring_buffer_wakeup_types wakeup;	/* reader wakeup policy */
	/*
	 * tsc_bits: timestamp bits saved at each record.
	 * 0 and 64 disable the timestamp compression scheme.
	 */
	unsigned int tsc_bits;
	struct lttng_ust_lib_ring_buffer_client_cb cb;	/* client callbacks (slow path) */
	/*
	 * client_type is used by the consumer process (which is in a
	 * different address space) to lookup the appropriate client
	 * callbacks and update the cb pointers.
	 */
	int client_type;
	int _unused1;	/* reserved; keeps explicit alignment of cb_ptr */
	const struct lttng_ust_lib_ring_buffer_client_cb *cb_ptr;	/* updated by the consumer (see client_type) */
	char padding[LTTNG_UST_RING_BUFFER_CONFIG_PADDING];	/* reserved space for future extension */
};
203
204 /*
205 * ring buffer context
206 *
207 * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
208 * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
209 * lib_ring_buffer_write().
210 *
211 * IMPORTANT: this structure is part of the ABI between the probe and
212 * UST. Fields need to be only added at the end, never reordered, never
213 * removed.
214 */
215 #define LTTNG_UST_RING_BUFFER_CTX_PADDING 64
/* ABI-frozen: fields may only be appended, never reordered or removed. */
struct lttng_ust_lib_ring_buffer_ctx {
	uint32_t struct_size;		/* Size of this structure. */

	/* input received by lib_ring_buffer_reserve(), saved here. */
	struct lttng_ust_lib_ring_buffer_channel *chan;	/* channel */
	void *priv;			/* client private data */
	struct lttng_ust_shm_handle *handle;	/* shared-memory handle */
	size_t data_size;		/* size of payload */
	int largest_align;		/*
					 * alignment of the largest element
					 * in the payload
					 */
	int cpu;			/* processor id */

	/* output from lib_ring_buffer_reserve() */
	struct lttng_ust_lib_ring_buffer *buf;	/*
					 * buffer corresponding to processor id
					 * for this channel
					 */
	size_t slot_size;		/* size of the reserved slot */
	unsigned long buf_offset;	/* offset following the record header */
	unsigned long pre_offset;	/*
					 * Initial offset position _before_
					 * the record is written. Positioned
					 * prior to record header alignment
					 * padding.
					 */
	uint64_t tsc;			/* time-stamp counter value */
	unsigned int rflags;		/* reservation flags */
	void *ip;			/* caller ip address */
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
};
248
249 /**
250 * lib_ring_buffer_ctx_init - initialize ring buffer context
251 * @ctx: ring buffer context to initialize
252 * @chan: channel
253 * @priv: client private data
254 * @data_size: size of record data payload
255 * @largest_align: largest alignment within data payload types
256 * @cpu: processor id
257 */
258 static inline lttng_ust_notrace
259 void lib_ring_buffer_ctx_init(struct lttng_ust_lib_ring_buffer_ctx *ctx,
260 struct lttng_ust_lib_ring_buffer_channel *chan,
261 void *priv, size_t data_size, int largest_align,
262 int cpu, struct lttng_ust_shm_handle *handle);
263 static inline
264 void lib_ring_buffer_ctx_init(struct lttng_ust_lib_ring_buffer_ctx *ctx,
265 struct lttng_ust_lib_ring_buffer_channel *chan,
266 void *priv, size_t data_size, int largest_align,
267 int cpu, struct lttng_ust_shm_handle *handle)
268 {
269 ctx->struct_size = sizeof(struct lttng_ust_lib_ring_buffer_ctx);
270 ctx->chan = chan;
271 ctx->priv = priv;
272 ctx->data_size = data_size;
273 ctx->largest_align = largest_align;
274 ctx->cpu = cpu;
275 ctx->rflags = 0;
276 ctx->handle = handle;
277 ctx->ip = 0;
278 }
279
280 /*
281 * Reservation flags.
282 *
283 * RING_BUFFER_RFLAG_FULL_TSC
284 *
285 * This flag is passed to record_header_size() and to the primitive used to
286 * write the record header. It indicates that the full 64-bit time value is
287 * needed in the record header. If this flag is not set, the record header needs
 * only to contain "tsc_bits" bits of the time value.
289 *
290 * Reservation flags can be added by the client, starting from
 * "(RING_BUFFER_RFLAG_END << 0)". It can be used to pass information from
292 * record_header_size() to lib_ring_buffer_write_record_header().
293 */
294 #define RING_BUFFER_RFLAG_FULL_TSC (1U << 0)
295 #define RING_BUFFER_RFLAG_END (1U << 1)
296
297 /*
298 * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
299 * compile-time. We have to duplicate the "config->align" information and the
300 * definition here because config->align is used both in the slow and fast
301 * paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
302 */
303 #ifdef RING_BUFFER_ALIGN
304
305 # define RING_BUFFER_ALIGN_ATTR /* Default arch alignment */
306
/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 * Natural-alignment build: delegates to lttng_ust_offset_align() to
 * compute the padding bytes required to align @align_drift for a type
 * of @size_of_type bytes.
 */
static inline lttng_ust_notrace
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type);
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return lttng_ust_offset_align(align_drift, size_of_type);
}
318
319 #else
320
321 # define RING_BUFFER_ALIGN_ATTR __attribute__((packed))
322
/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 * Packed build (no RING_BUFFER_ALIGN): records are packed, so no
 * alignment padding is ever required; both parameters are ignored.
 */
static inline lttng_ust_notrace
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type);
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return 0;
}
334
335 #endif
336
337 /**
338 * lib_ring_buffer_align_ctx - Align context offset on "alignment"
339 * @ctx: ring buffer context.
340 */
341 static inline lttng_ust_notrace
342 void lib_ring_buffer_align_ctx(struct lttng_ust_lib_ring_buffer_ctx *ctx,
343 size_t alignment);
344 static inline
345 void lib_ring_buffer_align_ctx(struct lttng_ust_lib_ring_buffer_ctx *ctx,
346 size_t alignment)
347 {
348 ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
349 alignment);
350 }
351
352 /*
353 * lib_ring_buffer_check_config() returns 0 on success.
354 * Used internally to check for valid configurations at channel creation.
355 */
356 static inline lttng_ust_notrace
357 int lib_ring_buffer_check_config(const struct lttng_ust_lib_ring_buffer_config *config,
358 unsigned int switch_timer_interval,
359 unsigned int read_timer_interval);
360 static inline
361 int lib_ring_buffer_check_config(const struct lttng_ust_lib_ring_buffer_config *config,
362 unsigned int switch_timer_interval,
363 unsigned int read_timer_interval)
364 {
365 if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
366 && config->sync == RING_BUFFER_SYNC_PER_CPU
367 && switch_timer_interval)
368 return -EINVAL;
369 return 0;
370 }
371
372 #endif /* _LTTNG_RING_BUFFER_CONFIG_H */