/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright (C) 2010 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring buffer configuration header. Note: after declaring the standard inline
 * functions, clients should also include linux/ringbuffer/api.h.
 */
#ifndef _LTTNG_RING_BUFFER_CONFIG_H
#define _LTTNG_RING_BUFFER_CONFIG_H
#include <errno.h>
#include <stdint.h>
#include <stddef.h>

#include "lttng/ust-tracer.h"
#include <urcu/arch.h>
#include "lttng/align.h"
#include <lttng/ust-compiler.h>
struct lttng_ust_lib_ring_buffer;
struct channel;
struct lttng_ust_lib_ring_buffer_config;
struct lttng_ust_lib_ring_buffer_ctx;
struct lttng_ust_shm_handle;
/*
 * Ring buffer client callbacks. Only used by slow path, never on fast path.
 * For the fast path, record_header_size() and ring_buffer_clock_read() should
 * be provided as inline functions too. These may simply return 0 if not used
 * by the client.
 */
struct lttng_ust_lib_ring_buffer_client_cb {
	/* Mandatory callbacks */

	/* A static inline version is also required for fast path */
	uint64_t (*ring_buffer_clock_read) (struct channel *chan);
	size_t (*record_header_size) (const struct lttng_ust_lib_ring_buffer_config *config,
				      struct channel *chan, size_t offset,
				      size_t *pre_header_padding,
				      struct lttng_ust_lib_ring_buffer_ctx *ctx,
				      void *client_ctx);
	/* Slow path only, at subbuffer switch */
	size_t (*subbuffer_header_size) (void);
	void (*buffer_begin) (struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
			      unsigned int subbuf_idx,
			      struct lttng_ust_shm_handle *handle);
	void (*buffer_end) (struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
			    unsigned int subbuf_idx, unsigned long data_size,
			    struct lttng_ust_shm_handle *handle);
	/* Optional callbacks (can be set to NULL) */

	/* Called at buffer creation/finalize */
	int (*buffer_create) (struct lttng_ust_lib_ring_buffer *buf, void *priv,
			      int cpu, const char *name,
			      struct lttng_ust_shm_handle *handle);
	/*
	 * Clients should guarantee that no new reader handle can be opened
	 * after finalize.
	 */
	void (*buffer_finalize) (struct lttng_ust_lib_ring_buffer *buf,
				 void *priv, int cpu,
				 struct lttng_ust_shm_handle *handle);
	/*
	 * Extract header length, payload length and timestamp from event
	 * record. Used by buffer iterators. Timestamp is only used by channel
	 * iterator.
	 */
	void (*record_get) (const struct lttng_ust_lib_ring_buffer_config *config,
			    struct channel *chan, struct lttng_ust_lib_ring_buffer *buf,
			    size_t offset, size_t *header_len,
			    size_t *payload_len, uint64_t *timestamp,
			    struct lttng_ust_shm_handle *handle);
	/*
	 * Offset and size of content size field in client.
	 */
	void (*content_size_field) (const struct lttng_ust_lib_ring_buffer_config *config,
				size_t *offset, size_t *length);
	void (*packet_size_field) (const struct lttng_ust_lib_ring_buffer_config *config,
				size_t *offset, size_t *length);
};
/*
 * Ring buffer instance configuration.
 *
 * Declare as "static const" within the client object to ensure the inline fast
 * paths can be optimized.
 *
 * alloc/sync pairs:
 *
 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
 *   Per-cpu buffers with per-cpu synchronization. Tracing must be performed
 *   with preemption disabled (lib_ring_buffer_get_cpu() and
 *   lib_ring_buffer_put_cpu()).
 *
 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
 *   Per-cpu buffer with global synchronization. Tracing can be performed with
 *   preemption enabled, statistically stays on the local buffers.
 *
 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
 *   Should only be used for buffers belonging to a single thread or protected
 *   by mutual exclusion by the client. Note that periodical sub-buffer switch
 *   should be disabled in this kind of configuration.
 *
 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
 *   Global shared buffer with global synchronization.
 *
 * wakeup:
 *
 * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu deferrable timers to poll the
 * buffers and wake up readers if data is ready. Mainly useful for tracers which
 * don't want to call into the wakeup code on the tracing path. Use in
 * combination with the "read_timer_interval" channel_create() argument.
 *
 * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
 * ready to read. Lower latencies before the reader is woken up. Mainly suitable
 * for drivers.
 *
 * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
 * has the responsibility to perform wakeups.
 */
#define LTTNG_UST_RING_BUFFER_CONFIG_PADDING	20

enum lttng_ust_lib_ring_buffer_alloc_types {
	RING_BUFFER_ALLOC_PER_CPU,
	RING_BUFFER_ALLOC_GLOBAL,
};

enum lttng_ust_lib_ring_buffer_sync_types {
	RING_BUFFER_SYNC_PER_CPU,	/* Wait-free */
	RING_BUFFER_SYNC_GLOBAL,	/* Lock-free */
};

enum lttng_ust_lib_ring_buffer_mode_types {
	RING_BUFFER_OVERWRITE = 0,	/* Overwrite when buffer full */
	RING_BUFFER_DISCARD = 1,	/* Discard when buffer full */
};
enum lttng_ust_lib_ring_buffer_output_types {
	RING_BUFFER_SPLICE,
	RING_BUFFER_MMAP,
	RING_BUFFER_READ,		/* TODO */
	RING_BUFFER_ITERATOR,
	RING_BUFFER_NONE,
};
enum lttng_ust_lib_ring_buffer_backend_types {
	RING_BUFFER_PAGE,
	RING_BUFFER_VMAP,		/* TODO */
	RING_BUFFER_STATIC,		/* TODO */
};
enum lttng_ust_lib_ring_buffer_oops_types {
	RING_BUFFER_NO_OOPS_CONSISTENCY,
	RING_BUFFER_OOPS_CONSISTENCY,
};

enum lttng_ust_lib_ring_buffer_ipi_types {
	RING_BUFFER_IPI_BARRIER,
	RING_BUFFER_NO_IPI_BARRIER,
};
enum lttng_ust_lib_ring_buffer_wakeup_types {
	RING_BUFFER_WAKEUP_BY_TIMER,	/* wake up performed by timer */
	RING_BUFFER_WAKEUP_BY_WRITER,	/*
					 * writer wakes up reader,
					 * not lock-free
					 * (takes spinlock).
					 */
};
struct lttng_ust_lib_ring_buffer_config {
	enum lttng_ust_lib_ring_buffer_alloc_types alloc;
	enum lttng_ust_lib_ring_buffer_sync_types sync;
	enum lttng_ust_lib_ring_buffer_mode_types mode;
	enum lttng_ust_lib_ring_buffer_output_types output;
	enum lttng_ust_lib_ring_buffer_backend_types backend;
	enum lttng_ust_lib_ring_buffer_oops_types oops;
	enum lttng_ust_lib_ring_buffer_ipi_types ipi;
	enum lttng_ust_lib_ring_buffer_wakeup_types wakeup;
	/*
	 * tsc_bits: timestamp bits saved at each record.
	 *   0 and 64 disable the timestamp compression scheme.
	 */
	unsigned int tsc_bits;
	struct lttng_ust_lib_ring_buffer_client_cb cb;
	/*
	 * client_type is used by the consumer process (which is in a
	 * different address space) to look up the appropriate client
	 * callbacks and update the cb pointers.
	 */
	int client_type;
	int _unused1;
	const struct lttng_ust_lib_ring_buffer_client_cb *cb_ptr;
	char padding[LTTNG_UST_RING_BUFFER_CONFIG_PADDING];
};
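
/*
 * Illustrative sketch (not part of LTTng-UST): following the recommendation
 * above, a client would declare its configuration as "static const" so the
 * inline fast paths can constant-fold it, for instance:
 *
 *	static const struct lttng_ust_lib_ring_buffer_config client_config = {
 *		.cb.ring_buffer_clock_read = client_clock_read,
 *		.cb.record_header_size = client_record_header_size,
 *		.cb.subbuffer_header_size = client_packet_header_size,
 *		.tsc_bits = 27,
 *		.alloc = RING_BUFFER_ALLOC_PER_CPU,
 *		.sync = RING_BUFFER_SYNC_GLOBAL,
 *		.mode = RING_BUFFER_DISCARD,
 *		.backend = RING_BUFFER_PAGE,
 *		.output = RING_BUFFER_MMAP,
 *		.oops = RING_BUFFER_NO_OOPS_CONSISTENCY,
 *		.ipi = RING_BUFFER_NO_IPI_BARRIER,
 *		.wakeup = RING_BUFFER_WAKEUP_BY_WRITER,
 *	};
 *
 * The callback identifiers and the tsc_bits value are placeholders; the enum
 * values are the ones declared above.
 */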
/*
 * ring buffer context
 *
 * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
 * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
 * lib_ring_buffer_write().
 *
 * IMPORTANT: this structure is part of the ABI between the probe and
 * UST. Fields need to be only added at the end, never reordered, never
 * removed.
 */
#define LTTNG_UST_RING_BUFFER_CTX_PADDING	64
struct lttng_ust_lib_ring_buffer_ctx {
	uint32_t struct_size;		/* Size of this structure. */

	/* input received by lib_ring_buffer_reserve(), saved here. */
	struct channel *chan;		/* channel */
	void *priv;			/* client private data */
	struct lttng_ust_shm_handle *handle;	/* shared-memory handle */
	size_t data_size;		/* size of payload */
	int largest_align;		/*
					 * alignment of the largest element
					 * in the payload
					 */
	int cpu;			/* processor id */

	/* output from lib_ring_buffer_reserve() */
	struct lttng_ust_lib_ring_buffer *buf;	/*
					 * buffer corresponding to processor id
					 * for this channel
					 */
	size_t slot_size;		/* size of the reserved slot */
	unsigned long buf_offset;	/* offset following the record header */
	unsigned long pre_offset;	/*
					 * Initial offset position _before_
					 * the record is written. Positioned
					 * prior to record header alignment
					 * padding.
					 */
	uint64_t tsc;			/* time-stamp counter value */
	unsigned int rflags;		/* reservation flags */
	void *ip;			/* caller ip address */
	struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
};
/**
 * lib_ring_buffer_ctx_init - initialize ring buffer context
 * @ctx: ring buffer context to initialize
 * @chan: channel
 * @priv: client private data
 * @data_size: size of record data payload
 * @largest_align: largest alignment within data payload types
 * @cpu: processor id
 */
static inline lttng_ust_notrace
void lib_ring_buffer_ctx_init(struct lttng_ust_lib_ring_buffer_ctx *ctx,
			      struct channel *chan, void *priv,
			      size_t data_size, int largest_align,
			      int cpu, struct lttng_ust_shm_handle *handle);
static inline
void lib_ring_buffer_ctx_init(struct lttng_ust_lib_ring_buffer_ctx *ctx,
			      struct channel *chan, void *priv,
			      size_t data_size, int largest_align,
			      int cpu, struct lttng_ust_shm_handle *handle)
{
	ctx->struct_size = sizeof(struct lttng_ust_lib_ring_buffer_ctx);
	ctx->chan = chan;
	ctx->priv = priv;
	ctx->data_size = data_size;
	ctx->largest_align = largest_align;
	ctx->cpu = cpu;
	ctx->rflags = 0;
	ctx->handle = handle;
	ctx->ip = 0;
}
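
/*
 * Illustrative usage sketch (not part of LTTng-UST): writing one record
 * typically initializes the context on the stack, reserves space, writes the
 * payload, then commits. lib_ring_buffer_reserve(), lib_ring_buffer_write()
 * and lib_ring_buffer_commit() are declared in the ring buffer API headers;
 * the argument lists below are schematic, and "client_config", "payload",
 * "chan", "cpu" and "handle" are placeholders.
 *
 *	struct lttng_ust_lib_ring_buffer_ctx ctx;
 *	int ret;
 *
 *	lib_ring_buffer_ctx_init(&ctx, chan, NULL, sizeof(payload),
 *				 sizeof(uint64_t), cpu, handle);
 *	ret = lib_ring_buffer_reserve(&client_config, &ctx);
 *	if (ret)
 *		return ret;	// e.g. buffer full in discard mode
 *	lib_ring_buffer_write(&client_config, &ctx, &payload, sizeof(payload));
 *	lib_ring_buffer_commit(&client_config, &ctx);
 */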
/*
 * RING_BUFFER_RFLAG_FULL_TSC
 *
 * This flag is passed to record_header_size() and to the primitive used to
 * write the record header. It indicates that the full 64-bit time value is
 * needed in the record header. If this flag is not set, the record header
 * only needs to contain "tsc_bits" bits of time value.
 *
 * Reservation flags can be added by the client, starting from
 * "(RING_BUFFER_RFLAG_END << 0)". They can be used to pass information from
 * record_header_size() to lib_ring_buffer_write_record_header().
 */
#define	RING_BUFFER_RFLAG_FULL_TSC	(1U << 0)
#define RING_BUFFER_RFLAG_END		(1U << 1)
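
/*
 * Illustrative example (hypothetical flag, not defined by this header):
 *
 *	#define CLIENT_RFLAG_EXTENDED_HEADER	(RING_BUFFER_RFLAG_END << 0)
 *
 * record_header_size() could set such a bit in ctx->rflags so that the record
 * header writer later emits an extended header for that record.
 */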
/*
 * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
 * compile-time. We have to duplicate the "config->align" information and the
 * definition here because config->align is used both in the slow and fast
 * paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
 */
#ifdef RING_BUFFER_ALIGN

# define RING_BUFFER_ALIGN_ATTR		/* Default arch alignment */
/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 */
static inline lttng_ust_notrace
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type);
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return lttng_ust_offset_align(align_drift, size_of_type);
}
#else

# define RING_BUFFER_ALIGN_ATTR __attribute__((packed))

/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 */
static inline lttng_ust_notrace
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type);
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return 0;
}

#endif
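
/*
 * Worked example (illustrative): with RING_BUFFER_ALIGN defined (natural
 * alignment), lib_ring_buffer_align(5, 4) returns 3, the padding needed to
 * bring a 5-byte offset up to the next 4-byte boundary (offset 8). In the
 * packed configuration it always returns 0, so fields are laid out
 * back-to-back with no alignment padding.
 */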
/**
 * lib_ring_buffer_align_ctx - Align context offset on "alignment"
 * @ctx: ring buffer context.
 * @alignment: alignment, in bytes, to apply to the context's buffer offset.
 */
static inline lttng_ust_notrace
void lib_ring_buffer_align_ctx(struct lttng_ust_lib_ring_buffer_ctx *ctx,
			   size_t alignment);
static inline
void lib_ring_buffer_align_ctx(struct lttng_ust_lib_ring_buffer_ctx *ctx,
			   size_t alignment)
{
	ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
						 alignment);
}
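
/*
 * Illustrative usage (assumption): before serializing a field, a client
 * typically aligns the write position on the field's natural alignment and
 * then writes it, e.g.:
 *
 *	lib_ring_buffer_align_ctx(&ctx, sizeof(uint64_t));
 *	lib_ring_buffer_write(&client_config, &ctx, &value, sizeof(value));
 *
 * where "value" and "client_config" are placeholders and
 * lib_ring_buffer_write() is declared in the ring buffer backend API header.
 */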
/*
 * lib_ring_buffer_check_config() returns 0 on success.
 * Used internally to check for valid configurations at channel creation.
 */
static inline lttng_ust_notrace
int lib_ring_buffer_check_config(const struct lttng_ust_lib_ring_buffer_config *config,
				 unsigned int switch_timer_interval,
				 unsigned int read_timer_interval);
static inline
int lib_ring_buffer_check_config(const struct lttng_ust_lib_ring_buffer_config *config,
				 unsigned int switch_timer_interval,
				 unsigned int read_timer_interval)
{
	if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
	    && config->sync == RING_BUFFER_SYNC_PER_CPU
	    && switch_timer_interval)
		return -EINVAL;
	return 0;
}
#endif /* _LTTNG_RING_BUFFER_CONFIG_H */