Fix: Add support for 4.9.27-rt18 kernel
[lttng-modules.git] / lib / ringbuffer / config.h
CommitLineData
886d51a3
MD
1#ifndef _LIB_RING_BUFFER_CONFIG_H
2#define _LIB_RING_BUFFER_CONFIG_H
f3bc08c5
MD
3
4/*
886d51a3 5 * lib/ringbuffer/config.h
f3bc08c5
MD
6 *
7 * Ring buffer configuration header. Note: after declaring the standard inline
8 * functions, clients should also include linux/ringbuffer/api.h.
9 *
886d51a3
MD
10 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
11 *
12 * This library is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU Lesser General Public
14 * License as published by the Free Software Foundation; only
15 * version 2.1 of the License.
16 *
17 * This library is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * Lesser General Public License for more details.
21 *
22 * You should have received a copy of the GNU Lesser General Public
23 * License along with this library; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
f3bc08c5
MD
25 */
26
27#include <linux/types.h>
28#include <linux/percpu.h>
5671a661
MD
29#include <lib/align.h>
30#include <lttng-tracer-core.h>
f3bc08c5
MD
31
32struct lib_ring_buffer;
33struct channel;
34struct lib_ring_buffer_config;
35struct lib_ring_buffer_ctx;
36
/*
 * Ring buffer client callbacks. Only used by slow path, never on fast path.
 * For the fast path, record_header_size(), ring_buffer_clock_read() should be
 * provided as inline functions too. These may simply return 0 if not used by
 * the client.
 */
struct lib_ring_buffer_client_cb {
	/* Mandatory callbacks */

	/* A static inline version is also required for fast path */
	u64 (*ring_buffer_clock_read) (struct channel *chan);
	size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
				      struct channel *chan, size_t offset,
				      size_t *pre_header_padding,
				      struct lib_ring_buffer_ctx *ctx);

	/* Slow path only, at subbuffer switch */
	size_t (*subbuffer_header_size) (void);
	void (*buffer_begin) (struct lib_ring_buffer *buf, u64 tsc,
			      unsigned int subbuf_idx);
	void (*buffer_end) (struct lib_ring_buffer *buf, u64 tsc,
			    unsigned int subbuf_idx, unsigned long data_size);

	/* Optional callbacks (can be set to NULL) */

	/* Called at buffer creation/finalize */
	int (*buffer_create) (struct lib_ring_buffer *buf, void *priv,
			      int cpu, const char *name);
	/*
	 * Clients should guarantee that no new reader handle can be opened
	 * after finalize.
	 */
	void (*buffer_finalize) (struct lib_ring_buffer *buf, void *priv, int cpu);

	/*
	 * Extract header length, payload length and timestamp from event
	 * record. Used by buffer iterators. Timestamp is only used by channel
	 * iterator.
	 */
	void (*record_get) (const struct lib_ring_buffer_config *config,
			    struct channel *chan, struct lib_ring_buffer *buf,
			    size_t offset, size_t *header_len,
			    size_t *payload_len, u64 *timestamp);
};

/*
 * Ring buffer instance configuration.
 *
 * Declare as "static const" within the client object to ensure the inline fast
 * paths can be optimized.
 *
 * alloc/sync pairs:
 *
 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
 *   Per-cpu buffers with per-cpu synchronization. Tracing must be performed
 *   with preemption disabled (lib_ring_buffer_get_cpu() and
 *   lib_ring_buffer_put_cpu()).
 *
 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
 *   Per-cpu buffer with global synchronization. Tracing can be performed with
 *   preemption enabled, statistically stays on the local buffers.
 *
 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
 *   Should only be used for buffers belonging to a single thread or protected
 *   by mutual exclusion by the client. Note that periodical sub-buffer switch
 *   should be disabled in this kind of configuration.
 *
 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
 *   Global shared buffer with global synchronization.
 *
 * wakeup:
 *
 * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu timers to poll the
 * buffers and wake up readers if data is ready. Mainly useful for tracers which
 * don't want to call into the wakeup code on the tracing path. Use in
 * combination with "read_timer_interval" channel_create() argument.
 *
 * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
 * ready to read. Lower latencies before the reader is woken up. Mainly suitable
 * for drivers.
 *
 * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
 * has the responsibility to perform wakeups.
 */
struct lib_ring_buffer_config {
	enum {
		RING_BUFFER_ALLOC_PER_CPU,
		RING_BUFFER_ALLOC_GLOBAL,
	} alloc;
	enum {
		RING_BUFFER_SYNC_PER_CPU,	/* Wait-free */
		RING_BUFFER_SYNC_GLOBAL,	/* Lock-free */
	} sync;
	enum {
		RING_BUFFER_OVERWRITE,		/* Overwrite when buffer full */
		RING_BUFFER_DISCARD,		/* Discard when buffer full */
	} mode;
	enum {
		RING_BUFFER_SPLICE,
		RING_BUFFER_MMAP,
		RING_BUFFER_READ,		/* TODO */
		RING_BUFFER_ITERATOR,
		RING_BUFFER_NONE,
	} output;
	enum {
		RING_BUFFER_PAGE,
		RING_BUFFER_VMAP,		/* TODO */
		RING_BUFFER_STATIC,		/* TODO */
	} backend;
	enum {
		RING_BUFFER_NO_OOPS_CONSISTENCY,
		RING_BUFFER_OOPS_CONSISTENCY,
	} oops;
	enum {
		RING_BUFFER_IPI_BARRIER,
		RING_BUFFER_NO_IPI_BARRIER,
	} ipi;
	enum {
		RING_BUFFER_WAKEUP_BY_TIMER,	/* wake up performed by timer */
		RING_BUFFER_WAKEUP_BY_WRITER,	/*
						 * writer wakes up reader,
						 * not lock-free
						 * (takes spinlock).
						 */
	} wakeup;
	/*
	 * tsc_bits: timestamp bits saved at each record.
	 * 0 and 64 disable the timestamp compression scheme.
	 */
	unsigned int tsc_bits;
	struct lib_ring_buffer_client_cb cb;
};

/*
 * ring buffer context
 *
 * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
 * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
 * lib_ring_buffer_write().
 */
struct lib_ring_buffer_ctx {
	/* input received by lib_ring_buffer_reserve(), saved here. */
	struct channel *chan;		/* channel */
	void *priv;			/* client private data */
	size_t data_size;		/* size of payload */
	int largest_align;		/*
					 * alignment of the largest element
					 * in the payload
					 */
	int cpu;			/* processor id */

	/* output from lib_ring_buffer_reserve() */
	struct lib_ring_buffer *buf;	/*
					 * buffer corresponding to processor id
					 * for this channel
					 */
	size_t slot_size;		/* size of the reserved slot */
	unsigned long buf_offset;	/* offset following the record header */
	unsigned long pre_offset;	/*
					 * Initial offset position _before_
					 * the record is written. Positioned
					 * prior to record header alignment
					 * padding.
					 */
	u64 tsc;			/* time-stamp counter value */
	unsigned int rflags;		/* reservation flags */
	/* Cache backend pages pointer chasing. */
	struct lib_ring_buffer_backend_pages *backend_pages;
};

/**
 * lib_ring_buffer_ctx_init - initialize ring buffer context
 * @ctx: ring buffer context to initialize
 * @chan: channel
 * @priv: client private data
 * @data_size: size of record data payload. It must be greater than 0.
 * @largest_align: largest alignment within data payload types
 * @cpu: processor id
 *
 * Fills in the "input" fields of @ctx and clears the reservation flags and
 * backend pages cache. The remaining fields (buf, slot_size, buf_offset,
 * pre_offset, tsc) are intentionally left untouched here: they are the
 * output fields filled by lib_ring_buffer_reserve().
 */
static inline
void lib_ring_buffer_ctx_init(struct lib_ring_buffer_ctx *ctx,
			      struct channel *chan, void *priv,
			      size_t data_size, int largest_align,
			      int cpu)
{
	ctx->chan = chan;
	ctx->priv = priv;
	ctx->data_size = data_size;
	ctx->largest_align = largest_align;
	ctx->cpu = cpu;
	ctx->rflags = 0;
	/* Invalidate cached backend pages until reserve sets them. */
	ctx->backend_pages = NULL;
}

/*
 * Reservation flags.
 *
 * RING_BUFFER_RFLAG_FULL_TSC
 *
 * This flag is passed to record_header_size() and to the primitive used to
 * write the record header. It indicates that the full 64-bit time value is
 * needed in the record header. If this flag is not set, the record header needs
 * only to contain "tsc_bits" bit of time value.
 *
 * Reservation flags can be added by the client, starting from
 * "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
 * record_header_size() to lib_ring_buffer_write_record_header().
 */
#define RING_BUFFER_RFLAG_FULL_TSC	(1U << 0)
#define RING_BUFFER_RFLAG_END		(1U << 1)

/*
 * lttng-tracer-core.h (included above) decides whether RING_BUFFER_ALIGN is
 * defined; fail loudly at compile time if it was not pulled in.
 */
#ifndef LTTNG_TRACER_CORE_H
#error "lttng-tracer-core.h is needed for RING_BUFFER_ALIGN define"
#endif

/*
 * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
 * compile-time. We have to duplicate the "config->align" information and the
 * definition here because config->align is used both in the slow and fast
 * paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
 */
#ifdef RING_BUFFER_ALIGN

# define RING_BUFFER_ALIGN_ATTR		/* Default arch alignment */

/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	/* offset_align() is provided by lib/align.h (included above). */
	return offset_align(align_drift, size_of_type);
}

#else

/* Unaligned build: pack record headers, no alignment padding is ever needed. */
# define RING_BUFFER_ALIGN_ATTR __attribute__((packed))

/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return 0;
}

#endif

288/**
289 * lib_ring_buffer_align_ctx - Align context offset on "alignment"
290 * @ctx: ring buffer context.
291 */
292static inline
293void lib_ring_buffer_align_ctx(struct lib_ring_buffer_ctx *ctx,
294 size_t alignment)
295{
296 ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
297 alignment);
298}
299
300/*
301 * lib_ring_buffer_check_config() returns 0 on success.
302 * Used internally to check for valid configurations at channel creation.
303 */
304static inline
305int lib_ring_buffer_check_config(const struct lib_ring_buffer_config *config,
306 unsigned int switch_timer_interval,
307 unsigned int read_timer_interval)
308{
309 if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
310 && config->sync == RING_BUFFER_SYNC_PER_CPU
311 && switch_timer_interval)
312 return -EINVAL;
313 return 0;
314}
315
c075712b 316#include <wrapper/ringbuffer/vatomic.h>
f3bc08c5 317
886d51a3 318#endif /* _LIB_RING_BUFFER_CONFIG_H */
This page took 0.043519 seconds and 4 git commands to generate.