05985197ff930ee3b6a889f36abbbc5f6599aa06
[lttng-modules.git] / lib / ringbuffer / config.h
1 #ifndef _LIB_RING_BUFFER_CONFIG_H
2 #define _LIB_RING_BUFFER_CONFIG_H
3
4 /*
5 * lib/ringbuffer/config.h
6 *
7 * Ring buffer configuration header. Note: after declaring the standard inline
8 * functions, clients should also include linux/ringbuffer/api.h.
9 *
10 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
11 *
12 * This library is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU Lesser General Public
14 * License as published by the Free Software Foundation; only
15 * version 2.1 of the License.
16 *
17 * This library is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * Lesser General Public License for more details.
21 *
22 * You should have received a copy of the GNU Lesser General Public
23 * License along with this library; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 */
26
27 #include <linux/types.h>
28 #include <linux/percpu.h>
29 #include <lib/align.h>
30 #include <lttng-tracer-core.h>
31
32 struct lib_ring_buffer;
33 struct channel;
34 struct lib_ring_buffer_config;
35 struct lib_ring_buffer_ctx;
36
37 /*
38 * Ring buffer client callbacks. Only used by slow path, never on fast path.
39 * For the fast path, record_header_size(), ring_buffer_clock_read() should be
40 * provided as inline functions too. These may simply return 0 if not used by
41 * the client.
42 */
struct lib_ring_buffer_client_cb {
	/* Mandatory callbacks */

	/* A static inline version is also required for fast path */
	u64 (*ring_buffer_clock_read) (struct channel *chan);
	/*
	 * Size of the record header to reserve at @offset; also returns,
	 * through *pre_header_padding, the padding needed before the header.
	 */
	size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
				      struct channel *chan, size_t offset,
				      size_t *pre_header_padding,
				      struct lib_ring_buffer_ctx *ctx,
				      void *client_ctx);

	/* Slow path only, at subbuffer switch */
	size_t (*subbuffer_header_size) (void);
	/* Invoked when sub-buffer @subbuf_idx is opened for writing. */
	void (*buffer_begin) (struct lib_ring_buffer *buf, u64 tsc,
			      unsigned int subbuf_idx);
	/*
	 * Invoked when sub-buffer @subbuf_idx is closed; @data_size is the
	 * amount of data it contains.
	 */
	void (*buffer_end) (struct lib_ring_buffer *buf, u64 tsc,
			    unsigned int subbuf_idx, unsigned long data_size);

	/* Optional callbacks (can be set to NULL) */

	/* Called at buffer creation/finalize */
	int (*buffer_create) (struct lib_ring_buffer *buf, void *priv,
			      int cpu, const char *name);
	/*
	 * Clients should guarantee that no new reader handle can be opened
	 * after finalize.
	 */
	void (*buffer_finalize) (struct lib_ring_buffer *buf, void *priv, int cpu);

	/*
	 * Extract header length, payload length and timestamp from event
	 * record. Used by buffer iterators. Timestamp is only used by channel
	 * iterator.
	 */
	void (*record_get) (const struct lib_ring_buffer_config *config,
			    struct channel *chan, struct lib_ring_buffer *buf,
			    size_t offset, size_t *header_len,
			    size_t *payload_len, u64 *timestamp);
};
82
83 /*
84 * Ring buffer instance configuration.
85 *
86 * Declare as "static const" within the client object to ensure the inline fast
87 * paths can be optimized.
88 *
89 * alloc/sync pairs:
90 *
91 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
92 * Per-cpu buffers with per-cpu synchronization. Tracing must be performed
93 * with preemption disabled (lib_ring_buffer_get_cpu() and
94 * lib_ring_buffer_put_cpu()).
95 *
96 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
97 * Per-cpu buffer with global synchronization. Tracing can be performed with
98 * preemption enabled, statistically stays on the local buffers.
99 *
100 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
101 * Should only be used for buffers belonging to a single thread or protected
102 * by mutual exclusion by the client. Note that periodical sub-buffer switch
103 * should be disabled in this kind of configuration.
104 *
105 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
106 * Global shared buffer with global synchronization.
107 *
108 * wakeup:
109 *
110 * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu timers to poll the
111 * buffers and wake up readers if data is ready. Mainly useful for tracers which
112 * don't want to call into the wakeup code on the tracing path. Use in
113 * combination with "read_timer_interval" channel_create() argument.
114 *
115 * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
116 * ready to read. Lower latencies before the reader is woken up. Mainly suitable
117 * for drivers.
118 *
 * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
 * has the responsibility to perform wakeups.
 * Note: RING_BUFFER_WAKEUP_NONE is documented here but has no corresponding
 * enumerator in the "wakeup" enum below — confirm whether it is supported.
121 */
struct lib_ring_buffer_config {
	/* Buffer allocation scheme: one buffer per cpu, or one global buffer. */
	enum {
		RING_BUFFER_ALLOC_PER_CPU,
		RING_BUFFER_ALLOC_GLOBAL,
	} alloc;
	/* Writer synchronization scheme (see alloc/sync pairs above). */
	enum {
		RING_BUFFER_SYNC_PER_CPU,	/* Wait-free */
		RING_BUFFER_SYNC_GLOBAL,	/* Lock-free */
	} sync;
	/* Behavior when the buffer is full. */
	enum {
		RING_BUFFER_OVERWRITE,		/* Overwrite when buffer full */
		RING_BUFFER_DISCARD,		/* Discard when buffer full */
	} mode;
	/* Mechanism offered to readers for extracting data. */
	enum {
		RING_BUFFER_SPLICE,
		RING_BUFFER_MMAP,
		RING_BUFFER_READ,		/* TODO */
		RING_BUFFER_ITERATOR,
		RING_BUFFER_NONE,
	} output;
	/* Backing memory type for the buffers. */
	enum {
		RING_BUFFER_PAGE,
		RING_BUFFER_VMAP,		/* TODO */
		RING_BUFFER_STATIC,		/* TODO */
	} backend;
	/* Whether buffer consistency is maintained for post-oops extraction. */
	enum {
		RING_BUFFER_NO_OOPS_CONSISTENCY,
		RING_BUFFER_OOPS_CONSISTENCY,
	} oops;
	/* Whether cross-cpu IPIs may be used as barriers. */
	enum {
		RING_BUFFER_IPI_BARRIER,
		RING_BUFFER_NO_IPI_BARRIER,
	} ipi;
	/* Reader wakeup policy (see "wakeup" section above). */
	enum {
		RING_BUFFER_WAKEUP_BY_TIMER,	/* wake up performed by timer */
		RING_BUFFER_WAKEUP_BY_WRITER,	/*
						 * writer wakes up reader,
						 * not lock-free
						 * (takes spinlock).
						 */
	} wakeup;
	/*
	 * tsc_bits: timestamp bits saved at each record.
	 * 0 and 64 disable the timestamp compression scheme.
	 */
	unsigned int tsc_bits;
	/* Client callback table (see struct lib_ring_buffer_client_cb). */
	struct lib_ring_buffer_client_cb cb;
};
170
171 /*
172 * ring buffer context
173 *
174 * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
175 * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
176 * lib_ring_buffer_write().
177 */
struct lib_ring_buffer_ctx {
	/* input received by lib_ring_buffer_reserve(), saved here. */
	struct channel *chan;		/* channel */
	void *priv;			/* client private data */
	size_t data_size;		/* size of payload */
	int largest_align;		/*
					 * alignment of the largest element
					 * in the payload
					 */
	int cpu;			/* processor id */

	/* output from lib_ring_buffer_reserve() */
	struct lib_ring_buffer *buf;	/*
					 * buffer corresponding to processor id
					 * for this channel
					 */
	size_t slot_size;		/* size of the reserved slot */
	unsigned long buf_offset;	/* offset following the record header */
	unsigned long pre_offset;	/*
					 * Initial offset position _before_
					 * the record is written. Positioned
					 * prior to record header alignment
					 * padding.
					 */
	u64 tsc;			/* time-stamp counter value */
	unsigned int rflags;		/* reservation flags */
	/* Cache backend pages pointer chasing. */
	struct lib_ring_buffer_backend_pages *backend_pages;
};
207
208 /**
209 * lib_ring_buffer_ctx_init - initialize ring buffer context
210 * @ctx: ring buffer context to initialize
211 * @chan: channel
212 * @priv: client private data
213 * @data_size: size of record data payload. It must be greater than 0.
214 * @largest_align: largest alignment within data payload types
215 * @cpu: processor id
216 */
217 static inline
218 void lib_ring_buffer_ctx_init(struct lib_ring_buffer_ctx *ctx,
219 struct channel *chan, void *priv,
220 size_t data_size, int largest_align,
221 int cpu)
222 {
223 ctx->chan = chan;
224 ctx->priv = priv;
225 ctx->data_size = data_size;
226 ctx->largest_align = largest_align;
227 ctx->cpu = cpu;
228 ctx->rflags = 0;
229 ctx->backend_pages = NULL;
230 }
231
232 /*
233 * Reservation flags.
234 *
235 * RING_BUFFER_RFLAG_FULL_TSC
236 *
237 * This flag is passed to record_header_size() and to the primitive used to
238 * write the record header. It indicates that the full 64-bit time value is
239 * needed in the record header. If this flag is not set, the record header needs
240 * only to contain "tsc_bits" bit of time value.
241 *
242 * Reservation flags can be added by the client, starting from
243 * "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
244 * record_header_size() to lib_ring_buffer_write_record_header().
245 */
246 #define RING_BUFFER_RFLAG_FULL_TSC (1U << 0)
247 #define RING_BUFFER_RFLAG_END (1U << 1)
248
249 #ifndef LTTNG_TRACER_CORE_H
250 #error "lttng-tracer-core.h is needed for RING_BUFFER_ALIGN define"
251 #endif
252
253 /*
254 * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
255 * compile-time. We have to duplicate the "config->align" information and the
256 * definition here because config->align is used both in the slow and fast
257 * paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
258 */
259 #ifdef RING_BUFFER_ALIGN
260
261 # define RING_BUFFER_ALIGN_ATTR /* Default arch alignment */
262
/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 * Delegates to offset_align() (lib/align.h): returns how many padding
 * bytes must be inserted at align_drift so the next write of a
 * size_of_type-sized object is naturally aligned.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return offset_align(align_drift, size_of_type);
}
272
273 #else
274
275 # define RING_BUFFER_ALIGN_ATTR __attribute__((packed))
276
/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 * Without RING_BUFFER_ALIGN, record fields are packed
 * (RING_BUFFER_ALIGN_ATTR is __attribute__((packed))), so no alignment
 * padding is ever required.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	(void) align_drift;	/* unused: packed layout needs no padding */
	(void) size_of_type;	/* unused: packed layout needs no padding */
	return 0;
}
286
287 #endif
288
289 /**
290 * lib_ring_buffer_align_ctx - Align context offset on "alignment"
291 * @ctx: ring buffer context.
292 */
293 static inline
294 void lib_ring_buffer_align_ctx(struct lib_ring_buffer_ctx *ctx,
295 size_t alignment)
296 {
297 ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
298 alignment);
299 }
300
301 /*
302 * lib_ring_buffer_check_config() returns 0 on success.
303 * Used internally to check for valid configurations at channel creation.
304 */
305 static inline
306 int lib_ring_buffer_check_config(const struct lib_ring_buffer_config *config,
307 unsigned int switch_timer_interval,
308 unsigned int read_timer_interval)
309 {
310 if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
311 && config->sync == RING_BUFFER_SYNC_PER_CPU
312 && switch_timer_interval)
313 return -EINVAL;
314 return 0;
315 }
316
317 #include <wrapper/ringbuffer/vatomic.h>
318
319 #endif /* _LIB_RING_BUFFER_CONFIG_H */
This page took 0.034856 seconds and 3 git commands to generate.