License cleanup, ifdef namespace cleanup
[lttng-modules.git] / lib / ringbuffer / config.h
1 #ifndef _LIB_RING_BUFFER_CONFIG_H
2 #define _LIB_RING_BUFFER_CONFIG_H
3
4 /*
5 * lib/ringbuffer/config.h
6 *
7 * Ring buffer configuration header. Note: after declaring the standard inline
8 * functions, clients should also include linux/ringbuffer/api.h.
9 *
10 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
11 *
12 * This library is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU Lesser General Public
14 * License as published by the Free Software Foundation; only
15 * version 2.1 of the License.
16 *
17 * This library is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * Lesser General Public License for more details.
21 *
22 * You should have received a copy of the GNU Lesser General Public
23 * License along with this library; if not, write to the Free Software
24 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
25 */
26
27 #include <linux/types.h>
28 #include <linux/percpu.h>
29 #include "../align.h"
30
31 struct lib_ring_buffer;
32 struct channel;
33 struct lib_ring_buffer_config;
34 struct lib_ring_buffer_ctx;
35
36 /*
37 * Ring buffer client callbacks. Only used by slow path, never on fast path.
38 * For the fast path, record_header_size(), ring_buffer_clock_read() should be
39 * provided as inline functions too. These may simply return 0 if not used by
40 * the client.
41 */
struct lib_ring_buffer_client_cb {
	/* Mandatory callbacks */

	/*
	 * A static inline version is also required for fast path; this
	 * function-pointer version is only used on the slow path.
	 */
	u64 (*ring_buffer_clock_read) (struct channel *chan);
	/*
	 * Returns the record header size for a record of @data_size placed
	 * at @offset, and reports through @pre_header_padding the padding
	 * inserted before the header (alignment).
	 */
	size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
				      struct channel *chan, size_t offset,
				      size_t *pre_header_padding,
				      struct lib_ring_buffer_ctx *ctx);

	/* Slow path only, at subbuffer switch */
	size_t (*subbuffer_header_size) (void);
	/* Called when sub-buffer @subbuf_idx becomes the active writer target */
	void (*buffer_begin) (struct lib_ring_buffer *buf, u64 tsc,
			      unsigned int subbuf_idx);
	/* Called when sub-buffer @subbuf_idx is delivered; @data_size is its payload size */
	void (*buffer_end) (struct lib_ring_buffer *buf, u64 tsc,
			    unsigned int subbuf_idx, unsigned long data_size);

	/* Optional callbacks (can be set to NULL) */

	/* Called at buffer creation/finalize */
	int (*buffer_create) (struct lib_ring_buffer *buf, void *priv,
			      int cpu, const char *name);
	/*
	 * Clients should guarantee that no new reader handle can be opened
	 * after finalize.
	 */
	void (*buffer_finalize) (struct lib_ring_buffer *buf, void *priv, int cpu);

	/*
	 * Extract header length, payload length and timestamp from event
	 * record. Used by buffer iterators. Timestamp is only used by channel
	 * iterator.
	 */
	void (*record_get) (const struct lib_ring_buffer_config *config,
			    struct channel *chan, struct lib_ring_buffer *buf,
			    size_t offset, size_t *header_len,
			    size_t *payload_len, u64 *timestamp);
};
80
81 /*
82 * Ring buffer instance configuration.
83 *
84 * Declare as "static const" within the client object to ensure the inline fast
85 * paths can be optimized.
86 *
87 * alloc/sync pairs:
88 *
89 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
90 * Per-cpu buffers with per-cpu synchronization. Tracing must be performed
91 * with preemption disabled (lib_ring_buffer_get_cpu() and
92 * lib_ring_buffer_put_cpu()).
93 *
94 * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
95 * Per-cpu buffer with global synchronization. Tracing can be performed with
96 * preemption enabled, statistically stays on the local buffers.
97 *
98 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
99 * Should only be used for buffers belonging to a single thread or protected
100 * by mutual exclusion by the client. Note that periodical sub-buffer switch
101 * should be disabled in this kind of configuration.
102 *
103 * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
104 * Global shared buffer with global synchronization.
105 *
106 * wakeup:
107 *
108 * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu deferrable timers to poll the
109 * buffers and wake up readers if data is ready. Mainly useful for tracers which
110 * don't want to call into the wakeup code on the tracing path. Use in
111 * combination with "read_timer_interval" channel_create() argument.
112 *
113 * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
114 * ready to read. Lower latencies before the reader is woken up. Mainly suitable
115 * for drivers.
116 *
117 * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
118 * has the responsibility to perform wakeups.
119 */
struct lib_ring_buffer_config {
	/* Buffer allocation scheme; see alloc/sync pair semantics above. */
	enum {
		RING_BUFFER_ALLOC_PER_CPU,
		RING_BUFFER_ALLOC_GLOBAL,
	} alloc;
	/* Writer synchronization scheme; see alloc/sync pair semantics above. */
	enum {
		RING_BUFFER_SYNC_PER_CPU,	/* Wait-free */
		RING_BUFFER_SYNC_GLOBAL,	/* Lock-free */
	} sync;
	/* Behavior when the buffer is full. */
	enum {
		RING_BUFFER_OVERWRITE,		/* Overwrite when buffer full */
		RING_BUFFER_DISCARD,		/* Discard when buffer full */
	} mode;
	/* How readers consume data. */
	enum {
		RING_BUFFER_SPLICE,
		RING_BUFFER_MMAP,
		RING_BUFFER_READ,		/* TODO */
		RING_BUFFER_ITERATOR,
		RING_BUFFER_NONE,
	} output;
	/* Backing memory type for the buffers. */
	enum {
		RING_BUFFER_PAGE,
		RING_BUFFER_VMAP,		/* TODO */
		RING_BUFFER_STATIC,		/* TODO */
	} backend;
	/* Whether buffer state must stay readable after a kernel oops. */
	enum {
		RING_BUFFER_NO_OOPS_CONSISTENCY,
		RING_BUFFER_OOPS_CONSISTENCY,
	} oops;
	/* Whether cross-CPU IPIs are used as memory barriers. */
	enum {
		RING_BUFFER_IPI_BARRIER,
		RING_BUFFER_NO_IPI_BARRIER,
	} ipi;
	/* Reader wakeup policy; see "wakeup" discussion above. */
	enum {
		RING_BUFFER_WAKEUP_BY_TIMER,	/* wake up performed by timer */
		RING_BUFFER_WAKEUP_BY_WRITER,	/*
						 * writer wakes up reader,
						 * not lock-free
						 * (takes spinlock).
						 */
	} wakeup;
	/*
	 * tsc_bits: timestamp bits saved at each record.
	 * 0 and 64 disable the timestamp compression scheme.
	 */
	unsigned int tsc_bits;
	/* Client callbacks; see struct lib_ring_buffer_client_cb. */
	struct lib_ring_buffer_client_cb cb;
};
168
169 /*
170 * ring buffer context
171 *
172 * Context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
173 * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
174 * lib_ring_buffer_write().
175 */
struct lib_ring_buffer_ctx {
	/* input received by lib_ring_buffer_reserve(), saved here. */
	struct channel *chan;		/* channel */
	void *priv;			/* client private data */
	size_t data_size;		/* size of payload */
	int largest_align;		/*
					 * alignment of the largest element
					 * in the payload
					 */
	int cpu;			/* processor id */

	/* output from lib_ring_buffer_reserve() */
	struct lib_ring_buffer *buf;	/*
					 * buffer corresponding to processor id
					 * for this channel
					 */
	size_t slot_size;		/* size of the reserved slot */
	unsigned long buf_offset;	/* offset following the record header */
	unsigned long pre_offset;	/*
					 * Initial offset position _before_
					 * the record is written. Positioned
					 * prior to record header alignment
					 * padding.
					 */
	u64 tsc;			/* time-stamp counter value */
	unsigned int rflags;		/* reservation flags (RING_BUFFER_RFLAG_*) */
};
203
204 /**
205 * lib_ring_buffer_ctx_init - initialize ring buffer context
206 * @ctx: ring buffer context to initialize
207 * @chan: channel
208 * @priv: client private data
209 * @data_size: size of record data payload
210 * @largest_align: largest alignment within data payload types
211 * @cpu: processor id
212 */
213 static inline
214 void lib_ring_buffer_ctx_init(struct lib_ring_buffer_ctx *ctx,
215 struct channel *chan, void *priv,
216 size_t data_size, int largest_align,
217 int cpu)
218 {
219 ctx->chan = chan;
220 ctx->priv = priv;
221 ctx->data_size = data_size;
222 ctx->largest_align = largest_align;
223 ctx->cpu = cpu;
224 ctx->rflags = 0;
225 }
226
227 /*
228 * Reservation flags.
229 *
230 * RING_BUFFER_RFLAG_FULL_TSC
231 *
232 * This flag is passed to record_header_size() and to the primitive used to
233 * write the record header. It indicates that the full 64-bit time value is
234 * needed in the record header. If this flag is not set, the record header needs
235 * only to contain "tsc_bits" bit of time value.
236 *
237 * Reservation flags can be added by the client, starting from
238 * "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
239 * record_header_size() to lib_ring_buffer_write_record_header().
240 */
241 #define RING_BUFFER_RFLAG_FULL_TSC (1U << 0)
242 #define RING_BUFFER_RFLAG_END (1U << 1)
243
244 /*
245 * We need to define RING_BUFFER_ALIGN_ATTR so it is known early at
246 * compile-time. We have to duplicate the "config->align" information and the
247 * definition here because config->align is used both in the slow and fast
248 * paths, but RING_BUFFER_ALIGN_ATTR is only available for the client code.
249 */
250 #ifdef RING_BUFFER_ALIGN
251
252 # define RING_BUFFER_ALIGN_ATTR /* Default arch alignment */
253
/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 *
 * This variant is compiled when RING_BUFFER_ALIGN is defined (natural
 * alignment of record fields): it returns the padding, in bytes, to add
 * to @align_drift so the next field of @size_of_type bytes is naturally
 * aligned. offset_align() presumably comes from "../align.h" (included
 * above) — verify there for its exact semantics.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return offset_align(align_drift, size_of_type);
}
263
264 #else
265
266 # define RING_BUFFER_ALIGN_ATTR __attribute__((packed))
267
/*
 * Calculate the offset needed to align the type.
 * size_of_type must be non-zero.
 *
 * This variant is compiled when RING_BUFFER_ALIGN is NOT defined:
 * records are packed (RING_BUFFER_ALIGN_ATTR expands to
 * __attribute__((packed))), so no alignment padding is ever required
 * and the offset is always 0.
 */
static inline
unsigned int lib_ring_buffer_align(size_t align_drift, size_t size_of_type)
{
	return 0;
}
277
278 #endif
279
280 /**
281 * lib_ring_buffer_align_ctx - Align context offset on "alignment"
282 * @ctx: ring buffer context.
283 */
284 static inline
285 void lib_ring_buffer_align_ctx(struct lib_ring_buffer_ctx *ctx,
286 size_t alignment)
287 {
288 ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
289 alignment);
290 }
291
292 /*
293 * lib_ring_buffer_check_config() returns 0 on success.
294 * Used internally to check for valid configurations at channel creation.
295 */
296 static inline
297 int lib_ring_buffer_check_config(const struct lib_ring_buffer_config *config,
298 unsigned int switch_timer_interval,
299 unsigned int read_timer_interval)
300 {
301 if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
302 && config->sync == RING_BUFFER_SYNC_PER_CPU
303 && switch_timer_interval)
304 return -EINVAL;
305 return 0;
306 }
307
308 #include "../../wrapper/ringbuffer/vatomic.h"
309
310 #endif /* _LIB_RING_BUFFER_CONFIG_H */
This page took 0.037279 seconds and 5 git commands to generate.