Import lib ring buffer into LTTng modules
[lttng-modules.git] / lib / ringbuffer / frontend.h
1 #ifndef _LINUX_RING_BUFFER_FRONTEND_H
2 #define _LINUX_RING_BUFFER_FRONTEND_H
3
4 /*
5 * linux/ringbuffer/frontend.h
6 *
7 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 *
9 * Ring Buffer Library Synchronization Header (API).
10 *
11 * Author:
12 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
13 *
14 * See ring_buffer_frontend.c for more information on wait-free algorithms.
15 *
16 * Dual LGPL v2.1/GPL v2 license.
17 */
18
19 #include <linux/pipe_fs_i.h>
20 #include <linux/rcupdate.h>
21 #include <linux/cpumask.h>
22 #include <linux/module.h>
23 #include <linux/bitops.h>
24 #include <linux/splice.h>
25 #include <linux/string.h>
26 #include <linux/timer.h>
27 #include <linux/sched.h>
28 #include <linux/cache.h>
29 #include <linux/time.h>
30 #include <linux/slab.h>
31 #include <linux/init.h>
32 #include <linux/stat.h>
33 #include <linux/cpu.h>
34 #include <linux/fs.h>
35
36 #include <asm/atomic.h>
37 #include <asm/local.h>
38
39 /* Internal helpers */
40 #include "../../wrapper/ringbuffer/frontend_internal.h"
41
42 /* Buffer creation/removal and setup operations */
43
44 /*
45 * switch_timer_interval is the time interval (in us) to fill sub-buffers with
46 * padding to let readers get those sub-buffers. Used for live streaming.
47 *
48 * read_timer_interval is the time interval (in us) to wake up pending readers.
49 *
 * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
51 * address mapping. It is used only by RING_BUFFER_STATIC configuration. It can
52 * be set to NULL for other backends.
53 */
54
55 extern
56 struct channel *channel_create(const struct lib_ring_buffer_config *config,
57 const char *name, void *priv,
58 void *buf_addr,
59 size_t subbuf_size, size_t num_subbuf,
60 unsigned int switch_timer_interval,
61 unsigned int read_timer_interval);
62
63 /*
64 * channel_destroy returns the private data pointer. It finalizes all channel's
65 * buffers, waits for readers to release all references, and destroys the
66 * channel.
67 */
68 extern
69 void *channel_destroy(struct channel *chan);
70
71
72 /* Buffer read operations */
73
74 /*
75 * Iteration on channel cpumask needs to issue a read barrier to match the write
76 * barrier in cpu hotplug. It orders the cpumask read before read of per-cpu
77 * buffer data. The per-cpu buffer is never removed by cpu hotplug; teardown is
78 * only performed at channel destruction.
79 */
/*
 * for_each_channel_cpu - iterate @cpu over the cpus set in the channel's
 * backend cpumask. Starting from -1 makes cpumask_next() return the first
 * set cpu. The smp_read_barrier_depends() in the statement expression
 * orders the cpumask read before the per-cpu buffer accesses performed in
 * the loop body (pairs with the write barrier in cpu hotplug, see the
 * comment above).
 */
#define for_each_channel_cpu(cpu, chan)					\
	for ((cpu) = -1;						\
		({ (cpu) = cpumask_next(cpu, (chan)->backend.cpumask);	\
		   smp_read_barrier_depends(); (cpu) < nr_cpu_ids; });)
84
85 extern struct lib_ring_buffer *channel_get_ring_buffer(
86 const struct lib_ring_buffer_config *config,
87 struct channel *chan, int cpu);
88 extern int lib_ring_buffer_open_read(struct lib_ring_buffer *buf);
89 extern void lib_ring_buffer_release_read(struct lib_ring_buffer *buf);
90
91 /*
92 * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
93 */
94 extern int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
95 unsigned long *consumed,
96 unsigned long *produced);
97 extern void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
98 unsigned long consumed_new);
99
100 extern int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
101 unsigned long consumed);
102 extern void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf);
103
104 /*
105 * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
106 * to read sub-buffers sequentially.
107 */
108 static inline int lib_ring_buffer_get_next_subbuf(struct lib_ring_buffer *buf)
109 {
110 int ret;
111
112 ret = lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
113 &buf->prod_snapshot);
114 if (ret)
115 return ret;
116 ret = lib_ring_buffer_get_subbuf(buf, buf->cons_snapshot);
117 return ret;
118 }
119
120 static inline void lib_ring_buffer_put_next_subbuf(struct lib_ring_buffer *buf)
121 {
122 lib_ring_buffer_put_subbuf(buf);
123 lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot,
124 buf->backend.chan));
125 }
126
127 extern void channel_reset(struct channel *chan);
128 extern void lib_ring_buffer_reset(struct lib_ring_buffer *buf);
129
130 static inline
131 unsigned long lib_ring_buffer_get_offset(const struct lib_ring_buffer_config *config,
132 struct lib_ring_buffer *buf)
133 {
134 return v_read(config, &buf->offset);
135 }
136
137 static inline
138 unsigned long lib_ring_buffer_get_consumed(const struct lib_ring_buffer_config *config,
139 struct lib_ring_buffer *buf)
140 {
141 return atomic_long_read(&buf->consumed);
142 }
143
144 /*
145 * Must call lib_ring_buffer_is_finalized before reading counters (memory
146 * ordering enforced with respect to trace teardown).
147 */
/*
 * Return a snapshot of buf->finalized, taken with ACCESS_ONCE to prevent
 * compiler re-reads, followed by a read barrier. The smp_rmb() orders the
 * finalized load before any subsequent counter reads by the caller, which
 * is why this must be called before reading counters (see the comment
 * above this function).
 */
static inline
int lib_ring_buffer_is_finalized(const struct lib_ring_buffer_config *config,
				 struct lib_ring_buffer *buf)
{
	int finalized = ACCESS_ONCE(buf->finalized);
	/*
	 * Read finalized before counters.
	 */
	smp_rmb();
	return finalized;
}
159
160 static inline
161 unsigned long lib_ring_buffer_get_read_data_size(
162 const struct lib_ring_buffer_config *config,
163 struct lib_ring_buffer *buf)
164 {
165 return subbuffer_get_read_data_size(config, &buf->backend);
166 }
167
168 static inline
169 unsigned long lib_ring_buffer_get_records_count(
170 const struct lib_ring_buffer_config *config,
171 struct lib_ring_buffer *buf)
172 {
173 return v_read(config, &buf->records_count);
174 }
175
176 static inline
177 unsigned long lib_ring_buffer_get_records_overrun(
178 const struct lib_ring_buffer_config *config,
179 struct lib_ring_buffer *buf)
180 {
181 return v_read(config, &buf->records_overrun);
182 }
183
184 static inline
185 unsigned long lib_ring_buffer_get_records_lost_full(
186 const struct lib_ring_buffer_config *config,
187 struct lib_ring_buffer *buf)
188 {
189 return v_read(config, &buf->records_lost_full);
190 }
191
192 static inline
193 unsigned long lib_ring_buffer_get_records_lost_wrap(
194 const struct lib_ring_buffer_config *config,
195 struct lib_ring_buffer *buf)
196 {
197 return v_read(config, &buf->records_lost_wrap);
198 }
199
200 static inline
201 unsigned long lib_ring_buffer_get_records_lost_big(
202 const struct lib_ring_buffer_config *config,
203 struct lib_ring_buffer *buf)
204 {
205 return v_read(config, &buf->records_lost_big);
206 }
207
208 static inline
209 unsigned long lib_ring_buffer_get_records_read(
210 const struct lib_ring_buffer_config *config,
211 struct lib_ring_buffer *buf)
212 {
213 return v_read(config, &buf->backend.records_read);
214 }
215
216 static inline
217 void *channel_get_private(struct channel *chan)
218 {
219 return chan->backend.priv;
220 }
221
222 #endif /* _LINUX_RING_BUFFER_FRONTEND_H */
This page took 0.033463 seconds and 5 git commands to generate.