#ifndef _LINUX_RING_BUFFER_FRONTEND_API_H
#define _LINUX_RING_BUFFER_FRONTEND_API_H

/*
 * linux/ringbuffer/frontend_api.h
 *
 * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring Buffer Library Synchronization Header (buffer write API).
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * See ring_buffer_frontend.c for more information on wait-free algorithms.
 * See linux/ringbuffer/frontend.h for channel allocation and read-side API.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include "../../wrapper/ringbuffer/frontend.h"
#include <linux/errno.h>

/**
 * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
 *
 * Disables preemption (acts as an RCU read-side critical section) and keeps a
 * ring buffer nesting count as a supplementary safety net to ensure that
 * tracer client code will never trigger an endless recursion. Returns the
 * processor ID on success, -EPERM on failure (nesting count too high).
 *
 * asm volatile and the "memory" clobber prevent the compiler from moving
 * instructions out of the ring buffer nesting count section. This is required
 * to ensure that probe side-effects which can cause recursion (e.g. unforeseen
 * traps, divisions by 0, ...) are triggered within the incremented nesting
 * count section.
 */
static inline
int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
{
	int cpu, nesting;

	rcu_read_lock_sched_notrace();
	cpu = smp_processor_id();
	nesting = ++per_cpu(lib_ring_buffer_nesting, cpu);
	barrier();

	if (unlikely(nesting > 4)) {
		WARN_ON_ONCE(1);
		per_cpu(lib_ring_buffer_nesting, cpu)--;
		rcu_read_unlock_sched_notrace();
		return -EPERM;
	} else
		return cpu;
}

/**
 * lib_ring_buffer_put_cpu - Follows ring buffer reserve/commit.
 */
static inline
void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
{
	barrier();
	__get_cpu_var(lib_ring_buffer_nesting)--;
	rcu_read_unlock_sched_notrace();
}
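
/*
 * Usage sketch (illustrative only, not part of the API): every
 * reserve/commit pair must be bracketed by get_cpu/put_cpu so that the
 * nesting count and preemption state stay balanced. "config" is assumed
 * to come from the tracer client:
 *
 *	int cpu;
 *
 *	cpu = lib_ring_buffer_get_cpu(config);
 *	if (cpu < 0)
 *		return;			(nesting too deep: drop the event)
 *	... reserve, write and commit the record ...
 *	lib_ring_buffer_put_cpu(config);
 */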

/*
 * lib_ring_buffer_try_reserve is called by lib_ring_buffer_reserve(). It is
 * not part of the API per se.
 *
 * Returns 0 if the reserve succeeded, or 1 if the slow path must be taken.
 */
static inline
int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
				struct lib_ring_buffer_ctx *ctx,
				unsigned long *o_begin, unsigned long *o_end,
				unsigned long *o_old, size_t *before_hdr_pad)
{
	struct channel *chan = ctx->chan;
	struct lib_ring_buffer *buf = ctx->buf;

	*o_begin = v_read(config, &buf->offset);
	*o_old = *o_begin;

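	/*
	 * A tsc value of -EIO flags a clock read error (e.g. from NMI
	 * context): take the slow path below, where the event is dropped
	 * (see the -EIO return of lib_ring_buffer_reserve).
	 */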
	ctx->tsc = lib_ring_buffer_clock_read(chan);
	if ((int64_t) ctx->tsc == -EIO)
		return 1;

	/*
	 * Prefetch cacheline for read because we have to read the previous
	 * commit counter to increment it and the commit seq value to compare
	 * it to the commit counter.
	 */
	prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);

	if (last_tsc_overflow(config, buf, ctx->tsc))
		ctx->rflags = RING_BUFFER_RFLAG_FULL_TSC;

	if (unlikely(subbuf_offset(*o_begin, chan) == 0))
		return 1;

	ctx->slot_size = record_header_size(config, chan, *o_begin,
					    ctx->data_size, before_hdr_pad,
					    ctx->rflags, ctx);
	ctx->slot_size +=
		lib_ring_buffer_align(*o_begin + ctx->slot_size,
				      ctx->largest_align) + ctx->data_size;
	if (unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
		     > chan->backend.subbuf_size))
		return 1;

	/*
	 * Record fits in the current buffer and we are not on a switch
	 * boundary. It's safe to write.
	 */
	*o_end = *o_begin + ctx->slot_size;

	if (unlikely((subbuf_offset(*o_end, chan)) == 0))
		/*
		 * The offset_end will fall at the very beginning of the next
		 * subbuffer.
		 */
		return 1;

	return 0;
}

/**
 * lib_ring_buffer_reserve - Reserve space in a ring buffer.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context (input and output). Must already be initialized.
 *
 * Atomic wait-free slot reservation. The reserved space starts at the context
 * "pre_offset". Its length is "slot_size". The associated timestamp is "tsc".
 *
 * Return:
 * 0 on success.
 * -EAGAIN if channel is disabled.
 * -ENOSPC if event size is too large for packet.
 * -ENOBUFS if there is currently not enough space in buffer for the event.
 * -EIO if data cannot be written into the buffer for any other reason.
 */
static inline
int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer_ctx *ctx)
{
	struct channel *chan = ctx->chan;
	struct lib_ring_buffer *buf;
	unsigned long o_begin, o_end, o_old;
	size_t before_hdr_pad = 0;

	if (atomic_read(&chan->record_disabled))
		return -EAGAIN;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
	else
		buf = chan->backend.buf;
	if (atomic_read(&buf->record_disabled))
		return -EAGAIN;
	ctx->buf = buf;

	/*
	 * Perform retryable operations.
	 */
	if (unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
						 &o_end, &o_old, &before_hdr_pad)))
		goto slow_path;

	if (unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
		     != o_old))
		goto slow_path;

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * record headers, never the opposite (missing a full TSC record header
	 * when it would be needed).
	 */
	save_last_tsc(config, ctx->buf, ctx->tsc);

	/*
	 * Push the reader if necessary.
	 */
	lib_ring_buffer_reserve_push_reader(ctx->buf, chan, o_end - 1);

	/*
	 * Clear noref flag for this subbuffer.
	 */
	lib_ring_buffer_clear_noref(config, &ctx->buf->backend,
				    subbuf_index(o_end - 1, chan));

	ctx->pre_offset = o_begin;
	ctx->buf_offset = o_begin + before_hdr_pad;
	return 0;

slow_path:
	return lib_ring_buffer_reserve_slow(ctx);
}
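
/*
 * Write-path sketch (illustrative only; assumes lib_ring_buffer_ctx_init()
 * from frontend_types.h and lib_ring_buffer_write() from the backend API,
 * with "config", "chan" and "payload" provided by the tracer client). On a
 * non-zero reserve return (-EAGAIN, -ENOSPC, -ENOBUFS or -EIO) the event is
 * dropped, but put_cpu must still run:
 *
 *	struct lib_ring_buffer_ctx ctx;
 *	int ret, cpu;
 *
 *	cpu = lib_ring_buffer_get_cpu(config);
 *	if (cpu < 0)
 *		return;
 *	lib_ring_buffer_ctx_init(&ctx, chan, NULL, sizeof(payload),
 *				 __alignof__(payload), cpu);
 *	ret = lib_ring_buffer_reserve(config, &ctx);
 *	if (ret)
 *		goto put;
 *	lib_ring_buffer_write(config, &ctx, &payload, sizeof(payload));
 *	lib_ring_buffer_commit(config, &ctx);
 * put:
 *	lib_ring_buffer_put_cpu(config);
 */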

/**
 * lib_ring_buffer_switch - Perform a sub-buffer switch for a per-cpu buffer.
 * @config: ring buffer instance configuration.
 * @buf: buffer
 * @mode: buffer switch mode (SWITCH_ACTIVE or SWITCH_FLUSH)
 *
 * This operation is completely reentrant: it can be called while tracing is
 * active with absolutely no lock held.
 *
 * Note, however, that as a v_cmpxchg is used for some atomic operations and
 * must be executed locally for per-CPU buffers, this function must be called
 * from the CPU which owns the buffer for an ACTIVE flush, with preemption
 * disabled, for the RING_BUFFER_SYNC_PER_CPU configuration.
 */
static inline
void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
			    struct lib_ring_buffer *buf, enum switch_mode mode)
{
	lib_ring_buffer_switch_slow(buf, mode);
}
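
/*
 * Sketch (illustrative only): forcing the current sub-buffer out, e.g.
 * before taking a snapshot. With RING_BUFFER_SYNC_PER_CPU, an active
 * switch must run on the CPU owning "buf" with preemption disabled:
 *
 *	preempt_disable();
 *	lib_ring_buffer_switch(config, buf, SWITCH_ACTIVE);
 *	preempt_enable();
 */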

/* See ring_buffer_frontend_api.h for lib_ring_buffer_reserve(). */

/**
 * lib_ring_buffer_commit - Commit a record.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context (input arguments only).
 *
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 */
static inline
void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
			    const struct lib_ring_buffer_ctx *ctx)
{
	struct channel *chan = ctx->chan;
	struct lib_ring_buffer *buf = ctx->buf;
	unsigned long offset_end = ctx->buf_offset;
	unsigned long endidx = subbuf_index(offset_end - 1, chan);
	unsigned long commit_count;

	/*
	 * Must count record before incrementing the commit count.
	 */
	subbuffer_count_record(config, &buf->backend, endidx);

	/*
	 * Order all writes to buffer before the commit count update that will
	 * determine that the subbuffer is full.
	 */
	if (config->ipi == RING_BUFFER_IPI_BARRIER) {
		/*
		 * Must write slot data before incrementing commit count. This
		 * compiler barrier is upgraded into a smp_mb() by the IPI sent
		 * by get_subbuf().
		 */
		barrier();
	} else
		smp_wmb();

	v_add(config, ctx->slot_size, &buf->commit_hot[endidx].cc);

	/*
	 * The commit count read can race with concurrent OOO commit count
	 * updates. This is only needed for lib_ring_buffer_check_deliver (for
	 * non-polling delivery only) and for
	 * lib_ring_buffer_write_commit_counter. The race can only cause the
	 * counter to be read with the same value more than once, which could
	 * cause:
	 * - Multiple delivery for the same sub-buffer (which is handled
	 *   gracefully by the reader code) if the value is for a full
	 *   sub-buffer. It's important that we can never miss a sub-buffer
	 *   delivery. Re-reading the value after the v_add ensures this.
	 * - Reading a commit_count with a higher value than what was actually
	 *   added to it for the lib_ring_buffer_write_commit_counter call
	 *   (again caused by a concurrent committer). It does not matter,
	 *   because this function is only interested in the fact that the
	 *   commit count catches up to the reserve offset for a specific
	 *   sub-buffer, which is completely independent of the order.
	 */
	commit_count = v_read(config, &buf->commit_hot[endidx].cc);

	lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
				      commit_count, endidx);
	/*
	 * Update used size at each commit. It's needed only for extracting
	 * ring_buffer buffers from vmcore, after crash.
	 */
	lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
					     ctx->buf_offset, commit_count,
					     ctx->slot_size);
}

/**
 * lib_ring_buffer_try_discard_reserve - Try discarding a record.
 * @config: ring buffer instance configuration.
 * @ctx: ring buffer context (input arguments only).
 *
 * Only succeeds if no other record has been written after the record to
 * discard. If discard fails, the record must be committed to the buffer.
 *
 * Returns 0 upon success, -EPERM if the record cannot be discarded.
 */
static inline
int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *config,
					const struct lib_ring_buffer_ctx *ctx)
{
	struct lib_ring_buffer *buf = ctx->buf;
	unsigned long end_offset = ctx->pre_offset + ctx->slot_size;

	/*
	 * We need to ensure that if the cmpxchg succeeds and discards the
	 * record, the next record will record a full TSC, because it cannot
	 * rely on the last_tsc associated with the discarded record to detect
	 * overflows. The only way to ensure this is to set the last_tsc to 0
	 * (assuming no 64-bit TSC overflow), which forces writing a 64-bit
	 * timestamp in the next record.
	 *
	 * Note: if discard fails, we must leave the TSC in the record header.
	 * It is needed to keep track of TSC overflows for the following
	 * records.
	 */
	save_last_tsc(config, buf, 0ULL);

	if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
		   != end_offset))
		return -EPERM;
	else
		return 0;
}
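
/*
 * Sketch (illustrative only): a client that decides, after reservation,
 * that a record should not be kept (e.g. a filter rejects the payload)
 * may try to take the reservation back; on failure it must still commit:
 *
 *	if (lib_ring_buffer_try_discard_reserve(config, &ctx))
 *		lib_ring_buffer_commit(config, &ctx);
 *	lib_ring_buffer_put_cpu(config);
 */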

static inline
void channel_record_disable(const struct lib_ring_buffer_config *config,
			    struct channel *chan)
{
	atomic_inc(&chan->record_disabled);
}

static inline
void channel_record_enable(const struct lib_ring_buffer_config *config,
			   struct channel *chan)
{
	atomic_dec(&chan->record_disabled);
}

static inline
void lib_ring_buffer_record_disable(const struct lib_ring_buffer_config *config,
				    struct lib_ring_buffer *buf)
{
	atomic_inc(&buf->record_disabled);
}

static inline
void lib_ring_buffer_record_enable(const struct lib_ring_buffer_config *config,
				   struct lib_ring_buffer *buf)
{
	atomic_dec(&buf->record_disabled);
}
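
/*
 * Note (sketch): record_disabled is an atomic counter, not a flag, so
 * disable/enable pairs nest; records flow again only once every disable
 * has been balanced by an enable. For example:
 *
 *	channel_record_disable(config, chan);	(0 -> 1: records dropped)
 *	channel_record_disable(config, chan);	(1 -> 2)
 *	channel_record_enable(config, chan);	(2 -> 1: still dropped)
 *	channel_record_enable(config, chan);	(1 -> 0: recording resumes)
 */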

#endif /* _LINUX_RING_BUFFER_FRONTEND_API_H */