/*
 * buffers.c
 * LTTng userspace tracer buffering system
 *
 * Copyright (C) 2009 - Pierre-Marc Fournier (pierre-marc dot fournier at polymtl dot ca)
 * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <unistd.h>
#include <sys/mman.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <fcntl.h>
#include <ust/kernelcompat.h>
#include <kcompat/kref.h>
#include "buffers.h"
#include "channels.h"
#include "tracer.h"
#include "tracercore.h"
#include "usterr.h"

static DEFINE_MUTEX(ust_buffers_channels_mutex);
static LIST_HEAD(ust_buffers_channels);

static int get_n_cpus(void)
{
	int result;
	static int n_cpus = 0;

	if(n_cpus) {
		return n_cpus;
	}

	/* On Linux, when some processors are offline
	 * _SC_NPROCESSORS_CONF counts the offline
	 * processors, whereas _SC_NPROCESSORS_ONLN
	 * does not. If we used _SC_NPROCESSORS_ONLN,
	 * getcpu() could return a value greater than
	 * this sysconf, in which case the arrays
	 * indexed by processor would overflow.
	 */
	result = sysconf(_SC_NPROCESSORS_CONF);
	if(result == -1) {
		return -1;
	}

	n_cpus = result;

	return result;
}

static int ust_buffers_init_buffer(struct ltt_trace_struct *trace,
		struct ust_channel *ltt_chan,
		struct ust_buffer *buf,
		unsigned int n_subbufs);

static int ust_buffers_alloc_buf(struct ust_buffer *buf, size_t *size)
{
	void *ptr;
	int result;

	*size = PAGE_ALIGN(*size);

	result = buf->shmid = shmget(getpid(), *size, IPC_CREAT | IPC_EXCL | 0700);
	if(result == -1 && errno == EINVAL) {
		ERR("shmget() returned EINVAL; maybe /proc/sys/kernel/shmmax should be increased.");
		return -1;
	}
	else if(result == -1) {
		PERROR("shmget");
		return -1;
	}

	/* FIXME: should have matching call to shmdt */
	ptr = shmat(buf->shmid, NULL, 0);
	if(ptr == (void *) -1) {
		perror("shmat");
		goto destroy_shmem;
	}

	/* Already mark the shared memory for destruction. This will occur only
	 * when all users have detached.
	 */
	result = shmctl(buf->shmid, IPC_RMID, NULL);
	if(result == -1) {
		perror("shmctl");
		return -1;
	}

	buf->buf_data = ptr;
	buf->buf_size = *size;

	return 0;

destroy_shmem:
	result = shmctl(buf->shmid, IPC_RMID, NULL);
	if(result == -1) {
		perror("shmctl");
	}

	return -1;
}
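
/*
 * Note on the SysV shm lifecycle used above (an illustrative sketch, not
 * part of this file's API): the segment is created with IPC_CREAT |
 * IPC_EXCL, attached with shmat(), then immediately marked with IPC_RMID so
 * that the kernel destroys it once the last user detaches. Another process
 * that learned the shmid (e.g. over a socket) can still attach before then:
 *
 *	void *ptr = shmat(shmid, NULL, 0);	// maps the same pages
 *	if (ptr == (void *) -1)
 *		PERROR("shmat");
 *	// ... use the buffer ...
 *	shmdt(ptr);	// the last detach frees the segment
 */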

int ust_buffers_create_buf(struct ust_channel *channel, int cpu)
{
	int result;
	struct ust_buffer *buf = channel->buf[cpu];

	buf->cpu = cpu;
	result = ust_buffers_alloc_buf(buf, &channel->alloc_size);
	if(result)
		return -1;

	buf->chan = channel;
	kref_get(&channel->kref);
	return 0;
}

static void ust_buffers_destroy_channel(struct kref *kref)
{
	struct ust_channel *chan = container_of(kref, struct ust_channel, kref);
	free(chan);
}

static void ust_buffers_destroy_buf(struct ust_buffer *buf)
{
	struct ust_channel *chan = buf->chan;
	int result;

	/* Detach the segment; the shmat() was done in ust_buffers_alloc_buf()
	 * and the segment is already marked for destruction (IPC_RMID). */
	result = shmdt(buf->buf_data);
	if(result == -1) {
		PERROR("shmdt");
	}

	//ust// chan->buf[buf->cpu] = NULL;
	free(buf);
	kref_put(&chan->kref, ust_buffers_destroy_channel);
}

/* called from kref_put */
static void ust_buffers_remove_buf(struct kref *kref)
{
	struct ust_buffer *buf = container_of(kref, struct ust_buffer, kref);
	ust_buffers_destroy_buf(buf);
}

int ust_buffers_open_buf(struct ust_channel *chan, int cpu)
{
	int result;

	result = ust_buffers_create_buf(chan, cpu);
	if (result == -1)
		return -1;

	kref_init(&chan->buf[cpu]->kref);

	result = ust_buffers_init_buffer(chan->trace, chan, chan->buf[cpu], chan->subbuf_cnt);
	if(result == -1)
		return -1;

	return 0;

	/* FIXME: decrementally destroy on error? */
}

/**
 * ust_buffers_close_buf - close a channel buffer
 * @buf: buffer
 */
static void ust_buffers_close_buf(struct ust_buffer *buf)
{
	kref_put(&buf->kref, ust_buffers_remove_buf);
}

int ust_buffers_channel_open(struct ust_channel *chan, size_t subbuf_size, size_t subbuf_cnt)
{
	int i;
	int result;

	if(subbuf_size == 0 || subbuf_cnt == 0)
		return -1;

	chan->version = UST_CHANNEL_VERSION;
	chan->subbuf_cnt = subbuf_cnt;
	chan->subbuf_size = subbuf_size;
	chan->subbuf_size_order = get_count_order(subbuf_size);
	chan->alloc_size = FIX_SIZE(subbuf_size * subbuf_cnt);

	kref_init(&chan->kref);

	mutex_lock(&ust_buffers_channels_mutex);
	for(i=0; i<chan->n_cpus; i++) {
		result = ust_buffers_open_buf(chan, i);
		if (result == -1)
			goto error;
	}
	list_add(&chan->list, &ust_buffers_channels);
	mutex_unlock(&ust_buffers_channels_mutex);

	return 0;

	/* Jump directly inside the loop to close the buffers that were already
	 * opened. The buffer at the failing index was never opened, so entry
	 * at the label skips it. */
	for(; i>=0; i--) {
		ust_buffers_close_buf(chan->buf[i]);
error:
		continue;
	}

	kref_put(&chan->kref, ust_buffers_destroy_channel);
	mutex_unlock(&ust_buffers_channels_mutex);
	return -1;
}

void ust_buffers_channel_close(struct ust_channel *chan)
{
	int i;
	if(!chan)
		return;

	mutex_lock(&ust_buffers_channels_mutex);
	for(i=0; i<chan->n_cpus; i++) {
		/* FIXME: if we make it here, then all buffers were necessarily allocated. Moreover, we don't
		 * initialize to NULL so we cannot use this check. Should we? */
		//ust//	if (chan->buf[i])
		ust_buffers_close_buf(chan->buf[i]);
	}

	list_del(&chan->list);
	kref_put(&chan->kref, ust_buffers_destroy_channel);
	mutex_unlock(&ust_buffers_channels_mutex);
}

/* _ust_buffers_write()
 *
 * @buf: destination buffer
 * @offset: offset in destination
 * @src: source buffer
 * @len: length of source
 * @cpy: already copied
 */

void _ust_buffers_write(struct ust_buffer *buf, size_t offset,
		const void *src, size_t len, ssize_t cpy)
{
	do {
		len -= cpy;
		src += cpy;
		offset += cpy;

		WARN_ON(offset >= buf->buf_size);

		cpy = min_t(size_t, len, buf->buf_size - offset);
		ust_buffers_do_copy(buf->buf_data + offset, src, cpy);
	} while (unlikely(len != cpy));
}
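
/*
 * The loop above is a slow path: it resumes a copy whose first chunk
 * (@cpy bytes) was already performed by the caller, and keeps copying in
 * chunks clamped to the space left before the end of the buffer. A plausible
 * fast-path caller (illustrative only; the real inline caller lives in the
 * buffering headers) would look like:
 *
 *	cpy = min_t(size_t, len, buf->buf_size - offset);
 *	ust_buffers_do_copy(buf->buf_data + offset, src, cpy);
 *	if (unlikely(len != cpy))
 *		_ust_buffers_write(buf, offset, src, len, cpy);
 */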

void *ltt_buffers_offset_address(struct ust_buffer *buf, size_t offset)
{
	return ((char *)buf->buf_data)+offset;
}

/*
 * -------
 */

/*
 * Last TSC comparison functions. Check if the current TSC overflows
 * LTT_TSC_BITS bits from the last TSC read. Reads and writes last_tsc
 * atomically.
 */

/* FIXME: does this test work properly? */
#if (BITS_PER_LONG == 32)
static inline void save_last_tsc(struct ust_buffer *ltt_buf,
		u64 tsc)
{
	ltt_buf->last_tsc = (unsigned long)(tsc >> LTT_TSC_BITS);
}

static inline int last_tsc_overflow(struct ust_buffer *ltt_buf,
		u64 tsc)
{
	unsigned long tsc_shifted = (unsigned long)(tsc >> LTT_TSC_BITS);

	if (unlikely((tsc_shifted - ltt_buf->last_tsc)))
		return 1;
	else
		return 0;
}
#else
static inline void save_last_tsc(struct ust_buffer *ltt_buf,
		u64 tsc)
{
	ltt_buf->last_tsc = (unsigned long)tsc;
}

static inline int last_tsc_overflow(struct ust_buffer *ltt_buf,
		u64 tsc)
{
	if (unlikely((tsc - ltt_buf->last_tsc) >> LTT_TSC_BITS))
		return 1;
	else
		return 0;
}
#endif
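
/*
 * Worked example of the 64-bit variant above (the constant is illustrative;
 * see the definition of LTT_TSC_BITS for the real value): with
 * LTT_TSC_BITS == 27, (tsc - last_tsc) >> 27 is non-zero exactly when the
 * delta since the last recorded event no longer fits in 27 bits. In that
 * case the truncated TSC field of a compact event header could alias an
 * older timestamp, so the caller sets LTT_RFLAG_ID_SIZE_TSC to force a
 * full 64-bit TSC into the event.
 */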

/*
 * A switch is done during tracing or as a final flush after tracing (so it
 * won't write in the new sub-buffer).
 */
enum force_switch_mode { FORCE_ACTIVE, FORCE_FLUSH };

static void ust_buffers_destroy_buffer(struct ust_channel *ltt_chan, int cpu);

static void ltt_force_switch(struct ust_buffer *buf,
		enum force_switch_mode mode);

/*
 * Trace callbacks
 */
static void ltt_buffer_begin_callback(struct ust_buffer *buf,
			u64 tsc, unsigned int subbuf_idx)
{
	struct ust_channel *channel = buf->chan;
	struct ltt_subbuffer_header *header =
		(struct ltt_subbuffer_header *)
			ltt_buffers_offset_address(buf,
				subbuf_idx * buf->chan->subbuf_size);

	header->cycle_count_begin = tsc;
	header->lost_size = 0xFFFFFFFF; /* for debugging */
	header->buf_size = buf->chan->subbuf_size;
	ltt_write_trace_header(channel->trace, header);
}

/*
 * offset is assumed to never be 0 here : never deliver a completely empty
 * subbuffer. The lost size is between 0 and subbuf_size-1.
 */
static notrace void ltt_buffer_end_callback(struct ust_buffer *buf,
		u64 tsc, unsigned int offset, unsigned int subbuf_idx)
{
	struct ltt_subbuffer_header *header =
		(struct ltt_subbuffer_header *)
			ltt_buffers_offset_address(buf,
				subbuf_idx * buf->chan->subbuf_size);

	header->lost_size = SUBBUF_OFFSET((buf->chan->subbuf_size - offset),
				buf->chan);
	header->cycle_count_end = tsc;
	header->events_lost = local_read(&buf->events_lost);
	header->subbuf_corrupt = local_read(&buf->corrupted_subbuffers);
}

void (*wake_consumer)(void *, int) = NULL;

void relay_set_wake_consumer(void (*wake)(void *, int))
{
	wake_consumer = wake;
}

void relay_wake_consumer(void *arg, int finished)
{
	if(wake_consumer)
		wake_consumer(arg, finished);
}

static notrace void ltt_deliver(struct ust_buffer *buf, unsigned int subbuf_idx,
		long commit_count)
{
	int result;

//ust// #ifdef CONFIG_LTT_VMCORE
	local_set(&buf->commit_seq[subbuf_idx], commit_count);
//ust// #endif

	/* wakeup consumer */
	result = write(buf->data_ready_fd_write, "1", 1);
	if(result == -1) {
		PERROR("write (in ltt_deliver)");
		ERR("this should never happen!");
	}
	//ust// atomic_set(&ltt_buf->wakeup_readers, 1);
}
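
/*
 * Consumer-side counterpart of the pipe write above (a sketch, assuming the
 * consumer polls data_ready_fd_read; this is not code from this file):
 *
 *	struct pollfd pfd = { .fd = buf->data_ready_fd_read, .events = POLLIN };
 *	char dummy;
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(buf->data_ready_fd_read, &dummy, 1); // drain, then consume
 */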

/*
 * This function should not be called from NMI interrupt context
 */
static notrace void ltt_buf_unfull(struct ust_buffer *buf,
		unsigned int subbuf_idx,
		long offset)
{
//ust//	struct ltt_channel_struct *ltt_channel =
//ust//		(struct ltt_channel_struct *)buf->chan->private_data;
//ust//	struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf;
//ust//
//ust//	ltt_relay_wake_writers(ltt_buf);
}
int ust_buffers_do_get_subbuf(struct ust_buffer *buf, long *pconsumed_old)
{
	struct ust_channel *channel = buf->chan;
	long consumed_old, consumed_idx, commit_count, write_offset;
	consumed_old = atomic_long_read(&buf->consumed);
	consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
	commit_count = local_read(&buf->commit_count[consumed_idx]);
	/*
	 * Make sure we read the commit count before reading the buffer
	 * data and the write offset. Correct consumed offset ordering
	 * wrt commit count is ensured by the use of cmpxchg to update
	 * the consumed offset.
	 */
	smp_rmb();
	write_offset = local_read(&buf->offset);
	/*
	 * Check that the subbuffer we are trying to consume has been
	 * already fully committed.
	 */
	if (((commit_count - buf->chan->subbuf_size)
	     & channel->commit_count_mask)
	    - (BUFFER_TRUNC(consumed_old, buf->chan)
	       >> channel->n_subbufs_order)
	    != 0) {
		return -EAGAIN;
	}
	/*
	 * Check that we are not about to read the same subbuffer in
	 * which the writer head is.
	 */
	if ((SUBBUF_TRUNC(write_offset, buf->chan)
	     - SUBBUF_TRUNC(consumed_old, buf->chan))
	    == 0) {
		return -EAGAIN;
	}

	*pconsumed_old = consumed_old;
	return 0;
}
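
/*
 * Reading the "fully committed" test above: BUFFER_TRUNC(consumed_old, chan)
 * >> n_subbufs_order equals subbuf_size times the number of complete passes
 * the writer has made over the whole buffer, which is exactly what the
 * (masked) commit count of that subbuffer must have reached once the pass
 * that wrote it is fully committed. Any other difference means a reserve
 * without a matching commit is still pending in that subbuffer. (Sketch of
 * the invariant as understood from the LTTng lockless scheme; the tracer's
 * design documents are the authoritative description.)
 */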

int ust_buffers_do_put_subbuf(struct ust_buffer *buf, u32 uconsumed_old)
{
	long consumed_new, consumed_old;

	consumed_old = atomic_long_read(&buf->consumed);
	consumed_old = consumed_old & (~0xFFFFFFFFL);
	consumed_old = consumed_old | uconsumed_old;
	consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);

	//ust// spin_lock(&ltt_buf->full_lock);
	if (atomic_long_cmpxchg(&buf->consumed, consumed_old,
				consumed_new)
	    != consumed_old) {
		/* We have been pushed by the writer : the last
		 * buffer read _is_ corrupted! It can also
		 * happen if this is a buffer we never got. */
		//ust// spin_unlock(&ltt_buf->full_lock);
		return -EIO;
	} else {
		/* tell the client that buffer is now unfull */
		int index;
		long data;
		index = SUBBUF_INDEX(consumed_old, buf->chan);
		data = BUFFER_OFFSET(consumed_old, buf->chan);
		ltt_buf_unfull(buf, index, data);
		//ust// spin_unlock(&ltt_buf->full_lock);
	}
	return 0;
}
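
/*
 * Hypothetical consumer loop pairing the two helpers above (error handling
 * and the actual data extraction are elided; not code from this file):
 *
 *	long consumed_old;
 *
 *	while (ust_buffers_do_get_subbuf(buf, &consumed_old) == 0) {
 *		// copy out the subbuffer at SUBBUF_INDEX(consumed_old, buf->chan)
 *		if (ust_buffers_do_put_subbuf(buf, (u32)consumed_old) == -EIO)
 *			break;	// pushed by the writer: the data was overwritten
 *	}
 */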

static void ltt_relay_print_subbuffer_errors(
		struct ust_channel *channel,
		long cons_off, int cpu)
{
	struct ust_buffer *ltt_buf = channel->buf[cpu];
	long cons_idx, commit_count, write_offset;

	cons_idx = SUBBUF_INDEX(cons_off, channel);
	commit_count = local_read(&ltt_buf->commit_count[cons_idx]);
	/*
	 * No need to order commit_count and write_offset reads because we
	 * execute after trace is stopped when there are no readers left.
	 */
	write_offset = local_read(&ltt_buf->offset);
	WARN("LTT : unread channel %s offset is %ld "
		"and cons_off : %ld\n",
		channel->channel_name, write_offset, cons_off);
	/* Check each sub-buffer for a non-filled commit count */
	if (((commit_count - channel->subbuf_size) & channel->commit_count_mask)
	    - (BUFFER_TRUNC(cons_off, channel) >> channel->n_subbufs_order) != 0) {
		ERR("LTT : %s : subbuffer %lu has non-filled "
			"commit count %lu.\n",
			channel->channel_name, cons_idx, commit_count);
	}
	ERR("LTT : %s : commit count : %lu, subbuf size %zd\n",
		channel->channel_name, commit_count,
		channel->subbuf_size);
}

static void ltt_relay_print_errors(struct ltt_trace_struct *trace,
		struct ust_channel *channel, int cpu)
{
	struct ust_buffer *ltt_buf;
	long cons_off;

	/*
	 * Can be called in the error path of allocation when
	 * trans_channel_data is not yet set. Check the channel before
	 * dereferencing it.
	 */
	if (!channel)
		return;

	ltt_buf = channel->buf[cpu];
	for (cons_off = atomic_long_read(&ltt_buf->consumed);
			(SUBBUF_TRUNC(local_read(&ltt_buf->offset),
				      channel)
			 - cons_off) > 0;
			cons_off = SUBBUF_ALIGN(cons_off, channel))
		ltt_relay_print_subbuffer_errors(channel, cons_off, cpu);
}

static void ltt_relay_print_buffer_errors(struct ust_channel *channel, int cpu)
{
	struct ltt_trace_struct *trace = channel->trace;
	struct ust_buffer *ltt_buf = channel->buf[cpu];

	if (local_read(&ltt_buf->events_lost))
		ERR("channel %s: %ld events lost",
			channel->channel_name,
			local_read(&ltt_buf->events_lost));
	if (local_read(&ltt_buf->corrupted_subbuffers))
		ERR("channel %s : %ld corrupted subbuffers",
			channel->channel_name,
			local_read(&ltt_buf->corrupted_subbuffers));

	ltt_relay_print_errors(trace, channel, cpu);
}

static void ltt_relay_release_channel(struct kref *kref)
{
	struct ust_channel *ltt_chan = container_of(kref,
			struct ust_channel, kref);
	free(ltt_chan->buf);
}

/*
 * Create ltt buffer.
 */
//ust// static int ltt_relay_create_buffer(struct ltt_trace_struct *trace,
//ust//		struct ltt_channel_struct *ltt_chan, struct rchan_buf *buf,
//ust//		unsigned int cpu, unsigned int n_subbufs)
//ust// {
//ust//	struct ltt_channel_buf_struct *ltt_buf =
//ust//		percpu_ptr(ltt_chan->buf, cpu);
//ust//	unsigned int j;
//ust//
//ust//	ltt_buf->commit_count =
//ust//		kzalloc_node(sizeof(ltt_buf->commit_count) * n_subbufs,
//ust//			GFP_KERNEL, cpu_to_node(cpu));
//ust//	if (!ltt_buf->commit_count)
//ust//		return -ENOMEM;
//ust//	kref_get(&trace->kref);
//ust//	kref_get(&trace->ltt_transport_kref);
//ust//	kref_get(&ltt_chan->kref);
//ust//	local_set(&ltt_buf->offset, ltt_subbuffer_header_size());
//ust//	atomic_long_set(&ltt_buf->consumed, 0);
//ust//	atomic_long_set(&ltt_buf->active_readers, 0);
//ust//	for (j = 0; j < n_subbufs; j++)
//ust//		local_set(&ltt_buf->commit_count[j], 0);
//ust//	init_waitqueue_head(&ltt_buf->write_wait);
//ust//	atomic_set(&ltt_buf->wakeup_readers, 0);
//ust//	spin_lock_init(&ltt_buf->full_lock);
//ust//
//ust//	ltt_buffer_begin_callback(buf, trace->start_tsc, 0);
//ust//	/* atomic_add made on local variable on data that belongs to
//ust//	 * various CPUs : ok because tracing not started (for this cpu). */
//ust//	local_add(ltt_subbuffer_header_size(), &ltt_buf->commit_count[0]);
//ust//
//ust//	local_set(&ltt_buf->events_lost, 0);
//ust//	local_set(&ltt_buf->corrupted_subbuffers, 0);
//ust//
//ust//	return 0;
//ust// }

static int ust_buffers_init_buffer(struct ltt_trace_struct *trace,
		struct ust_channel *ltt_chan, struct ust_buffer *buf,
		unsigned int n_subbufs)
{
	unsigned int j;
	int fds[2];
	int result;

	buf->commit_count =
		zmalloc(sizeof(buf->commit_count) * n_subbufs);
	if (!buf->commit_count)
		return -ENOMEM;
	kref_get(&trace->kref);
	kref_get(&trace->ltt_transport_kref);
	kref_get(&ltt_chan->kref);
	local_set(&buf->offset, ltt_subbuffer_header_size());
	atomic_long_set(&buf->consumed, 0);
	atomic_long_set(&buf->active_readers, 0);
	for (j = 0; j < n_subbufs; j++)
		local_set(&buf->commit_count[j], 0);
	//ust// init_waitqueue_head(&buf->write_wait);
	//ust// atomic_set(&buf->wakeup_readers, 0);
	//ust// spin_lock_init(&buf->full_lock);

	ltt_buffer_begin_callback(buf, trace->start_tsc, 0);

	local_add(ltt_subbuffer_header_size(), &buf->commit_count[0]);

	local_set(&buf->events_lost, 0);
	local_set(&buf->corrupted_subbuffers, 0);

	result = pipe(fds);
	if(result == -1) {
		PERROR("pipe");
		return -1;
	}
	buf->data_ready_fd_read = fds[0];
	buf->data_ready_fd_write = fds[1];

	/* FIXME: do we actually need this? */
	result = fcntl(fds[0], F_SETFL, O_NONBLOCK);
	if(result == -1) {
		PERROR("fcntl");
	}

	//ust//	buf->commit_seq = malloc(sizeof(buf->commit_seq) * n_subbufs);
	//ust//	if(!ltt_buf->commit_seq) {
	//ust//		return -1;
	//ust//	}

	/* FIXME: decrementally destroy on error */

	return 0;
}

/* FIXME: use this function */
static void ust_buffers_destroy_buffer(struct ust_channel *ltt_chan, int cpu)
{
	struct ltt_trace_struct *trace = ltt_chan->trace;
	struct ust_buffer *ltt_buf = ltt_chan->buf[cpu];

	kref_put(&ltt_chan->trace->ltt_transport_kref,
		ltt_release_transport);
	ltt_relay_print_buffer_errors(ltt_chan, cpu);
	//ust// free(ltt_buf->commit_seq);
	kfree(ltt_buf->commit_count);
	ltt_buf->commit_count = NULL;
	kref_put(&ltt_chan->kref, ltt_relay_release_channel);
	kref_put(&trace->kref, ltt_release_trace);
	//ust// wake_up_interruptible(&trace->kref_wq);
}

static int ust_buffers_alloc_channel_buf_structs(struct ust_channel *chan)
{
	void *ptr;
	int result;
	size_t size;
	int i;

	size = PAGE_ALIGN(1);

	for(i=0; i<chan->n_cpus; i++) {

		result = chan->buf_struct_shmids[i] = shmget(getpid(), size, IPC_CREAT | IPC_EXCL | 0700);
		if(result == -1) {
			PERROR("shmget");
			goto destroy_previous;
		}

		/* FIXME: should have matching call to shmdt */
		ptr = shmat(chan->buf_struct_shmids[i], NULL, 0);
		if(ptr == (void *) -1) {
			perror("shmat");
			goto destroy_shm;
		}

		/* Already mark the shared memory for destruction. This will occur only
		 * when all users have detached.
		 */
		result = shmctl(chan->buf_struct_shmids[i], IPC_RMID, NULL);
		if(result == -1) {
			perror("shmctl");
			goto destroy_previous;
		}

		chan->buf[i] = ptr;
	}

	return 0;

	/* Error paths jump into this loop from the allocation loop above,
	 * with i preserved as the counter, so it deallocates the structures
	 * from cpu i down to zero. */
	for(; i>=0; i--) {
destroy_shm:
		result = shmctl(chan->buf_struct_shmids[i], IPC_RMID, NULL);
		if(result == -1) {
			perror("shmctl");
		}

destroy_previous:
		continue;
	}

	return -1;
}

/*
 * Create channel.
 */
static int ust_buffers_create_channel(const char *trace_name, struct ltt_trace_struct *trace,
		const char *channel_name, struct ust_channel *ltt_chan,
		unsigned int subbuf_size, unsigned int n_subbufs, int overwrite)
{
	int result;

	kref_init(&ltt_chan->kref);

	ltt_chan->trace = trace;
	ltt_chan->buffer_begin = ltt_buffer_begin_callback;
	ltt_chan->buffer_end = ltt_buffer_end_callback;
	ltt_chan->overwrite = overwrite;
	ltt_chan->n_subbufs_order = get_count_order(n_subbufs);
	ltt_chan->commit_count_mask = (~0UL >> ltt_chan->n_subbufs_order);
	ltt_chan->n_cpus = get_n_cpus();
	//ust// ltt_chan->buf = percpu_alloc_mask(sizeof(struct ltt_channel_buf_struct), GFP_KERNEL, cpu_possible_map);
	ltt_chan->buf = (void *) malloc(ltt_chan->n_cpus * sizeof(void *));
	if(ltt_chan->buf == NULL) {
		goto error;
	}
	ltt_chan->buf_struct_shmids = (int *) malloc(ltt_chan->n_cpus * sizeof(int));
	if(ltt_chan->buf_struct_shmids == NULL)
		goto free_buf;

	result = ust_buffers_alloc_channel_buf_structs(ltt_chan);
	if(result != 0) {
		goto free_buf_struct_shmids;
	}

	result = ust_buffers_channel_open(ltt_chan, subbuf_size, n_subbufs);
	if (result != 0) {
		ERR("Cannot open channel for trace %s", trace_name);
		goto unalloc_buf_structs;
	}

	return 0;

unalloc_buf_structs:
	/* FIXME: put a call here to unalloc the buf structs! */

free_buf_struct_shmids:
	free(ltt_chan->buf_struct_shmids);

free_buf:
	free(ltt_chan->buf);

error:
	return -1;
}

/*
 * LTTng channel flush function.
 *
 * Must be called when no tracing is active in the channel, because of
 * accesses across CPUs.
 */
static notrace void ltt_relay_buffer_flush(struct ust_buffer *buf)
{
	int result;

	//ust// buf->finalized = 1;
	ltt_force_switch(buf, FORCE_FLUSH);

	result = write(buf->data_ready_fd_write, "1", 1);
	if(result == -1) {
		PERROR("write (in ltt_relay_buffer_flush)");
		ERR("this should never happen!");
	}
}

static void ltt_relay_async_wakeup_chan(struct ust_channel *ltt_channel)
{
//ust//	unsigned int i;
//ust//	struct rchan *rchan = ltt_channel->trans_channel_data;
//ust//
//ust//	for_each_possible_cpu(i) {
//ust//		struct ltt_channel_buf_struct *ltt_buf =
//ust//			percpu_ptr(ltt_channel->buf, i);
//ust//
//ust//		if (atomic_read(&ltt_buf->wakeup_readers) == 1) {
//ust//			atomic_set(&ltt_buf->wakeup_readers, 0);
//ust//			wake_up_interruptible(&rchan->buf[i]->read_wait);
//ust//		}
//ust//	}
}

static void ltt_relay_finish_buffer(struct ust_channel *channel, unsigned int cpu)
{
//	int result;

	if (channel->buf[cpu]) {
		struct ust_buffer *buf = channel->buf[cpu];
		ltt_relay_buffer_flush(buf);
		//ust// ltt_relay_wake_writers(ltt_buf);
		/* closing the pipe tells the consumer the buffer is finished */

		//result = write(ltt_buf->data_ready_fd_write, "D", 1);
		//if(result == -1) {
		//	PERROR("write (in ltt_relay_finish_buffer)");
		//	ERR("this should never happen!");
		//}
		close(buf->data_ready_fd_write);
	}
}


static void ltt_relay_finish_channel(struct ust_channel *channel)
{
	unsigned int i;

	for(i=0; i<channel->n_cpus; i++) {
		ltt_relay_finish_buffer(channel, i);
	}
}

static void ltt_relay_remove_channel(struct ust_channel *channel)
{
	ust_buffers_channel_close(channel);
	kref_put(&channel->kref, ltt_relay_release_channel);
}

struct ltt_reserve_switch_offsets {
	long begin, end, old;
	long begin_switch, end_switch_current, end_switch_old;
	long commit_count, reserve_commit_diff;
	size_t before_hdr_pad, size;
};

/*
 * Returns :
 * 0 if ok
 * !0 if execution must be aborted.
 */
static inline int ltt_relay_try_reserve(
		struct ust_channel *channel, struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, size_t data_size,
		u64 *tsc, unsigned int *rflags, int largest_align)
{
	offsets->begin = local_read(&buf->offset);
	offsets->old = offsets->begin;
	offsets->begin_switch = 0;
	offsets->end_switch_current = 0;
	offsets->end_switch_old = 0;

	*tsc = trace_clock_read64();
	if (last_tsc_overflow(buf, *tsc))
		*rflags = LTT_RFLAG_ID_SIZE_TSC;

	if (SUBBUF_OFFSET(offsets->begin, buf->chan) == 0) {
		offsets->begin_switch = 1;		/* For offsets->begin */
	} else {
		offsets->size = ust_get_header_size(channel,
					offsets->begin, data_size,
					&offsets->before_hdr_pad, *rflags);
		offsets->size += ltt_align(offsets->begin + offsets->size,
					   largest_align)
				 + data_size;
		if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
				> buf->chan->subbuf_size) {
			offsets->end_switch_old = 1;	/* For offsets->old */
			offsets->begin_switch = 1;	/* For offsets->begin */
		}
	}
	if (offsets->begin_switch) {
		long subbuf_index;

		if (offsets->end_switch_old)
			offsets->begin = SUBBUF_ALIGN(offsets->begin,
						      buf->chan);
		offsets->begin = offsets->begin + ltt_subbuffer_header_size();
		/* Test new buffer integrity */
		subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
		offsets->reserve_commit_diff =
			(BUFFER_TRUNC(offsets->begin, buf->chan)
			 >> channel->n_subbufs_order)
			- (local_read(&buf->commit_count[subbuf_index])
			   & channel->commit_count_mask);
		if (offsets->reserve_commit_diff == 0) {
			long consumed;

			consumed = atomic_long_read(&buf->consumed);

			/* Next buffer not corrupted. */
			if (!channel->overwrite &&
				(SUBBUF_TRUNC(offsets->begin, buf->chan)
				 - SUBBUF_TRUNC(consumed, buf->chan))
				>= channel->alloc_size) {

				long consumed_idx = SUBBUF_INDEX(consumed, buf->chan);
				long commit_count = local_read(&buf->commit_count[consumed_idx]);
				if(((commit_count - buf->chan->subbuf_size) & channel->commit_count_mask) - (BUFFER_TRUNC(consumed, buf->chan) >> channel->n_subbufs_order) != 0) {
					WARN("Event dropped. Caused by non-committed event.");
				}
				else {
					WARN("Event dropped. Caused by non-consumed buffer.");
				}
				/*
				 * We do not overwrite non consumed buffers
				 * and we are full : event is lost.
				 */
				local_inc(&buf->events_lost);
				return -1;
			} else {
				/*
				 * next buffer not corrupted, we are either in
				 * overwrite mode or the buffer is not full.
				 * It's safe to write in this new subbuffer.
				 */
			}
		} else {
			/*
			 * Next subbuffer corrupted. Force pushing reader even
			 * in normal mode. It's safe to write in this new
			 * subbuffer.
			 */
		}
		offsets->size = ust_get_header_size(channel,
					offsets->begin, data_size,
					&offsets->before_hdr_pad, *rflags);
		offsets->size += ltt_align(offsets->begin + offsets->size,
					   largest_align)
				 + data_size;
		if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
				> buf->chan->subbuf_size) {
			/*
			 * Event too big for subbuffers, report error, don't
			 * complete the sub-buffer switch.
			 */
			local_inc(&buf->events_lost);
			return -1;
		} else {
			/*
			 * We just made a successful buffer switch and the event
			 * fits in the new subbuffer. Let's write.
			 */
		}
	} else {
		/*
		 * Event fits in the current buffer and we are not on a switch
		 * boundary. It's safe to write.
		 */
	}
	offsets->end = offsets->begin + offsets->size;

	if ((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0) {
		/*
		 * The offset_end will fall at the very beginning of the next
		 * subbuffer.
		 */
		offsets->end_switch_current = 1;	/* For offsets->begin */
	}
	return 0;
}

/*
 * Returns :
 * 0 if ok
 * !0 if execution must be aborted.
 */
static inline int ltt_relay_try_switch(
		enum force_switch_mode mode,
		struct ust_channel *channel,
		struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets,
		u64 *tsc)
{
	long subbuf_index;

	offsets->begin = local_read(&buf->offset);
	offsets->old = offsets->begin;
	offsets->begin_switch = 0;
	offsets->end_switch_old = 0;

	*tsc = trace_clock_read64();

	if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) {
		offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan);
		offsets->end_switch_old = 1;
	} else {
		/* we do not have to switch : buffer is empty */
		return -1;
	}
	if (mode == FORCE_ACTIVE)
		offsets->begin += ltt_subbuffer_header_size();
	/*
	 * Always begin_switch in FORCE_ACTIVE mode.
	 * Test new buffer integrity
	 */
	subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
	offsets->reserve_commit_diff =
		(BUFFER_TRUNC(offsets->begin, buf->chan)
		 >> channel->n_subbufs_order)
		- (local_read(&buf->commit_count[subbuf_index])
		   & channel->commit_count_mask);
	if (offsets->reserve_commit_diff == 0) {
		/* Next buffer not corrupted. */
		if (mode == FORCE_ACTIVE
		    && !channel->overwrite
		    && offsets->begin - atomic_long_read(&buf->consumed)
		       >= channel->alloc_size) {
			/*
			 * We do not overwrite non consumed buffers and we are
			 * full : ignore switch while tracing is active.
			 */
			return -1;
		}
	} else {
		/*
		 * Next subbuffer corrupted. Force pushing reader even in normal
		 * mode
		 */
	}
	offsets->end = offsets->begin;
	return 0;
}

static inline void ltt_reserve_push_reader(
		struct ust_channel *channel,
		struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets)
{
	long consumed_old, consumed_new;

	do {
		consumed_old = atomic_long_read(&buf->consumed);
		/*
		 * If buffer is in overwrite mode, push the reader consumed
		 * count if the write position has reached it and we are not
		 * at the first iteration (don't push the reader farther than
		 * the writer). This operation can be done concurrently by many
		 * writers in the same buffer; the writer at the farthest
		 * write position sub-buffer index is the one which will win
		 * this loop.
		 * If the buffer is not in overwrite mode, pushing the reader
		 * only happens if a sub-buffer is corrupted.
		 */
		if ((SUBBUF_TRUNC(offsets->end-1, buf->chan)
		     - SUBBUF_TRUNC(consumed_old, buf->chan))
		    >= channel->alloc_size)
			consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
		else {
			consumed_new = consumed_old;
			break;
		}
	} while (atomic_long_cmpxchg(&buf->consumed, consumed_old,
			consumed_new) != consumed_old);

	if (consumed_old != consumed_new) {
		/*
		 * Reader pushed : we are the winner of the push, we can
		 * therefore re-equilibrate reserve and commit. Atomic increment
		 * of the commit count permits other writers to play around
		 * with this variable before us. We keep track of
		 * corrupted_subbuffers even in overwrite mode :
		 * we never want to write over a non completely committed
		 * sub-buffer : possible causes : the buffer size is too low
		 * compared to the unordered data input, or there is a writer
		 * that died between the reserve and the commit.
		 */
		if (offsets->reserve_commit_diff) {
			/*
			 * We have to alter the sub-buffer commit count.
			 * We do not deliver the previous subbuffer, given it
			 * was either corrupted or not consumed (overwrite
			 * mode).
			 */
			local_add(offsets->reserve_commit_diff,
				  &buf->commit_count[
					SUBBUF_INDEX(offsets->begin,
						     buf->chan)]);
			if (!channel->overwrite
			    || offsets->reserve_commit_diff
			       != channel->subbuf_size) {
				/*
				 * The reserve commit diff was not subbuf_size :
				 * it means the subbuffer was partly written to
				 * and is therefore corrupted. If it is multiple
				 * of subbuffer size and we are in flight
				 * recorder mode, we are skipping over a whole
				 * subbuffer.
				 */
				local_inc(&buf->corrupted_subbuffers);
			}
		}
	}
}


/*
 * ltt_reserve_switch_old_subbuf: switch old subbuffer
 *
 * Concurrency safe because we are the last and only thread to alter this
 * sub-buffer. As long as it is not delivered and read, no other thread can
 * alter the offset, alter the reserve_count or call the
 * client_buffer_end_callback on this sub-buffer.
 *
 * The only remaining threads could be the ones with pending commits. They will
 * have to do the deliver themselves. Not concurrency safe in overwrite mode.
 * We detect corrupted subbuffers with commit and reserve counts. We keep a
 * corrupted sub-buffers count and push the readers across these sub-buffers.
 *
 * Not concurrency safe if a writer is stalled in a subbuffer and another writer
 * switches in, finding out it's corrupted. The result will be that the old
 * (uncommitted) subbuffer will be declared corrupted, and that the new
 * subbuffer will be declared corrupted too because of the commit count
 * adjustment.
 *
 * Note : offset_old should never be 0 here.
 */
static inline void ltt_reserve_switch_old_subbuf(
		struct ust_channel *channel,
		struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long oldidx = SUBBUF_INDEX(offsets->old - 1, channel);

	channel->buffer_end(buf, *tsc, offsets->old, oldidx);
	/* Must write buffer end before incrementing commit count */
	smp_wmb();
	offsets->commit_count =
		local_add_return(channel->subbuf_size
				 - (SUBBUF_OFFSET(offsets->old - 1, channel)
				 + 1),
				 &buf->commit_count[oldidx]);
	if ((BUFFER_TRUNC(offsets->old - 1, channel)
			>> channel->n_subbufs_order)
			- ((offsets->commit_count - channel->subbuf_size)
			   & channel->commit_count_mask) == 0)
		ltt_deliver(buf, oldidx, offsets->commit_count);
}

/*
 * ltt_reserve_switch_new_subbuf: Populate new subbuffer.
 *
 * This code can be executed unordered : writers may already have written to the
 * sub-buffer before this code gets executed, caution. The commit makes sure
 * that this code is executed before the deliver of this sub-buffer.
 */
static /*inline*/ void ltt_reserve_switch_new_subbuf(
		struct ust_channel *channel,
		struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long beginidx = SUBBUF_INDEX(offsets->begin, channel);

	channel->buffer_begin(buf, *tsc, beginidx);
	/* Must write buffer begin (the subbuffer header) before incrementing
	 * the commit count */
	smp_wmb();
	offsets->commit_count = local_add_return(ltt_subbuffer_header_size(),
			&buf->commit_count[beginidx]);
	/* Check if the written buffer has to be delivered */
	if ((BUFFER_TRUNC(offsets->begin, channel)
			>> channel->n_subbufs_order)
			- ((offsets->commit_count - channel->subbuf_size)
			   & channel->commit_count_mask) == 0)
		ltt_deliver(buf, beginidx, offsets->commit_count);
}


/*
 * ltt_reserve_end_switch_current: finish switching current subbuffer
 *
 * Concurrency safe because we are the last and only thread to alter this
 * sub-buffer. As long as it is not delivered and read, no other thread can
 * alter the offset, alter the reserve_count or call the
 * client_buffer_end_callback on this sub-buffer.
 *
 * The only remaining threads could be the ones with pending commits. They will
 * have to do the deliver themselves. Not concurrency safe in overwrite mode.
 * We detect corrupted subbuffers with commit and reserve counts. We keep a
 * corrupted sub-buffers count and push the readers across these sub-buffers.
 *
 * Not concurrency safe if a writer is stalled in a subbuffer and another writer
 * switches in, finding out it's corrupted. The result will be that the old
 * (uncommitted) subbuffer will be declared corrupted, and that the new
 * subbuffer will be declared corrupted too because of the commit count
 * adjustment.
 */
static inline void ltt_reserve_end_switch_current(
		struct ust_channel *channel,
		struct ust_buffer *buf,
		struct ltt_reserve_switch_offsets *offsets, u64 *tsc)
{
	long endidx = SUBBUF_INDEX(offsets->end - 1, channel);

	channel->buffer_end(buf, *tsc, offsets->end, endidx);
	/* Must write buffer end before incrementing commit count */
	smp_wmb();
	offsets->commit_count =
		local_add_return(channel->subbuf_size
				 - (SUBBUF_OFFSET(offsets->end - 1, channel)
				 + 1),
				 &buf->commit_count[endidx]);
	if ((BUFFER_TRUNC(offsets->end - 1, channel)
			>> channel->n_subbufs_order)
			- ((offsets->commit_count - channel->subbuf_size)
			   & channel->commit_count_mask) == 0)
		ltt_deliver(buf, endidx, offsets->commit_count);
}

/**
 * ltt_relay_reserve_slot - Atomic slot reservation in an LTTng buffer.
 * @trace: the trace structure to log to.
 * @ltt_channel: channel structure
 * @transport_data: data structure specific to ltt relay
 * @data_size: size of the variable length data to log.
 * @slot_size: pointer to total size of the slot (out)
 * @buf_offset : pointer to reserved buffer offset (out)
 * @tsc: pointer to the tsc at the slot reservation (out)
 * @cpu: cpuid
 *
 * Return : -ENOSPC if not enough space, else returns 0.
 * It will take care of sub-buffer switching.
 */
static notrace int ltt_relay_reserve_slot(struct ltt_trace_struct *trace,
		struct ust_channel *channel, void **transport_data,
		size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
		unsigned int *rflags, int largest_align, int cpu)
{
	struct ust_buffer *buf = *transport_data = channel->buf[cpu];
	struct ltt_reserve_switch_offsets offsets;

	offsets.reserve_commit_diff = 0;
	offsets.size = 0;

	/*
	 * Perform retryable operations.
	 */
	if (ltt_nesting > 4) {
		local_inc(&buf->events_lost);
		return -EPERM;
	}
	do {
		if (ltt_relay_try_reserve(channel, buf, &offsets, data_size, tsc, rflags,
				largest_align))
			return -ENOSPC;
	} while (local_cmpxchg(&buf->offset, offsets.old,
			offsets.end) != offsets.old);

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * events, never the opposite (missing a full TSC event when it would be
	 * needed).
	 */
	save_last_tsc(buf, *tsc);

	/*
	 * Push the reader if necessary
	 */
	ltt_reserve_push_reader(channel, buf, &offsets);

	/*
	 * Switch old subbuffer if needed.
	 */
	if (offsets.end_switch_old)
		ltt_reserve_switch_old_subbuf(channel, buf, &offsets, tsc);

	/*
	 * Populate new subbuffer.
	 */
	if (offsets.begin_switch)
		ltt_reserve_switch_new_subbuf(channel, buf, &offsets, tsc);

	if (offsets.end_switch_current)
		ltt_reserve_end_switch_current(channel, buf, &offsets, tsc);

	*slot_size = offsets.size;
	*buf_offset = offsets.begin + offsets.before_hdr_pad;
	return 0;
}

/*
 * Force a sub-buffer switch for a per-cpu buffer. This operation is
 * completely reentrant : can be called while tracing is active with
 * absolutely no lock held.
 *
 * Note, however, that as a local_cmpxchg is used for some atomic
 * operations, this function must be called from the CPU which owns the buffer
 * for an ACTIVE flush.
 */
static notrace void ltt_force_switch(struct ust_buffer *buf,
		enum force_switch_mode mode)
{
	struct ust_channel *channel = buf->chan;
	struct ltt_reserve_switch_offsets offsets;
	u64 tsc;

	offsets.reserve_commit_diff = 0;
	offsets.size = 0;

	/*
	 * Perform retryable operations.
	 */
	do {
		if (ltt_relay_try_switch(mode, channel, buf, &offsets, &tsc))
			return;
	} while (local_cmpxchg(&buf->offset, offsets.old,
			offsets.end) != offsets.old);

	/*
	 * Atomically update last_tsc. This update races against concurrent
	 * atomic updates, but the race will always cause supplementary full TSC
	 * events, never the opposite (missing a full TSC event when it would be
	 * needed).
	 */
	save_last_tsc(buf, tsc);

	/*
	 * Push the reader if necessary
	 */
	if (mode == FORCE_ACTIVE)
		ltt_reserve_push_reader(channel, buf, &offsets);

	/*
	 * Switch old subbuffer if needed.
	 */
	if (offsets.end_switch_old)
		ltt_reserve_switch_old_subbuf(channel, buf, &offsets, &tsc);

	/*
	 * Populate new subbuffer.
	 */
	if (mode == FORCE_ACTIVE)
		ltt_reserve_switch_new_subbuf(channel, buf, &offsets, &tsc);
}

static struct ltt_transport ust_relay_transport = {
	.name = "ustrelay",
	.ops = {
		.create_channel = ust_buffers_create_channel,
		.finish_channel = ltt_relay_finish_channel,
		.remove_channel = ltt_relay_remove_channel,
		.wakeup_channel = ltt_relay_async_wakeup_chan,
//		.commit_slot = ltt_relay_commit_slot,
		.reserve_slot = ltt_relay_reserve_slot,
	},
};

/*
 * for flight recording. must be called after relay_commit.
 * This function decrements the subbuffer's lost_size each time the commit
 * count reaches back the reserve offset (modulo subbuffer size). It is
 * useful for crash dump.
 */
static /* inline */ void ltt_write_commit_counter(struct ust_buffer *buf,
		struct ust_buffer *ltt_buf,
		long idx, long buf_offset, long commit_count, size_t data_size)
{
	long offset;
	long commit_seq_old;

	offset = buf_offset + data_size;

	/*
	 * SUBBUF_OFFSET includes commit_count_mask. We can simply
	 * compare the offsets within the subbuffer without caring about
	 * buffer full/empty mismatch because offset is never zero here
	 * (subbuffer header and event headers have non-zero length).
	 */
	if (unlikely(SUBBUF_OFFSET(offset - commit_count, buf->chan)))
		return;

	commit_seq_old = local_read(&ltt_buf->commit_seq[idx]);
	while (commit_seq_old < commit_count)
		commit_seq_old = local_cmpxchg(&ltt_buf->commit_seq[idx],
					 commit_seq_old, commit_count);
}
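
/*
 * The cmpxchg loop above implements a monotonic, advance-only update:
 * commit_seq[idx] only ever moves forward to the largest commit_count
 * observed, even when several committers race. A loser of the cmpxchg
 * re-reads the new value (the cmpxchg return) and retries only while it
 * is still behind its own commit_count.
 */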

/*
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 *
 * Parameters:
 *
 * @ltt_channel : channel structure
 * @transport_data: transport-specific data
 * @buf_offset : offset following the event header.
 * @data_size : size of the event data.
 * @slot_size : size of the reserved slot.
 */
/* FIXME: make this function static inline in the .h! */
/*static*/ /* inline */ notrace void ltt_commit_slot(
		struct ust_channel *channel,
		void **transport_data, long buf_offset,
		size_t data_size, size_t slot_size)
{
	struct ust_buffer *buf = *transport_data;
	long offset_end = buf_offset;
	long endidx = SUBBUF_INDEX(offset_end - 1, channel);
	long commit_count;

	/* Must write slot data before incrementing commit count */
	smp_wmb();
	commit_count = local_add_return(slot_size,
		&buf->commit_count[endidx]);
	/* Check if all commits have been done */
	if ((BUFFER_TRUNC(offset_end - 1, channel)
			>> channel->n_subbufs_order)
			- ((commit_count - channel->subbuf_size)
			   & channel->commit_count_mask) == 0)
		ltt_deliver(buf, endidx, commit_count);
	/*
	 * Update lost_size for each commit. It's needed only for extracting
	 * ltt buffers from vmcore, after crash.
	 */
	ltt_write_commit_counter(buf, buf, endidx,
		buf_offset, commit_count, data_size);
}
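
/*
 * How a probe is assumed to pair the reserve and commit steps (a sketch;
 * the serialization of the payload in between is elided, and the variable
 * names and alignment/cpu arguments are illustrative):
 *
 *	size_t slot_size;
 *	long buf_offset;
 *	u64 tsc;
 *	unsigned int rflags = 0;
 *	void *transport_data;
 *
 *	if (ltt_relay_reserve_slot(trace, channel, &transport_data, data_size,
 *			&slot_size, &buf_offset, &tsc, &rflags,
 *			sizeof(long), cpu) == 0) {
 *		// ... write the event header and payload at buf_offset ...
 *		ltt_commit_slot(channel, &transport_data, buf_offset,
 *				data_size, slot_size);
 *	}
 */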


static char initialized = 0;

void __attribute__((constructor)) init_ustrelay_transport(void)
{
	if(!initialized) {
		ltt_transport_register(&ust_relay_transport);
		initialized = 1;
	}
}

static void __attribute__((destructor)) ltt_relay_exit(void)
{
	ltt_transport_unregister(&ust_relay_transport);
}