/*
 * buffers.c
 * LTTng userspace tracer buffering system
 *
 * Copyright (C) 2009 - Pierre-Marc Fournier (pierre-marc dot fournier at polymtl dot ca)
 * Copyright (C) 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <unistd.h>
#include <sys/mman.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <fcntl.h>
#include <ust/kernelcompat.h>
#include <kcompat/kref.h>
#include "buffers.h"
#include "channels.h"
#include "tracer.h"
#include "tracercore.h"
#include "usterr.h"

static DEFINE_MUTEX(ust_buffers_channels_mutex);
static LIST_HEAD(ust_buffers_channels);

static int get_n_cpus(void)
{
	int result;
	static int n_cpus = 0;

	if(n_cpus) {
		return n_cpus;
	}

	result = sysconf(_SC_NPROCESSORS_ONLN);
	if(result == -1) {
		return -1;
	}

	n_cpus = result;

	return result;
}

static int ust_buffers_init_buffer(struct ltt_trace_struct *trace,
		struct ust_channel *ltt_chan,
		struct ust_buffer *buf,
		unsigned int n_subbufs);

static int ust_buffers_alloc_buf(struct ust_buffer *buf, size_t *size)
{
	void *ptr;
	int result;

	*size = PAGE_ALIGN(*size);

	result = buf->shmid = shmget(getpid(), *size, IPC_CREAT | IPC_EXCL | 0700);
	if(result == -1 && errno == EINVAL) {
		ERR("shmget() returned EINVAL; maybe /proc/sys/kernel/shmmax should be increased.");
		return -1;
	}
	else if(result == -1) {
		PERROR("shmget");
		return -1;
	}

	/* FIXME: should have matching call to shmdt */
	ptr = shmat(buf->shmid, NULL, 0);
	if(ptr == (void *) -1) {
		perror("shmat");
		goto destroy_shmem;
	}

	/* Already mark the shared memory for destruction. This will occur only
	 * when all users have detached.
	 */
	result = shmctl(buf->shmid, IPC_RMID, NULL);
	if(result == -1) {
		perror("shmctl");
		return -1;
	}

	buf->buf_data = ptr;
	buf->buf_size = *size;

	return 0;

destroy_shmem:
	result = shmctl(buf->shmid, IPC_RMID, NULL);
	if(result == -1) {
		perror("shmctl");
	}

	return -1;
}

int ust_buffers_create_buf(struct ust_channel *channel, int cpu)
{
	int result;
	struct ust_buffer *buf = channel->buf[cpu];

	buf->cpu = cpu;
	result = ust_buffers_alloc_buf(buf, &channel->alloc_size);
	if(result)
		return -1;

	buf->chan = channel;
	kref_get(&channel->kref);
	return 0;
}

static void ust_buffers_destroy_channel(struct kref *kref)
{
	struct ust_channel *chan = container_of(kref, struct ust_channel, kref);
	free(chan);
}

static void ust_buffers_destroy_buf(struct ust_buffer *buf)
{
	struct ust_channel *chan = buf->chan;
	int result;

	result = munmap(buf->buf_data, buf->buf_size);
	if(result == -1) {
		PERROR("munmap");
	}

//ust//	chan->buf[buf->cpu] = NULL;
	free(buf);
	kref_put(&chan->kref, ust_buffers_destroy_channel);
}

/* called from kref_put */
static void ust_buffers_remove_buf(struct kref *kref)
{
	struct ust_buffer *buf = container_of(kref, struct ust_buffer, kref);
	ust_buffers_destroy_buf(buf);
}

int ust_buffers_open_buf(struct ust_channel *chan, int cpu)
{
	int result;

	result = ust_buffers_create_buf(chan, cpu);
	if (result == -1)
		return -1;

	kref_init(&chan->buf[cpu]->kref);

	result = ust_buffers_init_buffer(chan->trace, chan, chan->buf[cpu], chan->subbuf_cnt);
	if(result == -1)
		return -1;

	return 0;

	/* FIXME: decrementally destroy on error? */
}

/**
 * ust_buffers_close_buf - close a channel buffer
 * @buf: buffer
 */
static void ust_buffers_close_buf(struct ust_buffer *buf)
{
	kref_put(&buf->kref, ust_buffers_remove_buf);
}

int ust_buffers_channel_open(struct ust_channel *chan, size_t subbuf_size, size_t subbuf_cnt)
{
	int i;
	int result;

	if(subbuf_size == 0 || subbuf_cnt == 0)
		return -1;

	chan->version = UST_CHANNEL_VERSION;
	chan->subbuf_cnt = subbuf_cnt;
	chan->subbuf_size = subbuf_size;
	chan->subbuf_size_order = get_count_order(subbuf_size);
	chan->alloc_size = FIX_SIZE(subbuf_size * subbuf_cnt);

	kref_init(&chan->kref);

	mutex_lock(&ust_buffers_channels_mutex);
	for(i=0; i<chan->n_cpus; i++) {
		result = ust_buffers_open_buf(chan, i);
		if (result == -1)
			goto error;
	}
	list_add(&chan->list, &ust_buffers_channels);
	mutex_unlock(&ust_buffers_channels_mutex);

	return 0;

	/* Jump directly inside the loop to close the buffers that were already
	 * opened. */
	for(; i>=0; i--) {
		ust_buffers_close_buf(chan->buf[i]);
error:
		continue;
	}

	kref_put(&chan->kref, ust_buffers_destroy_channel);
	mutex_unlock(&ust_buffers_channels_mutex);
	return -1;
}
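
/*
 * Usage sketch (illustrative only; the surrounding setup is an
 * assumption, not code from this file): once the ust_channel and its
 * per-cpu buf array have been allocated, as ust_buffers_create_channel()
 * does below, a channel with eight 4 kB sub-buffers per cpu is opened
 * with:
 *
 *	result = ust_buffers_channel_open(chan, 4096, 8);
 *	if(result == -1)
 *		// ... handle the error: no buffers were left open
 */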
219 | ||
220 | void ust_buffers_channel_close(struct ust_channel *chan) | |
221 | { | |
204141ee PMF |
222 | int i; |
223 | if(!chan) | |
b5b073e2 PMF |
224 | return; |
225 | ||
226 | mutex_lock(&ust_buffers_channels_mutex); | |
204141ee PMF |
227 | for(i=0; i<chan->n_cpus; i++) { |
228 | /* FIXME: if we make it here, then all buffers were necessarily allocated. Moreover, we don't | |
229 | * initialize to NULL so we cannot use this check. Should we? */ | |
230 | //ust// if (chan->buf[i]) | |
231 | ust_buffers_close_buf(chan->buf[i]); | |
232 | } | |
b5b073e2 PMF |
233 | |
234 | list_del(&chan->list); | |
235 | kref_put(&chan->kref, ust_buffers_destroy_channel); | |
236 | mutex_unlock(&ust_buffers_channels_mutex); | |
237 | } | |
238 | ||
239 | /* _ust_buffers_write() | |
240 | * | |
241 | * @buf: destination buffer | |
242 | * @offset: offset in destination | |
243 | * @src: source buffer | |
244 | * @len: length of source | |
245 | * @cpy: already copied | |
246 | */ | |
247 | ||
248 | void _ust_buffers_write(struct ust_buffer *buf, size_t offset, | |
249 | const void *src, size_t len, ssize_t cpy) | |
250 | { | |
251 | do { | |
252 | len -= cpy; | |
253 | src += cpy; | |
254 | offset += cpy; | |
204141ee | 255 | |
b5b073e2 PMF |
256 | WARN_ON(offset >= buf->buf_size); |
257 | ||
258 | cpy = min_t(size_t, len, buf->buf_size - offset); | |
259 | ust_buffers_do_copy(buf->buf_data + offset, src, cpy); | |
260 | } while (unlikely(len != cpy)); | |
261 | } | |
262 | ||
b5b073e2 PMF |
263 | void *ltt_buffers_offset_address(struct ust_buffer *buf, size_t offset) |
264 | { | |
265 | return ((char *)buf->buf_data)+offset; | |
266 | } | |
267 | ||
268 | /* | |
269 | * ------- | |
270 | */ | |
271 | ||
272 | /* | |
273 | * Last TSC comparison functions. Check if the current TSC overflows | |
274 | * LTT_TSC_BITS bits from the last TSC read. Reads and writes last_tsc | |
275 | * atomically. | |
276 | */ | |
277 | ||
278 | /* FIXME: does this test work properly? */ | |
279 | #if (BITS_PER_LONG == 32) | |
c9dab68a | 280 | static inline void save_last_tsc(struct ust_buffer *ltt_buf, |
b5b073e2 PMF |
281 | u64 tsc) |
282 | { | |
283 | ltt_buf->last_tsc = (unsigned long)(tsc >> LTT_TSC_BITS); | |
284 | } | |
285 | ||
c9dab68a | 286 | static inline int last_tsc_overflow(struct ust_buffer *ltt_buf, |
b5b073e2 PMF |
287 | u64 tsc) |
288 | { | |
289 | unsigned long tsc_shifted = (unsigned long)(tsc >> LTT_TSC_BITS); | |
290 | ||
291 | if (unlikely((tsc_shifted - ltt_buf->last_tsc))) | |
292 | return 1; | |
293 | else | |
294 | return 0; | |
295 | } | |
296 | #else | |
297 | static inline void save_last_tsc(struct ust_buffer *ltt_buf, | |
298 | u64 tsc) | |
299 | { | |
300 | ltt_buf->last_tsc = (unsigned long)tsc; | |
301 | } | |
302 | ||
303 | static inline int last_tsc_overflow(struct ust_buffer *ltt_buf, | |
304 | u64 tsc) | |
305 | { | |
306 | if (unlikely((tsc - ltt_buf->last_tsc) >> LTT_TSC_BITS)) | |
307 | return 1; | |
308 | else | |
309 | return 0; | |
310 | } | |
311 | #endif | |
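
/*
 * Worked example (illustrative values, not from the original source):
 * assume LTT_TSC_BITS == 32 on a 64-bit build. If last_tsc was saved at
 * tsc = 0x100000000 and a later event reads tsc = 0x1ffffffff, then
 * (tsc - last_tsc) >> LTT_TSC_BITS == 0, so the compact event header can
 * still encode the timestamp. At tsc = 0x200000000 the shifted
 * difference becomes non-zero, last_tsc_overflow() returns 1, and the
 * event is written with LTT_RFLAG_ID_SIZE_TSC so that it carries the
 * full 64-bit TSC (see ltt_relay_try_reserve() below).
 */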
312 | ||
313 | /* | |
314 | * A switch is done during tracing or as a final flush after tracing (so it | |
315 | * won't write in the new sub-buffer). | |
316 | */ | |
317 | enum force_switch_mode { FORCE_ACTIVE, FORCE_FLUSH }; | |
318 | ||
204141ee | 319 | static void ust_buffers_destroy_buffer(struct ust_channel *ltt_chan, int cpu); |
b5b073e2 PMF |
320 | |
321 | static void ltt_force_switch(struct ust_buffer *buf, | |
322 | enum force_switch_mode mode); | |
323 | ||
324 | /* | |
325 | * Trace callbacks | |
326 | */ | |
327 | static void ltt_buffer_begin_callback(struct ust_buffer *buf, | |
328 | u64 tsc, unsigned int subbuf_idx) | |
329 | { | |
330 | struct ust_channel *channel = buf->chan; | |
331 | struct ltt_subbuffer_header *header = | |
332 | (struct ltt_subbuffer_header *) | |
333 | ltt_buffers_offset_address(buf, | |
334 | subbuf_idx * buf->chan->subbuf_size); | |
335 | ||
336 | header->cycle_count_begin = tsc; | |
337 | header->lost_size = 0xFFFFFFFF; /* for debugging */ | |
338 | header->buf_size = buf->chan->subbuf_size; | |
339 | ltt_write_trace_header(channel->trace, header); | |
340 | } | |
341 | ||
/*
 * offset is assumed to never be 0 here : never deliver a completely empty
 * subbuffer. The lost size is between 0 and subbuf_size-1.
 */
static notrace void ltt_buffer_end_callback(struct ust_buffer *buf,
		u64 tsc, unsigned int offset, unsigned int subbuf_idx)
{
	struct ltt_subbuffer_header *header =
		(struct ltt_subbuffer_header *)
			ltt_buffers_offset_address(buf,
				subbuf_idx * buf->chan->subbuf_size);

	header->lost_size = SUBBUF_OFFSET((buf->chan->subbuf_size - offset),
				buf->chan);
	header->cycle_count_end = tsc;
	header->events_lost = local_read(&buf->events_lost);
	header->subbuf_corrupt = local_read(&buf->corrupted_subbuffers);
}

void (*wake_consumer)(void *, int) = NULL;

void relay_set_wake_consumer(void (*wake)(void *, int))
{
	wake_consumer = wake;
}

void relay_wake_consumer(void *arg, int finished)
{
	if(wake_consumer)
		wake_consumer(arg, finished);
}
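
/*
 * Example registration (sketch only; my_wake_consumer is a hypothetical
 * callback, not part of this file). A consumer-facing component installs
 * its wakeup hook once at startup:
 *
 *	static void my_wake_consumer(void *arg, int finished)
 *	{
 *		// e.g. signal a condition variable or write to an fd
 *	}
 *
 *	relay_set_wake_consumer(my_wake_consumer);
 */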
374 | ||
375 | static notrace void ltt_deliver(struct ust_buffer *buf, unsigned int subbuf_idx, | |
376 | long commit_count) | |
377 | { | |
378 | int result; | |
379 | ||
380 | //ust// #ifdef CONFIG_LTT_VMCORE | |
381 | local_set(&buf->commit_seq[subbuf_idx], commit_count); | |
382 | //ust// #endif | |
383 | ||
384 | /* wakeup consumer */ | |
385 | result = write(buf->data_ready_fd_write, "1", 1); | |
386 | if(result == -1) { | |
387 | PERROR("write (in ltt_relay_buffer_flush)"); | |
388 | ERR("this should never happen!"); | |
389 | } | |
390 | //ust// atomic_set(<t_buf->wakeup_readers, 1); | |
391 | } | |
392 | ||
393 | /* | |
394 | * This function should not be called from NMI interrupt context | |
395 | */ | |
396 | static notrace void ltt_buf_unfull(struct ust_buffer *buf, | |
397 | unsigned int subbuf_idx, | |
398 | long offset) | |
399 | { | |
400 | //ust// struct ltt_channel_struct *ltt_channel = | |
401 | //ust// (struct ltt_channel_struct *)buf->chan->private_data; | |
402 | //ust// struct ltt_channel_buf_struct *ltt_buf = ltt_channel->buf; | |
403 | //ust// | |
404 | //ust// ltt_relay_wake_writers(ltt_buf); | |
405 | } | |
406 | ||
407 | int ust_buffers_do_get_subbuf(struct ust_buffer *buf, long *pconsumed_old) | |
408 | { | |
409 | struct ust_channel *channel = buf->chan; | |
410 | long consumed_old, consumed_idx, commit_count, write_offset; | |
411 | consumed_old = atomic_long_read(&buf->consumed); | |
412 | consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan); | |
413 | commit_count = local_read(&buf->commit_count[consumed_idx]); | |
414 | /* | |
415 | * Make sure we read the commit count before reading the buffer | |
416 | * data and the write offset. Correct consumed offset ordering | |
417 | * wrt commit count is insured by the use of cmpxchg to update | |
418 | * the consumed offset. | |
419 | */ | |
420 | smp_rmb(); | |
421 | write_offset = local_read(&buf->offset); | |
422 | /* | |
423 | * Check that the subbuffer we are trying to consume has been | |
424 | * already fully committed. | |
425 | */ | |
426 | if (((commit_count - buf->chan->subbuf_size) | |
427 | & channel->commit_count_mask) | |
428 | - (BUFFER_TRUNC(consumed_old, buf->chan) | |
429 | >> channel->n_subbufs_order) | |
430 | != 0) { | |
431 | return -EAGAIN; | |
432 | } | |
433 | /* | |
434 | * Check that we are not about to read the same subbuffer in | |
435 | * which the writer head is. | |
436 | */ | |
437 | if ((SUBBUF_TRUNC(write_offset, buf->chan) | |
438 | - SUBBUF_TRUNC(consumed_old, buf->chan)) | |
439 | == 0) { | |
440 | return -EAGAIN; | |
441 | } | |
442 | ||
443 | *pconsumed_old = consumed_old; | |
444 | return 0; | |
445 | } | |
446 | ||
447 | int ust_buffers_do_put_subbuf(struct ust_buffer *buf, u32 uconsumed_old) | |
448 | { | |
449 | long consumed_new, consumed_old; | |
450 | ||
451 | consumed_old = atomic_long_read(&buf->consumed); | |
452 | consumed_old = consumed_old & (~0xFFFFFFFFL); | |
453 | consumed_old = consumed_old | uconsumed_old; | |
454 | consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan); | |
455 | ||
456 | //ust// spin_lock(<t_buf->full_lock); | |
457 | if (atomic_long_cmpxchg(&buf->consumed, consumed_old, | |
458 | consumed_new) | |
459 | != consumed_old) { | |
460 | /* We have been pushed by the writer : the last | |
461 | * buffer read _is_ corrupted! It can also | |
462 | * happen if this is a buffer we never got. */ | |
463 | //ust// spin_unlock(<t_buf->full_lock); | |
464 | return -EIO; | |
465 | } else { | |
466 | /* tell the client that buffer is now unfull */ | |
467 | int index; | |
468 | long data; | |
469 | index = SUBBUF_INDEX(consumed_old, buf->chan); | |
470 | data = BUFFER_OFFSET(consumed_old, buf->chan); | |
471 | ltt_buf_unfull(buf, index, data); | |
472 | //ust// spin_unlock(<t_buf->full_lock); | |
473 | } | |
474 | return 0; | |
475 | } | |
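
/*
 * Consumer-side sketch of the get/put protocol (illustrative only; the
 * surrounding loop and the copy step are assumptions, not code from this
 * file):
 *
 *	long consumed_old;
 *
 *	if (ust_buffers_do_get_subbuf(buf, &consumed_old) == 0) {
 *		// the subbuffer at this offset is fully committed and
 *		// safe to read
 *		void *subbuf = ltt_buffers_offset_address(buf,
 *				BUFFER_OFFSET(consumed_old, buf->chan));
 *		// ... copy out up to buf->chan->subbuf_size bytes ...
 *		if (ust_buffers_do_put_subbuf(buf, (u32)consumed_old) == -EIO)
 *			; // writer pushed us: the data just read is corrupted
 *	}
 */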
476 | ||
477 | static void ltt_relay_print_subbuffer_errors( | |
478 | struct ust_channel *channel, | |
204141ee | 479 | long cons_off, int cpu) |
b5b073e2 | 480 | { |
204141ee | 481 | struct ust_buffer *ltt_buf = channel->buf[cpu]; |
b5b073e2 PMF |
482 | long cons_idx, commit_count, write_offset; |
483 | ||
484 | cons_idx = SUBBUF_INDEX(cons_off, channel); | |
485 | commit_count = local_read(<t_buf->commit_count[cons_idx]); | |
486 | /* | |
487 | * No need to order commit_count and write_offset reads because we | |
488 | * execute after trace is stopped when there are no readers left. | |
489 | */ | |
490 | write_offset = local_read(<t_buf->offset); | |
491 | WARN( "LTT : unread channel %s offset is %ld " | |
492 | "and cons_off : %ld\n", | |
493 | channel->channel_name, write_offset, cons_off); | |
494 | /* Check each sub-buffer for non filled commit count */ | |
495 | if (((commit_count - channel->subbuf_size) & channel->commit_count_mask) | |
496 | - (BUFFER_TRUNC(cons_off, channel) >> channel->n_subbufs_order) != 0) { | |
497 | ERR("LTT : %s : subbuffer %lu has non filled " | |
498 | "commit count %lu.\n", | |
499 | channel->channel_name, cons_idx, commit_count); | |
500 | } | |
501 | ERR("LTT : %s : commit count : %lu, subbuf size %zd\n", | |
502 | channel->channel_name, commit_count, | |
503 | channel->subbuf_size); | |
504 | } | |
505 | ||
506 | static void ltt_relay_print_errors(struct ltt_trace_struct *trace, | |
204141ee | 507 | struct ust_channel *channel, int cpu) |
b5b073e2 | 508 | { |
204141ee | 509 | struct ust_buffer *ltt_buf = channel->buf[cpu]; |
b5b073e2 PMF |
510 | long cons_off; |
511 | ||
4292ed8a PMF |
512 | /* |
513 | * Can be called in the error path of allocation when | |
514 | * trans_channel_data is not yet set. | |
515 | */ | |
516 | if (!channel) | |
517 | return; | |
518 | ||
b5b073e2 PMF |
519 | for (cons_off = atomic_long_read(<t_buf->consumed); |
520 | (SUBBUF_TRUNC(local_read(<t_buf->offset), | |
521 | channel) | |
522 | - cons_off) > 0; | |
523 | cons_off = SUBBUF_ALIGN(cons_off, channel)) | |
204141ee | 524 | ltt_relay_print_subbuffer_errors(channel, cons_off, cpu); |
b5b073e2 PMF |
525 | } |
526 | ||
204141ee | 527 | static void ltt_relay_print_buffer_errors(struct ust_channel *channel, int cpu) |
b5b073e2 PMF |
528 | { |
529 | struct ltt_trace_struct *trace = channel->trace; | |
204141ee | 530 | struct ust_buffer *ltt_buf = channel->buf[cpu]; |
b5b073e2 PMF |
531 | |
532 | if (local_read(<t_buf->events_lost)) | |
c1f20530 | 533 | ERR("channel %s: %ld events lost", |
b5b073e2 | 534 | channel->channel_name, |
c1f20530 | 535 | local_read(<t_buf->events_lost)); |
b5b073e2 | 536 | if (local_read(<t_buf->corrupted_subbuffers)) |
c1f20530 | 537 | ERR("channel %s : %ld corrupted subbuffers", |
b5b073e2 | 538 | channel->channel_name, |
c1f20530 | 539 | local_read(<t_buf->corrupted_subbuffers)); |
b5b073e2 | 540 | |
204141ee | 541 | ltt_relay_print_errors(trace, channel, cpu); |
b5b073e2 PMF |
542 | } |
543 | ||
544 | static void ltt_relay_release_channel(struct kref *kref) | |
545 | { | |
546 | struct ust_channel *ltt_chan = container_of(kref, | |
547 | struct ust_channel, kref); | |
548 | free(ltt_chan->buf); | |
549 | } | |
550 | ||
551 | /* | |
552 | * Create ltt buffer. | |
553 | */ | |
554 | //ust// static int ltt_relay_create_buffer(struct ltt_trace_struct *trace, | |
555 | //ust// struct ltt_channel_struct *ltt_chan, struct rchan_buf *buf, | |
556 | //ust// unsigned int cpu, unsigned int n_subbufs) | |
557 | //ust// { | |
558 | //ust// struct ltt_channel_buf_struct *ltt_buf = | |
559 | //ust// percpu_ptr(ltt_chan->buf, cpu); | |
560 | //ust// unsigned int j; | |
561 | //ust// | |
562 | //ust// ltt_buf->commit_count = | |
563 | //ust// kzalloc_node(sizeof(ltt_buf->commit_count) * n_subbufs, | |
564 | //ust// GFP_KERNEL, cpu_to_node(cpu)); | |
565 | //ust// if (!ltt_buf->commit_count) | |
566 | //ust// return -ENOMEM; | |
567 | //ust// kref_get(&trace->kref); | |
568 | //ust// kref_get(&trace->ltt_transport_kref); | |
569 | //ust// kref_get(<t_chan->kref); | |
570 | //ust// local_set(<t_buf->offset, ltt_subbuffer_header_size()); | |
571 | //ust// atomic_long_set(<t_buf->consumed, 0); | |
572 | //ust// atomic_long_set(<t_buf->active_readers, 0); | |
573 | //ust// for (j = 0; j < n_subbufs; j++) | |
574 | //ust// local_set(<t_buf->commit_count[j], 0); | |
575 | //ust// init_waitqueue_head(<t_buf->write_wait); | |
576 | //ust// atomic_set(<t_buf->wakeup_readers, 0); | |
577 | //ust// spin_lock_init(<t_buf->full_lock); | |
578 | //ust// | |
579 | //ust// ltt_buffer_begin_callback(buf, trace->start_tsc, 0); | |
580 | //ust// /* atomic_add made on local variable on data that belongs to | |
581 | //ust// * various CPUs : ok because tracing not started (for this cpu). */ | |
582 | //ust// local_add(ltt_subbuffer_header_size(), <t_buf->commit_count[0]); | |
583 | //ust// | |
584 | //ust// local_set(<t_buf->events_lost, 0); | |
585 | //ust// local_set(<t_buf->corrupted_subbuffers, 0); | |
586 | //ust// | |
587 | //ust// return 0; | |
588 | //ust// } | |
589 | ||
590 | static int ust_buffers_init_buffer(struct ltt_trace_struct *trace, | |
591 | struct ust_channel *ltt_chan, struct ust_buffer *buf, | |
592 | unsigned int n_subbufs) | |
593 | { | |
594 | unsigned int j; | |
595 | int fds[2]; | |
596 | int result; | |
597 | ||
598 | buf->commit_count = | |
599 | zmalloc(sizeof(buf->commit_count) * n_subbufs); | |
600 | if (!buf->commit_count) | |
601 | return -ENOMEM; | |
602 | kref_get(&trace->kref); | |
603 | kref_get(&trace->ltt_transport_kref); | |
604 | kref_get(<t_chan->kref); | |
605 | local_set(&buf->offset, ltt_subbuffer_header_size()); | |
606 | atomic_long_set(&buf->consumed, 0); | |
607 | atomic_long_set(&buf->active_readers, 0); | |
608 | for (j = 0; j < n_subbufs; j++) | |
609 | local_set(&buf->commit_count[j], 0); | |
610 | //ust// init_waitqueue_head(&buf->write_wait); | |
611 | //ust// atomic_set(&buf->wakeup_readers, 0); | |
612 | //ust// spin_lock_init(&buf->full_lock); | |
613 | ||
614 | ltt_buffer_begin_callback(buf, trace->start_tsc, 0); | |
615 | ||
616 | local_add(ltt_subbuffer_header_size(), &buf->commit_count[0]); | |
617 | ||
618 | local_set(&buf->events_lost, 0); | |
619 | local_set(&buf->corrupted_subbuffers, 0); | |
620 | ||
621 | result = pipe(fds); | |
622 | if(result == -1) { | |
623 | PERROR("pipe"); | |
624 | return -1; | |
625 | } | |
626 | buf->data_ready_fd_read = fds[0]; | |
627 | buf->data_ready_fd_write = fds[1]; | |
628 | ||
629 | /* FIXME: do we actually need this? */ | |
630 | result = fcntl(fds[0], F_SETFL, O_NONBLOCK); | |
631 | if(result == -1) { | |
632 | PERROR("fcntl"); | |
633 | } | |
634 | ||
635 | //ust// buf->commit_seq = malloc(sizeof(buf->commit_seq) * n_subbufs); | |
636 | //ust// if(!ltt_buf->commit_seq) { | |
637 | //ust// return -1; | |
638 | //ust// } | |
639 | ||
640 | /* FIXME: decrementally destroy on error */ | |
641 | ||
642 | return 0; | |
643 | } | |
644 | ||
645 | /* FIXME: use this function */ | |
204141ee | 646 | static void ust_buffers_destroy_buffer(struct ust_channel *ltt_chan, int cpu) |
b5b073e2 PMF |
647 | { |
648 | struct ltt_trace_struct *trace = ltt_chan->trace; | |
204141ee | 649 | struct ust_buffer *ltt_buf = ltt_chan->buf[cpu]; |
b5b073e2 PMF |
650 | |
651 | kref_put(<t_chan->trace->ltt_transport_kref, | |
652 | ltt_release_transport); | |
204141ee | 653 | ltt_relay_print_buffer_errors(ltt_chan, cpu); |
b5b073e2 PMF |
654 | //ust// free(ltt_buf->commit_seq); |
655 | kfree(ltt_buf->commit_count); | |
656 | ltt_buf->commit_count = NULL; | |
657 | kref_put(<t_chan->kref, ltt_relay_release_channel); | |
658 | kref_put(&trace->kref, ltt_release_trace); | |
659 | //ust// wake_up_interruptible(&trace->kref_wq); | |
660 | } | |
661 | ||
204141ee | 662 | static int ust_buffers_alloc_channel_buf_structs(struct ust_channel *chan) |
b5b073e2 PMF |
663 | { |
664 | void *ptr; | |
665 | int result; | |
204141ee PMF |
666 | size_t size; |
667 | int i; | |
b5b073e2 | 668 | |
204141ee | 669 | size = PAGE_ALIGN(1); |
b5b073e2 | 670 | |
204141ee | 671 | for(i=0; i<chan->n_cpus; i++) { |
b5b073e2 | 672 | |
204141ee PMF |
673 | result = chan->buf_struct_shmids[i] = shmget(getpid(), size, IPC_CREAT | IPC_EXCL | 0700); |
674 | if(result == -1) { | |
675 | PERROR("shmget"); | |
676 | goto destroy_previous; | |
677 | } | |
b5b073e2 | 678 | |
204141ee PMF |
679 | /* FIXME: should have matching call to shmdt */ |
680 | ptr = shmat(chan->buf_struct_shmids[i], NULL, 0); | |
681 | if(ptr == (void *) -1) { | |
682 | perror("shmat"); | |
683 | goto destroy_shm; | |
684 | } | |
685 | ||
686 | /* Already mark the shared memory for destruction. This will occur only | |
687 | * when all users have detached. | |
688 | */ | |
689 | result = shmctl(chan->buf_struct_shmids[i], IPC_RMID, NULL); | |
690 | if(result == -1) { | |
691 | perror("shmctl"); | |
692 | goto destroy_previous; | |
693 | } | |
694 | ||
695 | chan->buf[i] = ptr; | |
b5b073e2 PMF |
696 | } |
697 | ||
204141ee | 698 | return 0; |
b5b073e2 | 699 | |
204141ee PMF |
700 | /* Jumping inside this loop occurs from within the other loop above with i as |
701 | * counter, so it unallocates the structures for the cpu = current_i down to | |
702 | * zero. */ | |
703 | for(; i>=0; i--) { | |
704 | destroy_shm: | |
705 | result = shmctl(chan->buf_struct_shmids[i], IPC_RMID, NULL); | |
706 | if(result == -1) { | |
707 | perror("shmctl"); | |
708 | } | |
b5b073e2 | 709 | |
204141ee PMF |
710 | destroy_previous: |
711 | continue; | |
b5b073e2 PMF |
712 | } |
713 | ||
204141ee | 714 | return -1; |
b5b073e2 PMF |
715 | } |
716 | ||
717 | /* | |
718 | * Create channel. | |
719 | */ | |
720 | static int ust_buffers_create_channel(const char *trace_name, struct ltt_trace_struct *trace, | |
721 | const char *channel_name, struct ust_channel *ltt_chan, | |
722 | unsigned int subbuf_size, unsigned int n_subbufs, int overwrite) | |
723 | { | |
b5b073e2 PMF |
724 | int result; |
725 | ||
726 | kref_init(<t_chan->kref); | |
727 | ||
728 | ltt_chan->trace = trace; | |
729 | ltt_chan->buffer_begin = ltt_buffer_begin_callback; | |
730 | ltt_chan->buffer_end = ltt_buffer_end_callback; | |
731 | ltt_chan->overwrite = overwrite; | |
732 | ltt_chan->n_subbufs_order = get_count_order(n_subbufs); | |
733 | ltt_chan->commit_count_mask = (~0UL >> ltt_chan->n_subbufs_order); | |
204141ee | 734 | ltt_chan->n_cpus = get_n_cpus(); |
b5b073e2 | 735 | //ust// ltt_chan->buf = percpu_alloc_mask(sizeof(struct ltt_channel_buf_struct), GFP_KERNEL, cpu_possible_map); |
204141ee PMF |
736 | ltt_chan->buf = (void *) malloc(ltt_chan->n_cpus * sizeof(void *)); |
737 | if(ltt_chan->buf == NULL) { | |
738 | goto error; | |
739 | } | |
740 | ltt_chan->buf_struct_shmids = (int *) malloc(ltt_chan->n_cpus * sizeof(int)); | |
741 | if(ltt_chan->buf_struct_shmids == NULL) | |
742 | goto free_buf; | |
b5b073e2 | 743 | |
204141ee PMF |
744 | result = ust_buffers_alloc_channel_buf_structs(ltt_chan); |
745 | if(result != 0) { | |
746 | goto free_buf_struct_shmids; | |
747 | } | |
b5b073e2 | 748 | |
b5b073e2 | 749 | result = ust_buffers_channel_open(ltt_chan, subbuf_size, n_subbufs); |
204141ee | 750 | if (result != 0) { |
c1f20530 | 751 | ERR("Cannot open channel for trace %s", trace_name); |
204141ee | 752 | goto unalloc_buf_structs; |
b5b073e2 PMF |
753 | } |
754 | ||
204141ee PMF |
755 | return 0; |
756 | ||
757 | unalloc_buf_structs: | |
758 | /* FIXME: put a call here to unalloc the buf structs! */ | |
759 | ||
760 | free_buf_struct_shmids: | |
761 | free(ltt_chan->buf_struct_shmids); | |
b5b073e2 | 762 | |
204141ee PMF |
763 | free_buf: |
764 | free(ltt_chan->buf); | |
765 | ||
766 | error: | |
767 | return -1; | |
b5b073e2 PMF |
768 | } |
769 | ||
770 | /* | |
771 | * LTTng channel flush function. | |
772 | * | |
773 | * Must be called when no tracing is active in the channel, because of | |
774 | * accesses across CPUs. | |
775 | */ | |
776 | static notrace void ltt_relay_buffer_flush(struct ust_buffer *buf) | |
777 | { | |
778 | int result; | |
779 | ||
780 | //ust// buf->finalized = 1; | |
781 | ltt_force_switch(buf, FORCE_FLUSH); | |
782 | ||
783 | result = write(buf->data_ready_fd_write, "1", 1); | |
784 | if(result == -1) { | |
785 | PERROR("write (in ltt_relay_buffer_flush)"); | |
786 | ERR("this should never happen!"); | |
787 | } | |
788 | } | |
789 | ||
790 | static void ltt_relay_async_wakeup_chan(struct ust_channel *ltt_channel) | |
791 | { | |
792 | //ust// unsigned int i; | |
793 | //ust// struct rchan *rchan = ltt_channel->trans_channel_data; | |
794 | //ust// | |
795 | //ust// for_each_possible_cpu(i) { | |
796 | //ust// struct ltt_channel_buf_struct *ltt_buf = | |
797 | //ust// percpu_ptr(ltt_channel->buf, i); | |
798 | //ust// | |
799 | //ust// if (atomic_read(<t_buf->wakeup_readers) == 1) { | |
800 | //ust// atomic_set(<t_buf->wakeup_readers, 0); | |
801 | //ust// wake_up_interruptible(&rchan->buf[i]->read_wait); | |
802 | //ust// } | |
803 | //ust// } | |
804 | } | |
805 | ||
204141ee | 806 | static void ltt_relay_finish_buffer(struct ust_channel *channel, unsigned int cpu) |
b5b073e2 PMF |
807 | { |
808 | // int result; | |
809 | ||
204141ee PMF |
810 | if (channel->buf[cpu]) { |
811 | struct ust_buffer *buf = channel->buf[cpu]; | |
b5b073e2 PMF |
812 | ltt_relay_buffer_flush(buf); |
813 | //ust// ltt_relay_wake_writers(ltt_buf); | |
814 | /* closing the pipe tells the consumer the buffer is finished */ | |
815 | ||
816 | //result = write(ltt_buf->data_ready_fd_write, "D", 1); | |
817 | //if(result == -1) { | |
818 | // PERROR("write (in ltt_relay_finish_buffer)"); | |
819 | // ERR("this should never happen!"); | |
820 | //} | |
821 | close(buf->data_ready_fd_write); | |
822 | } | |
823 | } | |
824 | ||
825 | ||
826 | static void ltt_relay_finish_channel(struct ust_channel *channel) | |
827 | { | |
204141ee | 828 | unsigned int i; |
b5b073e2 | 829 | |
204141ee PMF |
830 | for(i=0; i<channel->n_cpus; i++) { |
831 | ltt_relay_finish_buffer(channel, i); | |
832 | } | |
b5b073e2 PMF |
833 | } |
834 | ||
835 | static void ltt_relay_remove_channel(struct ust_channel *channel) | |
836 | { | |
837 | ust_buffers_channel_close(channel); | |
838 | kref_put(&channel->kref, ltt_relay_release_channel); | |
839 | } | |
840 | ||
841 | struct ltt_reserve_switch_offsets { | |
842 | long begin, end, old; | |
843 | long begin_switch, end_switch_current, end_switch_old; | |
844 | long commit_count, reserve_commit_diff; | |
845 | size_t before_hdr_pad, size; | |
846 | }; | |
847 | ||
848 | /* | |
849 | * Returns : | |
850 | * 0 if ok | |
851 | * !0 if execution must be aborted. | |
852 | */ | |
853 | static inline int ltt_relay_try_reserve( | |
854 | struct ust_channel *channel, struct ust_buffer *buf, | |
855 | struct ltt_reserve_switch_offsets *offsets, size_t data_size, | |
856 | u64 *tsc, unsigned int *rflags, int largest_align) | |
857 | { | |
858 | offsets->begin = local_read(&buf->offset); | |
859 | offsets->old = offsets->begin; | |
860 | offsets->begin_switch = 0; | |
861 | offsets->end_switch_current = 0; | |
862 | offsets->end_switch_old = 0; | |
863 | ||
864 | *tsc = trace_clock_read64(); | |
865 | if (last_tsc_overflow(buf, *tsc)) | |
866 | *rflags = LTT_RFLAG_ID_SIZE_TSC; | |
867 | ||
868 | if (SUBBUF_OFFSET(offsets->begin, buf->chan) == 0) { | |
869 | offsets->begin_switch = 1; /* For offsets->begin */ | |
870 | } else { | |
871 | offsets->size = ust_get_header_size(channel, | |
872 | offsets->begin, data_size, | |
873 | &offsets->before_hdr_pad, *rflags); | |
874 | offsets->size += ltt_align(offsets->begin + offsets->size, | |
875 | largest_align) | |
876 | + data_size; | |
877 | if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size) | |
878 | > buf->chan->subbuf_size) { | |
879 | offsets->end_switch_old = 1; /* For offsets->old */ | |
880 | offsets->begin_switch = 1; /* For offsets->begin */ | |
881 | } | |
882 | } | |
883 | if (offsets->begin_switch) { | |
884 | long subbuf_index; | |
885 | ||
886 | if (offsets->end_switch_old) | |
887 | offsets->begin = SUBBUF_ALIGN(offsets->begin, | |
888 | buf->chan); | |
889 | offsets->begin = offsets->begin + ltt_subbuffer_header_size(); | |
890 | /* Test new buffer integrity */ | |
891 | subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan); | |
892 | offsets->reserve_commit_diff = | |
893 | (BUFFER_TRUNC(offsets->begin, buf->chan) | |
894 | >> channel->n_subbufs_order) | |
895 | - (local_read(&buf->commit_count[subbuf_index]) | |
896 | & channel->commit_count_mask); | |
897 | if (offsets->reserve_commit_diff == 0) { | |
898 | long consumed; | |
899 | ||
900 | consumed = atomic_long_read(&buf->consumed); | |
901 | ||
902 | /* Next buffer not corrupted. */ | |
903 | if (!channel->overwrite && | |
904 | (SUBBUF_TRUNC(offsets->begin, buf->chan) | |
905 | - SUBBUF_TRUNC(consumed, buf->chan)) | |
906 | >= channel->alloc_size) { | |
907 | ||
908 | long consumed_idx = SUBBUF_INDEX(consumed, buf->chan); | |
909 | long commit_count = local_read(&buf->commit_count[consumed_idx]); | |
910 | if(((commit_count - buf->chan->subbuf_size) & channel->commit_count_mask) - (BUFFER_TRUNC(consumed, buf->chan) >> channel->n_subbufs_order) != 0) { | |
911 | WARN("Event dropped. Caused by non-committed event."); | |
912 | } | |
913 | else { | |
914 | WARN("Event dropped. Caused by non-consumed buffer."); | |
915 | } | |
916 | /* | |
917 | * We do not overwrite non consumed buffers | |
918 | * and we are full : event is lost. | |
919 | */ | |
920 | local_inc(&buf->events_lost); | |
921 | return -1; | |
922 | } else { | |
923 | /* | |
924 | * next buffer not corrupted, we are either in | |
925 | * overwrite mode or the buffer is not full. | |
926 | * It's safe to write in this new subbuffer. | |
927 | */ | |
928 | } | |
929 | } else { | |
930 | /* | |
931 | * Next subbuffer corrupted. Force pushing reader even | |
932 | * in normal mode. It's safe to write in this new | |
933 | * subbuffer. | |
934 | */ | |
935 | } | |
936 | offsets->size = ust_get_header_size(channel, | |
937 | offsets->begin, data_size, | |
938 | &offsets->before_hdr_pad, *rflags); | |
939 | offsets->size += ltt_align(offsets->begin + offsets->size, | |
940 | largest_align) | |
941 | + data_size; | |
942 | if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size) | |
943 | > buf->chan->subbuf_size) { | |
944 | /* | |
945 | * Event too big for subbuffers, report error, don't | |
946 | * complete the sub-buffer switch. | |
947 | */ | |
948 | local_inc(&buf->events_lost); | |
949 | return -1; | |
950 | } else { | |
951 | /* | |
952 | * We just made a successful buffer switch and the event | |
953 | * fits in the new subbuffer. Let's write. | |
954 | */ | |
955 | } | |
956 | } else { | |
957 | /* | |
958 | * Event fits in the current buffer and we are not on a switch | |
959 | * boundary. It's safe to write. | |
960 | */ | |
961 | } | |
962 | offsets->end = offsets->begin + offsets->size; | |
963 | ||
964 | if ((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0) { | |
965 | /* | |
966 | * The offset_end will fall at the very beginning of the next | |
967 | * subbuffer. | |
968 | */ | |
969 | offsets->end_switch_current = 1; /* For offsets->begin */ | |
970 | } | |
971 | return 0; | |
972 | } | |
973 | ||
974 | /* | |
975 | * Returns : | |
976 | * 0 if ok | |
977 | * !0 if execution must be aborted. | |
978 | */ | |
979 | static inline int ltt_relay_try_switch( | |
980 | enum force_switch_mode mode, | |
981 | struct ust_channel *channel, | |
982 | struct ust_buffer *buf, | |
983 | struct ltt_reserve_switch_offsets *offsets, | |
984 | u64 *tsc) | |
985 | { | |
986 | long subbuf_index; | |
987 | ||
988 | offsets->begin = local_read(&buf->offset); | |
989 | offsets->old = offsets->begin; | |
990 | offsets->begin_switch = 0; | |
991 | offsets->end_switch_old = 0; | |
992 | ||
993 | *tsc = trace_clock_read64(); | |
994 | ||
995 | if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) { | |
996 | offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan); | |
997 | offsets->end_switch_old = 1; | |
998 | } else { | |
999 | /* we do not have to switch : buffer is empty */ | |
1000 | return -1; | |
1001 | } | |
1002 | if (mode == FORCE_ACTIVE) | |
1003 | offsets->begin += ltt_subbuffer_header_size(); | |
1004 | /* | |
1005 | * Always begin_switch in FORCE_ACTIVE mode. | |
1006 | * Test new buffer integrity | |
1007 | */ | |
1008 | subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan); | |
1009 | offsets->reserve_commit_diff = | |
1010 | (BUFFER_TRUNC(offsets->begin, buf->chan) | |
1011 | >> channel->n_subbufs_order) | |
1012 | - (local_read(&buf->commit_count[subbuf_index]) | |
1013 | & channel->commit_count_mask); | |
1014 | if (offsets->reserve_commit_diff == 0) { | |
1015 | /* Next buffer not corrupted. */ | |
1016 | if (mode == FORCE_ACTIVE | |
1017 | && !channel->overwrite | |
1018 | && offsets->begin - atomic_long_read(&buf->consumed) | |
1019 | >= channel->alloc_size) { | |
1020 | /* | |
1021 | * We do not overwrite non consumed buffers and we are | |
1022 | * full : ignore switch while tracing is active. | |
1023 | */ | |
1024 | return -1; | |
1025 | } | |
1026 | } else { | |
1027 | /* | |
1028 | * Next subbuffer corrupted. Force pushing reader even in normal | |
1029 | * mode | |
1030 | */ | |
1031 | } | |
1032 | offsets->end = offsets->begin; | |
1033 | return 0; | |
1034 | } | |
1035 | ||
1036 | static inline void ltt_reserve_push_reader( | |
1037 | struct ust_channel *channel, | |
1038 | struct ust_buffer *buf, | |
1039 | struct ltt_reserve_switch_offsets *offsets) | |
1040 | { | |
1041 | long consumed_old, consumed_new; | |
1042 | ||
1043 | do { | |
1044 | consumed_old = atomic_long_read(&buf->consumed); | |
1045 | /* | |
1046 | * If buffer is in overwrite mode, push the reader consumed | |
1047 | * count if the write position has reached it and we are not | |
1048 | * at the first iteration (don't push the reader farther than | |
1049 | * the writer). This operation can be done concurrently by many | |
1050 | * writers in the same buffer, the writer being at the farthest | |
1051 | * write position sub-buffer index in the buffer being the one | |
1052 | * which will win this loop. | |
1053 | * If the buffer is not in overwrite mode, pushing the reader | |
1054 | * only happens if a sub-buffer is corrupted. | |
1055 | */ | |
1056 | if ((SUBBUF_TRUNC(offsets->end-1, buf->chan) | |
1057 | - SUBBUF_TRUNC(consumed_old, buf->chan)) | |
1058 | >= channel->alloc_size) | |
1059 | consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan); | |
1060 | else { | |
1061 | consumed_new = consumed_old; | |
1062 | break; | |
1063 | } | |
1064 | } while (atomic_long_cmpxchg(&buf->consumed, consumed_old, | |
1065 | consumed_new) != consumed_old); | |
1066 | ||
1067 | if (consumed_old != consumed_new) { | |
1068 | /* | |
1069 | * Reader pushed : we are the winner of the push, we can | |
1070 | * therefore reequilibrate reserve and commit. Atomic increment | |
1071 | * of the commit count permits other writers to play around | |
1072 | * with this variable before us. We keep track of | |
1073 | * corrupted_subbuffers even in overwrite mode : | |
1074 | * we never want to write over a non completely committed | |
1075 | * sub-buffer : possible causes : the buffer size is too low | |
1076 | * compared to the unordered data input, or there is a writer | |
1077 | * that died between the reserve and the commit. | |
1078 | */ | |
1079 | if (offsets->reserve_commit_diff) { | |
1080 | /* | |
1081 | * We have to alter the sub-buffer commit count. | |
1082 | * We do not deliver the previous subbuffer, given it | |
1083 | * was either corrupted or not consumed (overwrite | |
1084 | * mode). | |
1085 | */ | |
1086 | local_add(offsets->reserve_commit_diff, | |
1087 | &buf->commit_count[ | |
1088 | SUBBUF_INDEX(offsets->begin, | |
1089 | buf->chan)]); | |
1090 | if (!channel->overwrite | |
1091 | || offsets->reserve_commit_diff | |
1092 | != channel->subbuf_size) { | |
1093 | /* | |
1094 | * The reserve commit diff was not subbuf_size : | |
1095 | * it means the subbuffer was partly written to | |
1096 | * and is therefore corrupted. If it is multiple | |
1097 | * of subbuffer size and we are in flight | |
1098 | * recorder mode, we are skipping over a whole | |
1099 | * subbuffer. | |
1100 | */ | |
1101 | local_inc(&buf->corrupted_subbuffers); | |
1102 | } | |
1103 | } | |
1104 | } | |
1105 | } | |
1106 | ||
1107 | ||
1108 | /* | |
1109 | * ltt_reserve_switch_old_subbuf: switch old subbuffer | |
1110 | * | |
1111 | * Concurrency safe because we are the last and only thread to alter this | |
1112 | * sub-buffer. As long as it is not delivered and read, no other thread can | |
1113 | * alter the offset, alter the reserve_count or call the | |
1114 | * client_buffer_end_callback on this sub-buffer. | |
1115 | * | |
1116 | * The only remaining threads could be the ones with pending commits. They will | |
1117 | * have to do the deliver themselves. Not concurrency safe in overwrite mode. | |
1118 | * We detect corrupted subbuffers with commit and reserve counts. We keep a | |
1119 | * corrupted sub-buffers count and push the readers across these sub-buffers. | |
1120 | * | |
1121 | * Not concurrency safe if a writer is stalled in a subbuffer and another writer | |
1122 | * switches in, finding out it's corrupted. The result will be than the old | |
1123 | * (uncommited) subbuffer will be declared corrupted, and that the new subbuffer | |
1124 | * will be declared corrupted too because of the commit count adjustment. | |
1125 | * | |
1126 | * Note : offset_old should never be 0 here. | |
1127 | */ | |
1128 | static inline void ltt_reserve_switch_old_subbuf( | |
1129 | struct ust_channel *channel, | |
1130 | struct ust_buffer *buf, | |
1131 | struct ltt_reserve_switch_offsets *offsets, u64 *tsc) | |
1132 | { | |
1133 | long oldidx = SUBBUF_INDEX(offsets->old - 1, channel); | |
1134 | ||
1135 | channel->buffer_end(buf, *tsc, offsets->old, oldidx); | |
1136 | /* Must write buffer end before incrementing commit count */ | |
1137 | smp_wmb(); | |
1138 | offsets->commit_count = | |
1139 | local_add_return(channel->subbuf_size | |
1140 | - (SUBBUF_OFFSET(offsets->old - 1, channel) | |
1141 | + 1), | |
1142 | &buf->commit_count[oldidx]); | |
1143 | if ((BUFFER_TRUNC(offsets->old - 1, channel) | |
1144 | >> channel->n_subbufs_order) | |
1145 | - ((offsets->commit_count - channel->subbuf_size) | |
1146 | & channel->commit_count_mask) == 0) | |
1147 | ltt_deliver(buf, oldidx, offsets->commit_count); | |
1148 | } | |
1149 | ||
1150 | /* | |
1151 | * ltt_reserve_switch_new_subbuf: Populate new subbuffer. | |
1152 | * | |
1153 | * This code can be executed unordered : writers may already have written to the | |
1154 | * sub-buffer before this code gets executed, caution. The commit makes sure | |
1155 | * that this code is executed before the deliver of this sub-buffer. | |
1156 | */ | |
1157 | static /*inline*/ void ltt_reserve_switch_new_subbuf( | |
1158 | struct ust_channel *channel, | |
1159 | struct ust_buffer *buf, | |
1160 | struct ltt_reserve_switch_offsets *offsets, u64 *tsc) | |
1161 | { | |
1162 | long beginidx = SUBBUF_INDEX(offsets->begin, channel); | |
1163 | ||
1164 | channel->buffer_begin(buf, *tsc, beginidx); | |
1165 | /* Must write buffer end before incrementing commit count */ | |
1166 | smp_wmb(); | |
1167 | offsets->commit_count = local_add_return(ltt_subbuffer_header_size(), | |
1168 | &buf->commit_count[beginidx]); | |
1169 | /* Check if the written buffer has to be delivered */ | |
1170 | if ((BUFFER_TRUNC(offsets->begin, channel) | |
1171 | >> channel->n_subbufs_order) | |
1172 | - ((offsets->commit_count - channel->subbuf_size) | |
1173 | & channel->commit_count_mask) == 0) | |
1174 | ltt_deliver(buf, beginidx, offsets->commit_count); | |
1175 | } | |
1176 | ||
1177 | ||
1178 | /* | |
1179 | * ltt_reserve_end_switch_current: finish switching current subbuffer | |
1180 | * | |
1181 | * Concurrency safe because we are the last and only thread to alter this | |
1182 | * sub-buffer. As long as it is not delivered and read, no other thread can | |
1183 | * alter the offset, alter the reserve_count or call the | |
1184 | * client_buffer_end_callback on this sub-buffer. | |
1185 | * | |
1186 | * The only remaining threads could be the ones with pending commits. They will | |
1187 | * have to do the deliver themselves. Not concurrency safe in overwrite mode. | |
1188 | * We detect corrupted subbuffers with commit and reserve counts. We keep a | |
1189 | * corrupted sub-buffers count and push the readers across these sub-buffers. | |
1190 | * | |
1191 | * Not concurrency safe if a writer is stalled in a subbuffer and another writer | |
1192 | * switches in, finding out it's corrupted. The result will be than the old | |
1193 | * (uncommited) subbuffer will be declared corrupted, and that the new subbuffer | |
1194 | * will be declared corrupted too because of the commit count adjustment. | |
1195 | */ | |
1196 | static inline void ltt_reserve_end_switch_current( | |
1197 | struct ust_channel *channel, | |
1198 | struct ust_buffer *buf, | |
1199 | struct ltt_reserve_switch_offsets *offsets, u64 *tsc) | |
1200 | { | |
1201 | long endidx = SUBBUF_INDEX(offsets->end - 1, channel); | |
1202 | ||
1203 | channel->buffer_end(buf, *tsc, offsets->end, endidx); | |
1204 | /* Must write buffer begin before incrementing commit count */ | |
1205 | smp_wmb(); | |
1206 | offsets->commit_count = | |
1207 | local_add_return(channel->subbuf_size | |
1208 | - (SUBBUF_OFFSET(offsets->end - 1, channel) | |
1209 | + 1), | |
1210 | &buf->commit_count[endidx]); | |
1211 | if ((BUFFER_TRUNC(offsets->end - 1, channel) | |
1212 | >> channel->n_subbufs_order) | |
1213 | - ((offsets->commit_count - channel->subbuf_size) | |
1214 | & channel->commit_count_mask) == 0) | |
1215 | ltt_deliver(buf, endidx, offsets->commit_count); | |
1216 | } | |
1217 | ||
1218 | /** | |
1219 | * ltt_relay_reserve_slot - Atomic slot reservation in a LTTng buffer. | |
1220 | * @trace: the trace structure to log to. | |
1221 | * @ltt_channel: channel structure | |
1222 | * @transport_data: data structure specific to ltt relay | |
1223 | * @data_size: size of the variable length data to log. | |
1224 | * @slot_size: pointer to total size of the slot (out) | |
1225 | * @buf_offset : pointer to reserved buffer offset (out) | |
1226 | * @tsc: pointer to the tsc at the slot reservation (out) | |
1227 | * @cpu: cpuid | |
1228 | * | |
1229 | * Return : -ENOSPC if not enough space, else returns 0. | |
1230 | * It will take care of sub-buffer switching. | |
1231 | */ | |
1232 | static notrace int ltt_relay_reserve_slot(struct ltt_trace_struct *trace, | |
1233 | struct ust_channel *channel, void **transport_data, | |
1234 | size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc, | |
204141ee | 1235 | unsigned int *rflags, int largest_align, int cpu) |
b5b073e2 | 1236 | { |
204141ee | 1237 | struct ust_buffer *buf = *transport_data = channel->buf[cpu]; |
b5b073e2 PMF |
1238 | struct ltt_reserve_switch_offsets offsets; |
1239 | ||
1240 | offsets.reserve_commit_diff = 0; | |
1241 | offsets.size = 0; | |
1242 | ||
1243 | /* | |
1244 | * Perform retryable operations. | |
1245 | */ | |
1246 | if (ltt_nesting > 4) { | |
1247 | local_inc(&buf->events_lost); | |
1248 | return -EPERM; | |
1249 | } | |
1250 | do { | |
1251 | if (ltt_relay_try_reserve(channel, buf, &offsets, data_size, tsc, rflags, | |
1252 | largest_align)) | |
1253 | return -ENOSPC; | |
1254 | } while (local_cmpxchg(&buf->offset, offsets.old, | |
1255 | offsets.end) != offsets.old); | |
1256 | ||
1257 | /* | |
1258 | * Atomically update last_tsc. This update races against concurrent | |
1259 | * atomic updates, but the race will always cause supplementary full TSC | |
1260 | * events, never the opposite (missing a full TSC event when it would be | |
1261 | * needed). | |
1262 | */ | |
1263 | save_last_tsc(buf, *tsc); | |
1264 | ||
1265 | /* | |
1266 | * Push the reader if necessary | |
1267 | */ | |
1268 | ltt_reserve_push_reader(channel, buf, &offsets); | |
1269 | ||
1270 | /* | |
1271 | * Switch old subbuffer if needed. | |
1272 | */ | |
1273 | if (offsets.end_switch_old) | |
1274 | ltt_reserve_switch_old_subbuf(channel, buf, &offsets, tsc); | |
1275 | ||
1276 | /* | |
1277 | * Populate new subbuffer. | |
1278 | */ | |
1279 | if (offsets.begin_switch) | |
1280 | ltt_reserve_switch_new_subbuf(channel, buf, &offsets, tsc); | |
1281 | ||
1282 | if (offsets.end_switch_current) | |
1283 | ltt_reserve_end_switch_current(channel, buf, &offsets, tsc); | |
1284 | ||
1285 | *slot_size = offsets.size; | |
1286 | *buf_offset = offsets.begin + offsets.before_hdr_pad; | |
1287 | return 0; | |
1288 | } | |
1289 | ||
1290 | /* | |
1291 | * Force a sub-buffer switch for a per-cpu buffer. This operation is | |
1292 | * completely reentrant : can be called while tracing is active with | |
1293 | * absolutely no lock held. | |
1294 | * | |
1295 | * Note, however, that as a local_cmpxchg is used for some atomic | |
1296 | * operations, this function must be called from the CPU which owns the buffer | |
1297 | * for a ACTIVE flush. | |
1298 | */ | |
1299 | static notrace void ltt_force_switch(struct ust_buffer *buf, | |
1300 | enum force_switch_mode mode) | |
1301 | { | |
1302 | struct ust_channel *channel = buf->chan; | |
1303 | struct ltt_reserve_switch_offsets offsets; | |
1304 | u64 tsc; | |
1305 | ||
1306 | offsets.reserve_commit_diff = 0; | |
1307 | offsets.size = 0; | |
1308 | ||
1309 | /* | |
1310 | * Perform retryable operations. | |
1311 | */ | |
1312 | do { | |
1313 | if (ltt_relay_try_switch(mode, channel, buf, &offsets, &tsc)) | |
1314 | return; | |
1315 | } while (local_cmpxchg(&buf->offset, offsets.old, | |
1316 | offsets.end) != offsets.old); | |
1317 | ||
1318 | /* | |
1319 | * Atomically update last_tsc. This update races against concurrent | |
1320 | * atomic updates, but the race will always cause supplementary full TSC | |
1321 | * events, never the opposite (missing a full TSC event when it would be | |
1322 | * needed). | |
1323 | */ | |
1324 | save_last_tsc(buf, tsc); | |
1325 | ||
1326 | /* | |
1327 | * Push the reader if necessary | |
1328 | */ | |
1329 | if (mode == FORCE_ACTIVE) | |
1330 | ltt_reserve_push_reader(channel, buf, &offsets); | |
1331 | ||
1332 | /* | |
1333 | * Switch old subbuffer if needed. | |
1334 | */ | |
1335 | if (offsets.end_switch_old) | |
1336 | ltt_reserve_switch_old_subbuf(channel, buf, &offsets, &tsc); | |
1337 | ||
1338 | /* | |
1339 | * Populate new subbuffer. | |
1340 | */ | |
1341 | if (mode == FORCE_ACTIVE) | |
1342 | ltt_reserve_switch_new_subbuf(channel, buf, &offsets, &tsc); | |
1343 | } | |
1344 | ||
static struct ltt_transport ust_relay_transport = {
	.name = "ustrelay",
	.ops = {
		.create_channel = ust_buffers_create_channel,
		.finish_channel = ltt_relay_finish_channel,
		.remove_channel = ltt_relay_remove_channel,
		.wakeup_channel = ltt_relay_async_wakeup_chan,
//		.commit_slot = ltt_relay_commit_slot,
		.reserve_slot = ltt_relay_reserve_slot,
	},
};

/*
 * for flight recording. must be called after relay_commit.
 * This function decrements the subbuffer's lost_size each time the commit count
 * reaches back the reserve offset (modulo subbuffer size). It is useful for
 * crash dump.
 */
static /* inline */ void ltt_write_commit_counter(struct ust_buffer *buf,
		struct ust_buffer *ltt_buf,
		long idx, long buf_offset, long commit_count, size_t data_size)
{
	long offset;
	long commit_seq_old;

	offset = buf_offset + data_size;

	/*
	 * SUBBUF_OFFSET includes commit_count_mask. We can simply
	 * compare the offsets within the subbuffer without caring about
	 * buffer full/empty mismatch because offset is never zero here
	 * (subbuffer header and event headers have non-zero length).
	 */
	if (unlikely(SUBBUF_OFFSET(offset - commit_count, buf->chan)))
		return;

	commit_seq_old = local_read(&ltt_buf->commit_seq[idx]);
	while (commit_seq_old < commit_count)
		commit_seq_old = local_cmpxchg(&ltt_buf->commit_seq[idx],
					commit_seq_old, commit_count);
}

/*
 * Atomic unordered slot commit. Increments the commit count in the
 * specified sub-buffer, and delivers it if necessary.
 *
 * Parameters:
 *
 * @ltt_channel : channel structure
 * @transport_data: transport-specific data
 * @buf_offset : offset following the event header.
 * @data_size : size of the event data.
 * @slot_size : size of the reserved slot.
 */
/* FIXME: make this function static inline in the .h! */
/*static*/ /* inline */ notrace void ltt_commit_slot(
		struct ust_channel *channel,
		void **transport_data, long buf_offset,
		size_t data_size, size_t slot_size)
{
	struct ust_buffer *buf = *transport_data;
	long offset_end = buf_offset;
	long endidx = SUBBUF_INDEX(offset_end - 1, channel);
	long commit_count;

	/* Must write slot data before incrementing commit count */
	smp_wmb();
	commit_count = local_add_return(slot_size,
			&buf->commit_count[endidx]);
	/* Check if all commits have been done */
	if ((BUFFER_TRUNC(offset_end - 1, channel)
	     >> channel->n_subbufs_order)
	    - ((commit_count - channel->subbuf_size)
	       & channel->commit_count_mask) == 0)
		ltt_deliver(buf, endidx, commit_count);
	/*
	 * Update lost_size for each commit. It's needed only for extracting
	 * ltt buffers from vmcore, after crash.
	 */
	ltt_write_commit_counter(buf, buf, endidx,
			buf_offset, commit_count, data_size);
}
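
/*
 * Writer-side sketch of the reserve/commit protocol (illustrative only;
 * the variable setup and the cpu argument shown are assumptions, not
 * code from this file). A probe reserves a slot, writes the event at the
 * reserved offset, then commits the slot:
 *
 *	size_t slot_size;
 *	long buf_offset;
 *	u64 tsc;
 *	unsigned int rflags = 0;
 *	void *transport_data;
 *
 *	if (ltt_relay_reserve_slot(trace, channel, &transport_data,
 *			data_size, &slot_size, &buf_offset, &tsc,
 *			&rflags, sizeof(long), cpu) == 0) {
 *		// write the event header and payload at buf_offset ...
 *		ltt_commit_slot(channel, &transport_data, buf_offset,
 *				data_size, slot_size);
 *	}
 */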
1427 | ||
1428 | ||
1429 | static char initialized = 0; | |
1430 | ||
1431 | void __attribute__((constructor)) init_ustrelay_transport(void) | |
1432 | { | |
1433 | if(!initialized) { | |
1434 | ltt_transport_register(&ust_relay_transport); | |
1435 | initialized = 1; | |
1436 | } | |
1437 | } | |
1438 | ||
1439 | static void __attribute__((destructor)) ltt_relay_exit(void) | |
1440 | { | |
1441 | ltt_transport_unregister(&ust_relay_transport); | |
1442 | } |