libust/serialize.c (ust.git)
/*
 * LTTng serializing code.
 *
 * Copyright Mathieu Desnoyers, March 2007.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 *
 * Note the oddity of passing a va_list and then a va_list * down to other
 * functions (related to array argument passing): va_list is implemented as
 * an array type on x86_64, but not on i386. This is why we pass a
 * va_list * to ltt_vtrace.
 */
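
/*
 * Illustrative sketch (added for this edit, not in the original file) of the
 * va_list-by-pointer convention described above: passing an explicit
 * va_list * lets several callees consume arguments from the same list,
 * regardless of whether va_list is an array type (x86_64) or not (i386).
 */
#if 0
#include <stdarg.h>
#include <stdio.h>

static void consume_two(va_list *ap)
{
	/* Both va_arg() calls advance the caller's list through the pointer. */
	printf("%d\n", va_arg(*ap, int));
	printf("%d\n", va_arg(*ap, int));
}

static void demo(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	consume_two(&ap);	/* pass by pointer, as ltt_vtrace() does */
	va_end(ap);
}
#endif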

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <stdarg.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>
#include <assert.h>	/* assert() is used when reserving tracer stack entries */

#define _LGPL_SOURCE
#include <urcu-bp.h>
#include <urcu/rculist.h>

#include <ust/core.h>
#include <ust/clock.h>
#include "buffers.h"
#include "tracer.h"
#include "usterr.h"
#include "ust_snprintf.h"

/*
 * Because UST core defines a non-const PAGE_SIZE, define PAGE_SIZE_STATIC here.
 * It is just an approximation for the tracer stack.
 */
#define PAGE_SIZE_STATIC 4096

enum ltt_type {
	LTT_TYPE_SIGNED_INT,
	LTT_TYPE_UNSIGNED_INT,
	LTT_TYPE_STRING,
	LTT_TYPE_NONE,
};

/*
 * Special stack for the tracer. Keeps serialization offsets for each field.
 * Per-thread. Deals with reentrancy from signals by simply ensuring that
 * interrupting signals put the stack back to its original position.
 */
#define TRACER_STACK_LEN (PAGE_SIZE_STATIC / sizeof(unsigned long))
static unsigned long __thread tracer_stack[TRACER_STACK_LEN];

static unsigned int __thread tracer_stack_pos;

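/*
 * Sketch (added for this edit, not in the original file) of how the tracer
 * stack copes with reentrancy: a nested tracing call (e.g. from a signal
 * handler) may push its own entries, but it restores tracer_stack_pos on
 * exit, so the interrupted call finds the stack exactly where it left it.
 * ltt_vtrace() saves and restores the position the same way.
 */
#if 0
static void nested_use_sketch(void)
{
	unsigned int saved_pos = tracer_stack_pos;

	tracer_stack[tracer_stack_pos++] = 42;	/* push one entry */
	/* ... serialize using the entry ... */
	tracer_stack_pos = saved_pos;		/* put the stack back */
}
#endif
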
#define LTT_ATTRIBUTE_NETWORK_BYTE_ORDER (1<<1)

/*
 * Inspired by vsnprintf
 *
 * The serialization format string supports the basic printf format strings.
 * In addition, it defines new formats that can be used to serialize more
 * complex/non-portable data structures.
 *
 * Typical use:
 *
 * field_name %ctype
 * field_name #tracetype %ctype
 * field_name #tracetype %ctype1 %ctype2 ...
 *
 * A conversion is performed between format string types supported by GCC and
 * the trace type requested. GCC type is used to perform type checking on format
 * strings. Trace type is used to specify the exact binary representation
 * in the trace. A mapping is done from one or more GCC types to one trace
 * type. Sign extension, if required by the conversion, is performed following
 * the trace type.
 *
 * If a gcc format is not declared with a trace format, the gcc format is
 * also used as binary representation in the trace.
 *
 * Strings are supported with %s.
 * A single tracetype (sequence) can take multiple c types as parameter.
 *
 * c types:
 *
 * see printf(3).
 *
 * Note: to write a uint32_t in a trace, the following expression is recommended
 * so it can be portable:
 *
 * ("#4u%lu", (unsigned long)var)
 *
 * trace types:
 *
 * Serialization specific formats:
 *
 *   Fixed size integers
 *   #1u     writes uint8_t
 *   #2u     writes uint16_t
 *   #4u     writes uint32_t
 *   #8u     writes uint64_t
 *   #1d     writes int8_t
 *   #2d     writes int16_t
 *   #4d     writes int32_t
 *   #8d     writes int64_t
 *   i.e.:
 *   #1u%lu #2u%lu #4d%lu #8d%lu #llu%hu #d%lu
 *
 * * Attributes:
 *
 * n: (for network byte order)
 *	#ntracetype%ctype
 *		is written in the trace in network byte order.
 *
 * i.e.: #bn4u%lu, #n%lu, #b%u
 *
 * TODO (eventually)
 *	Variable length sequence
 *	#a #tracetype1 #tracetype2 %array_ptr %elem_size %num_elems
 *		In the trace:
 *		#a specifies that this is a sequence
 *		#tracetype1 is the type of elements in the sequence
 *		#tracetype2 is the type of the element count
 *		GCC input:
 *		array_ptr is a pointer to an array that contains members of size
 *		elem_size.
 *		num_elems is the number of elements in the array.
 *	i.e.: #a #lu #lu %p %lu %u
 *
 *	Callback
 *	#k callback (taken from the probe data)
 *	The following % arguments are expected by the callback
 *
 *	i.e.: #a #lu #lu #k %p
 *
 * Note: No conversion is done from floats to integers, nor from integers to
 * floats between c types and trace types. float conversion from double to float
 * or from float to double is also not supported.
 *
 * REMOVE
 * %*b expects sizeof(data), data
 *     where sizeof(data) is 1, 2, 4 or 8
 *
 * Fixed length struct, union or array.
 * FIXME: unable to extract those sizes statically.
 * %*r expects sizeof(*ptr), ptr
 * %*.*r expects sizeof(*ptr), __alignof__(*ptr), ptr
 * struct and unions removed.
 * Fixed length array:
 * [%p]#a[len #tracetype]
 * i.e.: [%p]#a[12 #lu]
 *
 * Variable length sequence
 * %*.*:*v expects sizeof(*ptr), __alignof__(*ptr), elem_num, ptr
 *     where elem_num is the number of elements in the sequence
 */
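
/*
 * Illustrative example (added for this edit, not in the original file).
 * Assuming the usual ust/marker.h trace_mark() entry point and the default
 * "ust" channel, a marker using the formats documented above could look like:
 */
#if 0
#include <ust/marker.h>

static void format_string_example(unsigned long addr, int is_write,
				  const char *name)
{
	/*
	 * "address" is stored as a fixed-size uint64_t (#8u) whatever the
	 * host "long" size, "write" as a plain int, "name" as a string.
	 */
	trace_mark(ust, page_fault, "address #8u%lu write %d name %s",
		   addr, is_write, name);
}
#endif
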
static inline const char *parse_trace_type(const char *fmt,
		char *trace_size, enum ltt_type *trace_type,
		unsigned long *attributes)
{
	int qualifier;		/* 'h', 'l', or 'L' for integer fields */
				/* 'z' support added 23/7/1999 S.H. */
				/* 'z' changed to 'Z' --davidm 1/25/99 */
				/* 't' added for ptrdiff_t */

	/* parse attributes. */
repeat:
	switch (*fmt) {
	case 'n':
		*attributes |= LTT_ATTRIBUTE_NETWORK_BYTE_ORDER;
		++fmt;
		goto repeat;
	}

	/* get the conversion qualifier */
	qualifier = -1;
	if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
	    *fmt == 'Z' || *fmt == 'z' || *fmt == 't' ||
	    *fmt == 'S' || *fmt == '1' || *fmt == '2' ||
	    *fmt == '4' || *fmt == '8') {
		qualifier = *fmt;
		++fmt;
		if (qualifier == 'l' && *fmt == 'l') {
			qualifier = 'L';
			++fmt;
		}
	}

	switch (*fmt) {
	case 'c':
		*trace_type = LTT_TYPE_UNSIGNED_INT;
		*trace_size = sizeof(unsigned char);
		goto parse_end;
	case 's':
		*trace_type = LTT_TYPE_STRING;
		goto parse_end;
	case 'p':
		*trace_type = LTT_TYPE_UNSIGNED_INT;
		*trace_size = sizeof(void *);
		goto parse_end;
	case 'd':
	case 'i':
		*trace_type = LTT_TYPE_SIGNED_INT;
		break;
	case 'o':
	case 'u':
	case 'x':
	case 'X':
		*trace_type = LTT_TYPE_UNSIGNED_INT;
		break;
	default:
		if (!*fmt)
			--fmt;
		goto parse_end;
	}
	switch (qualifier) {
	case 'L':
		*trace_size = sizeof(long long);
		break;
	case 'l':
		*trace_size = sizeof(long);
		break;
	case 'Z':
	case 'z':
		*trace_size = sizeof(size_t);
		break;
//ust//	case 't':
//ust//		*trace_size = sizeof(ptrdiff_t);
//ust//		break;
	case 'h':
		*trace_size = sizeof(short);
		break;
	case '1':
		*trace_size = sizeof(uint8_t);
		break;
	case '2':
		*trace_size = sizeof(uint16_t);
		break;
	case '4':
		*trace_size = sizeof(uint32_t);
		break;
	case '8':
		*trace_size = sizeof(uint64_t);
		break;
	default:
		*trace_size = sizeof(int);
	}

parse_end:
	return fmt;
}
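
/*
 * Minimal sketch (added for this edit, not in the original file) of what
 * parse_trace_type() reports for the trace type "n4u" (the leading '#' is
 * consumed by the caller, ltt_serialize_data()).
 */
#if 0
static void parse_trace_type_example(void)
{
	char trace_size = 0;
	enum ltt_type trace_type = LTT_TYPE_NONE;
	unsigned long attributes = 0;
	const char *end;

	end = parse_trace_type("n4u", &trace_size, &trace_type, &attributes);
	/*
	 * Now trace_size == 4, trace_type == LTT_TYPE_UNSIGNED_INT and
	 * attributes has LTT_ATTRIBUTE_NETWORK_BYTE_ORDER set; end still
	 * points at the 'u' conversion character, which the caller's loop
	 * skips with ++fmt.
	 */
}
#endif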

/*
 * Restrictions:
 * Field width and precision are *not* supported.
 * %n not supported.
 */
static inline
const char *parse_c_type(const char *fmt, char *c_size, enum ltt_type *c_type,
			 char *outfmt)
{
	int qualifier;		/* 'h', 'l', or 'L' for integer fields */
				/* 'z' support added 23/7/1999 S.H. */
				/* 'z' changed to 'Z' --davidm 1/25/99 */
				/* 't' added for ptrdiff_t */

	/* process flags : ignore standard print formats for now. */
repeat:
	switch (*fmt) {
	case '-':
	case '+':
	case ' ':
	case '#':
	case '0':
		++fmt;
		goto repeat;
	}

	/* get the conversion qualifier */
	qualifier = -1;
	if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
	    *fmt == 'Z' || *fmt == 'z' || *fmt == 't' ||
	    *fmt == 'S') {
		qualifier = *fmt;
		++fmt;
		if (qualifier == 'l' && *fmt == 'l') {
			qualifier = 'L';
			++fmt;
		}
	}

	if (outfmt) {
		if (qualifier != -1)
			*outfmt++ = (char)qualifier;
		*outfmt++ = *fmt;
		*outfmt = 0;
	}

	switch (*fmt) {
	case 'c':
		*c_type = LTT_TYPE_UNSIGNED_INT;
		*c_size = sizeof(unsigned char);
		goto parse_end;
	case 's':
		*c_type = LTT_TYPE_STRING;
		goto parse_end;
	case 'p':
		*c_type = LTT_TYPE_UNSIGNED_INT;
		*c_size = sizeof(void *);
		goto parse_end;
	case 'd':
	case 'i':
		*c_type = LTT_TYPE_SIGNED_INT;
		break;
	case 'o':
	case 'u':
	case 'x':
	case 'X':
		*c_type = LTT_TYPE_UNSIGNED_INT;
		break;
	default:
		if (!*fmt)
			--fmt;
		goto parse_end;
	}
	switch (qualifier) {
	case 'L':
		*c_size = sizeof(long long);
		break;
	case 'l':
		*c_size = sizeof(long);
		break;
	case 'Z':
	case 'z':
		*c_size = sizeof(size_t);
		break;
//ust//	case 't':
//ust//		*c_size = sizeof(ptrdiff_t);
//ust//		break;
	case 'h':
		*c_size = sizeof(short);
		break;
	default:
		*c_size = sizeof(int);
	}

parse_end:
	return fmt;
}
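
/*
 * Sketch (added for this edit, not in the original file): parse_c_type() can
 * also rebuild a plain printf conversion in 'outfmt' for callers that need
 * one (ltt_serialize_data() below passes NULL for it).  For the C type "lu"
 * (from "%lu"):
 */
#if 0
static void parse_c_type_example(void)
{
	char c_size = 0;
	enum ltt_type c_type = LTT_TYPE_NONE;
	char outfmt[4];

	(void)parse_c_type("lu", &c_size, &c_type, outfmt);
	/* outfmt now holds "lu", c_size == sizeof(long), c_type is unsigned. */
}
#endif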

static inline size_t serialize_trace_data(struct ust_buffer *buf,
		size_t buf_offset,
		char trace_size, enum ltt_type trace_type,
		char c_size, enum ltt_type c_type,
		unsigned int *stack_pos_ctx,
		int *largest_align,
		va_list *args)
{
	union {
		unsigned long v_ulong;
		uint64_t v_uint64;
		struct {
			const char *s;
			size_t len;
		} v_string;
	} tmp;

	/*
	 * Be careful about sign extension here.
	 * Sign extension is done with the destination (trace) type.
	 */
	switch (trace_type) {
	case LTT_TYPE_SIGNED_INT:
		switch (c_size) {
		case 1:
			tmp.v_ulong = (long)(int8_t)va_arg(*args, int);
			break;
		case 2:
			tmp.v_ulong = (long)(int16_t)va_arg(*args, int);
			break;
		case 4:
			tmp.v_ulong = (long)(int32_t)va_arg(*args, int);
			break;
		case 8:
			tmp.v_uint64 = va_arg(*args, int64_t);
			break;
		default:
			BUG();
		}
		break;
	case LTT_TYPE_UNSIGNED_INT:
		switch (c_size) {
		case 1:
			tmp.v_ulong = (unsigned long)(uint8_t)va_arg(*args, unsigned int);
			break;
		case 2:
			tmp.v_ulong = (unsigned long)(uint16_t)va_arg(*args, unsigned int);
			break;
		case 4:
			tmp.v_ulong = (unsigned long)(uint32_t)va_arg(*args, unsigned int);
			break;
		case 8:
			tmp.v_uint64 = va_arg(*args, uint64_t);
			break;
		default:
			BUG();
		}
		break;
	case LTT_TYPE_STRING:
		tmp.v_string.s = va_arg(*args, const char *);
		if ((unsigned long)tmp.v_string.s < PAGE_SIZE)
			tmp.v_string.s = "<NULL>";
		if (!buf) {
			/*
			 * Reserve tracer stack entry.
			 */
			tracer_stack_pos++;
			assert(tracer_stack_pos <= TRACER_STACK_LEN);
			barrier();
			tracer_stack[*stack_pos_ctx] =
					strlen(tmp.v_string.s) + 1;
		}
		tmp.v_string.len = tracer_stack[(*stack_pos_ctx)++];
		if (buf)
			ust_buffers_strncpy(buf, buf_offset, tmp.v_string.s,
					    tmp.v_string.len);
		buf_offset += tmp.v_string.len;
		goto copydone;
	default:
		BUG();
	}

	/*
	 * If trace_size is lower or equal to 4 bytes, there is no sign
	 * extension to do because we are already encoded in a long. Therefore,
	 * we can combine signed and unsigned ops. 4 bytes float also works
	 * with this, because we do a simple copy of 4 bytes into 4 bytes
	 * without manipulation (and we do not support conversion from integers
	 * to floats).
	 * It is also the case if c_size is 8 bytes, which is the largest
	 * possible integer.
	 */
	if (ltt_get_alignment()) {
		buf_offset += ltt_align(buf_offset, trace_size);
		if (largest_align)
			*largest_align = max_t(int, *largest_align, trace_size);
	}
	if (trace_size <= 4 || c_size == 8) {
		if (buf) {
			switch (trace_size) {
			case 1:
				if (c_size == 8)
					ust_buffers_write(buf, buf_offset,
						(uint8_t[]){ (uint8_t)tmp.v_uint64 },
						sizeof(uint8_t));
				else
					ust_buffers_write(buf, buf_offset,
						(uint8_t[]){ (uint8_t)tmp.v_ulong },
						sizeof(uint8_t));
				break;
			case 2:
				if (c_size == 8)
					ust_buffers_write(buf, buf_offset,
						(uint16_t[]){ (uint16_t)tmp.v_uint64 },
						sizeof(uint16_t));
				else
					ust_buffers_write(buf, buf_offset,
						(uint16_t[]){ (uint16_t)tmp.v_ulong },
						sizeof(uint16_t));
				break;
			case 4:
				if (c_size == 8)
					ust_buffers_write(buf, buf_offset,
						(uint32_t[]){ (uint32_t)tmp.v_uint64 },
						sizeof(uint32_t));
				else
					ust_buffers_write(buf, buf_offset,
						(uint32_t[]){ (uint32_t)tmp.v_ulong },
						sizeof(uint32_t));
				break;
			case 8:
				/*
				 * c_size cannot be other than 8 here because
				 * trace_size > 4.
				 */
				ust_buffers_write(buf, buf_offset,
					(uint64_t[]){ (uint64_t)tmp.v_uint64 },
					sizeof(uint64_t));
				break;
			default:
				BUG();
			}
		}
		buf_offset += trace_size;
		goto copydone;
	} else {
		/*
		 * Perform sign extension.
		 */
		if (buf) {
			switch (trace_type) {
			case LTT_TYPE_SIGNED_INT:
				ust_buffers_write(buf, buf_offset,
					(int64_t[]){ (int64_t)tmp.v_ulong },
					sizeof(int64_t));
				break;
			case LTT_TYPE_UNSIGNED_INT:
				ust_buffers_write(buf, buf_offset,
					(uint64_t[]){ (uint64_t)tmp.v_ulong },
					sizeof(uint64_t));
				break;
			default:
				BUG();
			}
		}
		buf_offset += trace_size;
		goto copydone;
	}

copydone:
	return buf_offset;
}

notrace size_t ltt_serialize_data(struct ust_buffer *buf, size_t buf_offset,
			struct ltt_serialize_closure *closure,
			void *serialize_private,
			unsigned int stack_pos_ctx,
			int *largest_align,
			const char *fmt, va_list *args)
{
	char trace_size = 0, c_size = 0;	/*
						 * 0 (unset), 1, 2, 4, 8 bytes.
						 */
	enum ltt_type trace_type = LTT_TYPE_NONE, c_type = LTT_TYPE_NONE;
	unsigned long attributes = 0;

	for (; *fmt ; ++fmt) {
		switch (*fmt) {
		case '#':
			/* tracetypes (#) */
			++fmt;			/* skip first '#' */
			if (*fmt == '#')	/* Escaped ## */
				break;
			attributes = 0;
			fmt = parse_trace_type(fmt, &trace_size, &trace_type,
					       &attributes);
			break;
		case '%':
			/* c types (%) */
			++fmt;			/* skip first '%' */
			if (*fmt == '%')	/* Escaped %% */
				break;
			fmt = parse_c_type(fmt, &c_size, &c_type, NULL);
			/*
			 * Output the C type if no trace type has been
			 * specified.
			 */
			if (!trace_size)
				trace_size = c_size;
			if (trace_type == LTT_TYPE_NONE)
				trace_type = c_type;
			if (c_type == LTT_TYPE_STRING)
				trace_type = LTT_TYPE_STRING;
			/* perform trace write */
			buf_offset = serialize_trace_data(buf,
						buf_offset, trace_size,
						trace_type, c_size, c_type,
						&stack_pos_ctx,
						largest_align,
						args);
			trace_size = 0;
			c_size = 0;
			trace_type = LTT_TYPE_NONE;
			c_type = LTT_TYPE_NONE;
			attributes = 0;
			break;
			/* default is to skip the text, doing nothing */
		}
	}
	return buf_offset;
}

/*
 * Calculate data size
 * Assume that the padding for alignment starts at a sizeof(void *) address.
 */
static notrace size_t ltt_get_data_size(struct ltt_serialize_closure *closure,
				void *serialize_private,
				unsigned int stack_pos_ctx, int *largest_align,
				const char *fmt, va_list *args)
{
	ltt_serialize_cb cb = closure->callbacks[0];
	closure->cb_idx = 0;
	return (size_t)cb(NULL, 0, closure, serialize_private,
			  stack_pos_ctx, largest_align, fmt, args);
}

static notrace
void ltt_write_event_data(struct ust_buffer *buf, size_t buf_offset,
			  struct ltt_serialize_closure *closure,
			  void *serialize_private,
			  unsigned int stack_pos_ctx,
			  int largest_align,
			  const char *fmt, va_list *args)
{
	ltt_serialize_cb cb = closure->callbacks[0];
	closure->cb_idx = 0;
	buf_offset += ltt_align(buf_offset, largest_align);
	cb(buf, buf_offset, closure, serialize_private, stack_pos_ctx, NULL,
	   fmt, args);
}

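/*
 * Sketch (added for this edit, not in the original file): events are
 * serialized in two passes.  The sizing pass (buf == NULL, via
 * ltt_get_data_size()) computes the slot size and largest alignment and
 * records each string length on the per-thread tracer_stack; the write pass
 * (via ltt_write_event_data()) replays the walk against the reserved buffer
 * and reads the lengths back, so strlen() runs only once per string.
 * ltt_vtrace() below does exactly this; the fragment only isolates the two
 * calls.
 */
#if 0
static void two_pass_sketch(struct ltt_serialize_closure *closure,
			    struct ust_buffer *buf, const char *fmt,
			    va_list *args)
{
	int largest_align = 1;		/* must be non-zero for ltt_align */
	unsigned int stack_pos_ctx = tracer_stack_pos;
	size_t data_size;
	va_list copy;

	va_copy(copy, *args);
	data_size = ltt_get_data_size(closure, NULL, stack_pos_ctx,
				      &largest_align, fmt, &copy);
	va_end(copy);

	/* ... reserve a slot of data_size bytes in buf, then ... */

	ltt_write_event_data(buf, 0, closure, NULL, stack_pos_ctx,
			     largest_align, fmt, args);
}
#endif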

notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
			struct registers *regs, void *call_data,
			const char *fmt, va_list *args)
{
	int largest_align, ret;
	struct ltt_active_marker *pdata;
	uint16_t eID;
	size_t data_size, slot_size;
	unsigned int chan_index;
	struct ust_channel *channel;
	struct ust_trace *trace, *dest_trace = NULL;
	struct ust_buffer *buf;
	u64 tsc;
	long buf_offset;
	va_list args_copy;
	struct ltt_serialize_closure closure;
	struct ltt_probe_private_data *private_data = call_data;
	void *serialize_private = NULL;
	int cpu;
	unsigned int rflags;
	unsigned int stack_pos_ctx;

	/*
	 * This test is useful for quickly exiting static tracing when no trace
	 * is active. We expect to have an active trace when we get here.
	 */
	if (unlikely(ltt_traces.num_active_traces == 0))
		return;

	rcu_read_lock(); //ust// rcu_read_lock_sched_notrace();
	cpu = ust_get_cpu();

	/* Force volatile access. */
	STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) + 1);
	stack_pos_ctx = tracer_stack_pos;
	barrier();

	pdata = (struct ltt_active_marker *)probe_data;
	eID = mdata->event_id;
	chan_index = mdata->channel_id;
	closure.callbacks = pdata->probe->callbacks;

	if (unlikely(private_data)) {
		dest_trace = private_data->trace;
		if (private_data->serializer)
			closure.callbacks = &private_data->serializer;
		serialize_private = private_data->serialize_private;
	}

	va_copy(args_copy, *args);
	/*
	 * Assumes event payload to start on largest_align alignment.
	 */
	largest_align = 1;	/* must be non-zero for ltt_align */
	data_size = ltt_get_data_size(&closure, serialize_private,
				      stack_pos_ctx, &largest_align,
				      fmt, &args_copy);
	largest_align = min_t(int, largest_align, sizeof(void *));
	va_end(args_copy);

	/* Iterate on each trace */
	list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
		/*
		 * Expect the filter to filter out events. If we get here,
		 * we went through tracepoint activation as a first step.
		 */
		if (unlikely(dest_trace && trace != dest_trace))
			continue;
		if (unlikely(!trace->active))
			continue;
		if (unlikely(!ltt_run_filter(trace, eID)))
			continue;
#ifdef CONFIG_LTT_DEBUG_EVENT_SIZE
		rflags = LTT_RFLAG_ID_SIZE;
#else
		if (unlikely(eID >= LTT_FREE_EVENTS))
			rflags = LTT_RFLAG_ID;
		else
			rflags = 0;
#endif
		/*
		 * Skip channels added after trace creation.
		 */
		if (unlikely(chan_index >= trace->nr_channels))
			continue;
		channel = &trace->channels[chan_index];
		if (!channel->active)
			continue;

		/* If a new cpu was plugged since the trace was started, we did
		 * not add it to the trace, and therefore we write the event to
		 * cpu 0.
		 */
		if (cpu >= channel->n_cpus) {
			cpu = 0;
		}

		/* reserve space : header and data */
		ret = ltt_reserve_slot(channel, trace, data_size, largest_align,
				       cpu, &buf, &slot_size, &buf_offset,
				       &tsc, &rflags);
		if (unlikely(ret < 0))
			continue; /* buffer full */

		va_copy(args_copy, *args);
		/* FIXME : could probably encapsulate transport better. */
//ust//		buf = ((struct rchan *)channel->trans_channel_data)->buf[cpu];
		buf = channel->buf[cpu];
		/* Out-of-order write : header and data */
		buf_offset = ltt_write_event_header(channel, buf, buf_offset,
						    eID, data_size, tsc, rflags);
		ltt_write_event_data(buf, buf_offset, &closure,
				     serialize_private,
				     stack_pos_ctx, largest_align,
				     fmt, &args_copy);
		va_end(args_copy);
		/* Out-of-order commit */
		ltt_commit_slot(channel, buf, buf_offset, data_size, slot_size);
		DBG("just committed event (%s/%s) at offset %ld and size %zd", mdata->channel, mdata->name, buf_offset, slot_size);
	}

	barrier();
	tracer_stack_pos = stack_pos_ctx;
	STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) - 1);

	rcu_read_unlock(); //ust// rcu_read_unlock_sched_notrace();
}

notrace void ltt_trace(const struct marker *mdata, void *probe_data,
		       struct registers *regs, void *call_data,
		       const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	ltt_vtrace(mdata, probe_data, regs, call_data, fmt, &args);
	va_end(args);
}

static notrace void skip_space(const char **ps)
{
	while (**ps == ' ')
		(*ps)++;
}

static notrace void copy_token(char **out, const char **in)
{
	while (**in != ' ' && **in != '\0') {
		**out = **in;
		(*out)++;
		(*in)++;
	}
}

/* serialize_to_text
 *
 * Given a format string and a va_list of arguments, convert them to a
 * human-readable string.
 *
 * @outbuf: the buffer to output the string to
 * @bufsize: the max size that can be used in outbuf
 * @fmt: the marker format string
 * @ap: a va_list that contains the arguments corresponding to fmt
 *
 * Return value: the number of chars that have been put in outbuf, excluding
 * the final \0, or, if the buffer was too small, the number of chars that
 * would have been written in outbuf if it had been large enough.
 *
 * outbuf may be NULL. The return value may then be used to allocate an
 * appropriate outbuf.
 *
 */

notrace
int serialize_to_text(char *outbuf, int bufsize, const char *fmt, va_list ap)
{
	int fmt_len = strlen(fmt);
	char *new_fmt = alloca(fmt_len + 1);
	const char *orig_fmt_p = fmt;
	char *new_fmt_p = new_fmt;
	char false_buf;
	int result;
	enum { none, cfmt, tracefmt, argname } prev_token = none;

	while (*orig_fmt_p != '\0') {
		if (*orig_fmt_p == '%') {
			prev_token = cfmt;
			copy_token(&new_fmt_p, &orig_fmt_p);
		}
		else if (*orig_fmt_p == '#') {
			prev_token = tracefmt;
			do {
				orig_fmt_p++;
			} while (*orig_fmt_p != ' ' && *orig_fmt_p != '\0');
		}
		else if (*orig_fmt_p == ' ') {
			if (prev_token == argname) {
				*new_fmt_p = '=';
				new_fmt_p++;
			}
			else if (prev_token == cfmt) {
				*new_fmt_p = ' ';
				new_fmt_p++;
			}

			skip_space(&orig_fmt_p);
		}
		else {
			prev_token = argname;
			copy_token(&new_fmt_p, &orig_fmt_p);
		}
	}

	*new_fmt_p = '\0';

	if (outbuf == NULL) {
		/* use this false_buffer for compatibility with pre-C99 */
		outbuf = &false_buf;
		bufsize = 1;
	}
	result = ust_safe_vsnprintf(outbuf, bufsize, new_fmt, ap);

	return result;
}