/*
 * trace_output.c
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>

#include "trace_output.h"

/* must be a power of 2 */
#define EVENT_HASHSIZE	128

DECLARE_RWSEM(trace_event_mutex);

DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq);
EXPORT_PER_CPU_SYMBOL(ftrace_event_seq);

static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

static int next_event_type = __TRACE_LAST_TYPE + 1;

int trace_print_seq(struct seq_file *m, struct trace_seq *s)
{
        int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
        int ret;

        ret = seq_write(m, s->buffer, len);

        /*
         * Only reset this buffer if we successfully wrote to the
         * seq_file buffer.
         */
        if (!ret)
                trace_seq_init(s);

        return ret;
}
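
/*
 * Usage sketch (illustrative, not part of this file): trace_print_seq()
 * pairs with a seq_file ->show() handler.  If seq_write() fails because
 * the seq_file buffer is already full, @s keeps its contents buffered
 * and the copy is retried on the next ->show() pass; @s is only reset
 * after a successful write.  The handler below is hypothetical.
 *
 *      static int example_show(struct seq_file *m, void *v)
 *      {
 *              struct trace_iterator *iter = m->private;
 *
 *              // ...fill iter->seq via the trace_seq_*() helpers...
 *              trace_print_seq(m, &iter->seq);
 *              return 0;
 *      }
 */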

enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry = iter->ent;
        struct bprint_entry *field;
        int ret;

        trace_assign_type(field, entry);

        ret = trace_seq_bprintf(s, field->fmt, field->buf);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry = iter->ent;
        struct print_entry *field;
        int ret;

        trace_assign_type(field, entry);

        ret = trace_seq_printf(s, "%s", field->buf);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * Returns 0 if the trace does not fit in the buffer's free
 * space, 1 otherwise.
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formatting of a trace,
 * trace_seq_printf() is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
        int len = (PAGE_SIZE - 1) - s->len;
        va_list ap;
        int ret;

        if (!len)
                return 0;

        va_start(ap, fmt);
        ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
        va_end(ap);

        /* If we can't write it all, don't bother writing anything */
        if (ret >= len)
                return 0;

        s->len += ret;

        return 1;
}
EXPORT_SYMBOL_GPL(trace_seq_printf);
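
/*
 * Usage sketch (hypothetical callback, shown for illustration): the
 * print handlers in this file accumulate one line at a time and report
 * a partial line once the page-sized buffer fills up.
 *
 *      static enum print_line_t example_print(struct trace_iterator *iter,
 *                                             int flags)
 *      {
 *              if (!trace_seq_printf(&iter->seq, "value=%d\n", 42))
 *                      return TRACE_TYPE_PARTIAL_LINE;
 *              return TRACE_TYPE_HANDLED;
 *      }
 */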

/**
 * trace_seq_vprintf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * Returns 0 if the output did not fit in the buffer's free
 * space, non-zero otherwise.
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formatting of a trace,
 * trace_seq_vprintf() is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
{
        int len = (PAGE_SIZE - 1) - s->len;
        int ret;

        if (!len)
                return 0;

        ret = vsnprintf(s->buffer + s->len, len, fmt, args);

        /* If we can't write it all, don't bother writing anything */
        if (ret >= len)
                return 0;

        s->len += ret;

        return len;
}
EXPORT_SYMBOL_GPL(trace_seq_vprintf);

int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
{
        int len = (PAGE_SIZE - 1) - s->len;
        int ret;

        if (!len)
                return 0;

        ret = bstr_printf(s->buffer + s->len, len, fmt, binary);

        /* If we can't write it all, don't bother writing anything */
        if (ret >= len)
                return 0;

        s->len += ret;

        return len;
}

/**
 * trace_seq_puts - trace sequence printing of simple string
 * @s: trace sequence descriptor
 * @str: simple string to record
 *
 * The tracer may use either the sequence operations or its own
 * copy to user routines. This function records a simple string
 * into a special buffer (@s) for later retrieval by a sequencer
 * or other mechanism.
 */
int trace_seq_puts(struct trace_seq *s, const char *str)
{
        int len = strlen(str);

        if (len > ((PAGE_SIZE - 1) - s->len))
                return 0;

        memcpy(s->buffer + s->len, str, len);
        s->len += len;

        return len;
}

int trace_seq_putc(struct trace_seq *s, unsigned char c)
{
        if (s->len >= (PAGE_SIZE - 1))
                return 0;

        s->buffer[s->len++] = c;

        return 1;
}

int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
{
        if (len > ((PAGE_SIZE - 1) - s->len))
                return 0;

        memcpy(s->buffer + s->len, mem, len);
        s->len += len;

        return len;
}

int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
{
        unsigned char hex[HEX_CHARS];
        const unsigned char *data = mem;
        int i, j;

#ifdef __BIG_ENDIAN
        for (i = 0, j = 0; i < len; i++) {
#else
        for (i = len-1, j = 0; i >= 0; i--) {
#endif
                hex[j++] = hex_asc_hi(data[i]);
                hex[j++] = hex_asc_lo(data[i]);
        }
        hex[j++] = ' ';

        return trace_seq_putmem(s, hex, j);
}
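
/*
 * Example (assuming a little-endian kernel): a 16-bit value 0x1234 is
 * stored in memory as { 0x34, 0x12 }.  The #else branch above walks the
 * bytes from last to first, so trace_seq_putmem_hex() emits "1234 " and
 * the value reads as a natural hex number followed by a space.
 */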

void *trace_seq_reserve(struct trace_seq *s, size_t len)
{
        void *ret;

        if (len > ((PAGE_SIZE - 1) - s->len))
                return NULL;

        ret = s->buffer + s->len;
        s->len += len;

        return ret;
}

int trace_seq_path(struct trace_seq *s, struct path *path)
{
        unsigned char *p;

        if (s->len >= (PAGE_SIZE - 1))
                return 0;
        p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
        if (!IS_ERR(p)) {
                p = mangle_path(s->buffer + s->len, p, "\n");
                if (p) {
                        s->len = p - s->buffer;
                        return 1;
                }
        } else {
                s->buffer[s->len++] = '?';
                return 1;
        }

        return 0;
}

const char *
ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
                       unsigned long flags,
                       const struct trace_print_flags *flag_array)
{
        unsigned long mask;
        const char *str;
        const char *ret = p->buffer + p->len;
        int i;

        for (i = 0; flag_array[i].name && flags; i++) {

                mask = flag_array[i].mask;
                if ((flags & mask) != mask)
                        continue;

                str = flag_array[i].name;
                flags &= ~mask;
                if (p->len && delim)
                        trace_seq_puts(p, delim);
                trace_seq_puts(p, str);
        }

        /* check for left over flags */
        if (flags) {
                if (p->len && delim)
                        trace_seq_puts(p, delim);
                trace_seq_printf(p, "0x%lx", flags);
        }

        trace_seq_putc(p, 0);

        return ret;
}
EXPORT_SYMBOL(ftrace_print_flags_seq);
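
/*
 * ftrace_print_flags_seq() is the helper behind the __print_flags()
 * macro used in TP_printk().  A sketch with made-up flag values:
 *
 *      TP_printk("flags=%s",
 *                __print_flags(__entry->flags, "|",
 *                              { 0x1, "READ" }, { 0x2, "WRITE" }))
 *
 * With flags == 0x3 this renders "READ|WRITE"; any bits not covered by
 * the table are appended in the "0x%lx" form.
 */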

const char *
ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
                         const struct trace_print_flags *symbol_array)
{
        int i;
        const char *ret = p->buffer + p->len;

        for (i = 0; symbol_array[i].name; i++) {

                if (val != symbol_array[i].mask)
                        continue;

                trace_seq_puts(p, symbol_array[i].name);
                break;
        }

        if (!p->len)
                trace_seq_printf(p, "0x%lx", val);

        trace_seq_putc(p, 0);

        return ret;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq);
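
/*
 * Likewise, ftrace_print_symbols_seq() backs __print_symbolic(), which
 * prints one name for an exact value match.  Sketch (values made up):
 *
 *      TP_printk("state=%s",
 *                __print_symbolic(__entry->state,
 *                                 { 0, "IDLE" }, { 1, "BUSY" }))
 *
 * A value with no match falls back to the "0x%lx" form.
 */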

#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)
{
        static const char tramp_name[] = "kretprobe_trampoline";
        int size = sizeof(tramp_name);

        if (strncmp(tramp_name, name, size) == 0)
                return "[unknown/kretprobe'd]";
        return name;
}
#else
static inline const char *kretprobed(const char *name)
{
        return name;
}
#endif /* CONFIG_KRETPROBES */

static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];
        const char *name;

        kallsyms_lookup(address, NULL, NULL, NULL, str);

        name = kretprobed(str);

        return trace_seq_printf(s, fmt, name);
#endif
        return 1;
}

static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
                     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
        char str[KSYM_SYMBOL_LEN];
        const char *name;

        sprint_symbol(str, address);
        name = kretprobed(str);

        return trace_seq_printf(s, fmt, name);
#endif
        return 1;
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
                      unsigned long ip, unsigned long sym_flags)
{
        struct file *file = NULL;
        unsigned long vmstart = 0;
        int ret = 1;

        if (mm) {
                const struct vm_area_struct *vma;

                down_read(&mm->mmap_sem);
                vma = find_vma(mm, ip);
                if (vma) {
                        file = vma->vm_file;
                        vmstart = vma->vm_start;
                }
                if (file) {
                        ret = trace_seq_path(s, &file->f_path);
                        if (ret)
                                ret = trace_seq_printf(s, "[+0x%lx]",
                                                       ip - vmstart);
                }
                up_read(&mm->mmap_sem);
        }
        if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
                ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
        return ret;
}

int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
                      unsigned long sym_flags)
{
        struct mm_struct *mm = NULL;
        int ret = 1;
        unsigned int i;

        if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
                struct task_struct *task;
                /*
                 * We do the lookup on the thread group leader,
                 * since individual threads might have already quit!
                 */
                rcu_read_lock();
                task = find_task_by_vpid(entry->tgid);
                if (task)
                        mm = get_task_mm(task);
                rcu_read_unlock();
        }

        for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
                unsigned long ip = entry->caller[i];

                if (ip == ULONG_MAX || !ret)
                        break;
                if (ret)
                        ret = trace_seq_puts(s, " => ");
                if (!ip) {
                        if (ret)
                                ret = trace_seq_puts(s, "??");
                        if (ret)
                                ret = trace_seq_puts(s, "\n");
                        continue;
                }
                if (!ret)
                        break;
                if (ret)
                        ret = seq_print_user_ip(s, mm, ip, sym_flags);
                ret = trace_seq_puts(s, "\n");
        }

        if (mm)
                mmput(mm);
        return ret;
}

int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
        int ret;

        if (!ip)
                return trace_seq_printf(s, "0");

        if (sym_flags & TRACE_ITER_SYM_OFFSET)
                ret = seq_print_sym_offset(s, "%s", ip);
        else
                ret = seq_print_sym_short(s, "%s", ip);

        if (!ret)
                return 0;

        if (sym_flags & TRACE_ITER_SYM_ADDR)
                ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
        return ret;
}

/**
 * trace_print_lat_fmt - print the irq, preempt and lockdep fields
 * @s: trace seq struct to write to
 * @entry: The trace entry field from the ring buffer
 *
 * Prints the generic fields of irqs off, in hard or softirq, preempt
 * count and lock depth.
 */
int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
        int hardirq, softirq;
        int ret;

        hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
        softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

        if (!trace_seq_printf(s, "%c%c%c",
                              (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
                                (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ?
                                  'X' : '.',
                              (entry->flags & TRACE_FLAG_NEED_RESCHED) ?
                                'N' : '.',
                              (hardirq && softirq) ? 'H' :
                                hardirq ? 'h' : softirq ? 's' : '.'))
                return 0;

        if (entry->preempt_count)
                ret = trace_seq_printf(s, "%x", entry->preempt_count);
        else
                ret = trace_seq_putc(s, '.');

        if (!ret)
                return 0;

        if (entry->lock_depth < 0)
                return trace_seq_putc(s, '.');

        return trace_seq_printf(s, "%d", entry->lock_depth);
}
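
/*
 * Example of the field printed above (illustrative): "dNs1." means
 * irqs disabled ('d'), need-resched set ('N'), in softirq ('s'),
 * a preempt_count of 1, and a negative lock_depth shown as '.'.
 */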

static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
        char comm[TASK_COMM_LEN];

        trace_find_cmdline(entry->pid, comm);

        if (!trace_seq_printf(s, "%8.8s-%-5d %3d",
                              comm, entry->pid, cpu))
                return 0;

        return trace_print_lat_fmt(s, entry);
}

static unsigned long preempt_mark_thresh = 100;

static int
lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
                    unsigned long rel_usecs)
{
        return trace_seq_printf(s, " %4lldus%c: ", abs_usecs,
                                rel_usecs > preempt_mark_thresh ? '!' :
                                  rel_usecs > 1 ? '+' : ' ');
}

int trace_print_context(struct trace_iterator *iter)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry = iter->ent;
        unsigned long long t = ns2usecs(iter->ts);
        unsigned long usec_rem = do_div(t, USEC_PER_SEC);
        unsigned long secs = (unsigned long)t;
        char comm[TASK_COMM_LEN];

        trace_find_cmdline(entry->pid, comm);

        return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ",
                                comm, entry->pid, iter->cpu, secs, usec_rem);
}
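
/*
 * Sample of the default context prefix produced above (illustrative):
 *
 *                  bash-2262  [002]  1234.567890:
 *
 * i.e. comm, PID, CPU, and a seconds.microseconds timestamp.
 */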

int trace_print_lat_context(struct trace_iterator *iter)
{
        u64 next_ts;
        int ret;
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry = iter->ent,
                           *next_entry = trace_find_next_entry(iter, NULL,
                                                               &next_ts);
        unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
        unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
        unsigned long rel_usecs;

        if (!next_entry)
                next_ts = iter->ts;
        rel_usecs = ns2usecs(next_ts - iter->ts);

        if (verbose) {
                char comm[TASK_COMM_LEN];

                trace_find_cmdline(entry->pid, comm);

                ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08llx]"
                                       " %ld.%03ldms (+%ld.%03ldms): ", comm,
                                       entry->pid, iter->cpu, entry->flags,
                                       entry->preempt_count, iter->idx,
                                       ns2usecs(iter->ts),
                                       abs_usecs / USEC_PER_MSEC,
                                       abs_usecs % USEC_PER_MSEC,
                                       rel_usecs / USEC_PER_MSEC,
                                       rel_usecs % USEC_PER_MSEC);
        } else {
                ret = lat_print_generic(s, entry, iter->cpu);
                if (ret)
                        ret = lat_print_timestamp(s, abs_usecs, rel_usecs);
        }

        return ret;
}

static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static int task_state_char(unsigned long state)
{
        int bit = state ? __ffs(state) + 1 : 0;

        return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}
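
/*
 * Example (assuming the usual TASK_STATE_TO_CHAR_STR of "RSDTtZX..."):
 * state 0 (TASK_RUNNING) maps to bit 0 and prints 'R', while
 * TASK_INTERRUPTIBLE (0x1) maps to bit 1 and prints 'S'.  A state past
 * the end of the string prints '?'.
 */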

/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type, otherwise NULL.
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
        struct trace_event *event;
        struct hlist_node *n;
        unsigned key;

        key = type & (EVENT_HASHSIZE - 1);

        hlist_for_each_entry(event, n, &event_hash[key], node) {
                if (event->type == type)
                        return event;
        }

        return NULL;
}

static LIST_HEAD(ftrace_event_list);

static int trace_search_list(struct list_head **list)
{
        struct trace_event *e;
        int last = __TRACE_LAST_TYPE;

        if (list_empty(&ftrace_event_list)) {
                *list = &ftrace_event_list;
                return last + 1;
        }

        /*
         * We used up all possible max events,
         * let's see if somebody freed one.
         */
        list_for_each_entry(e, &ftrace_event_list, list) {
                if (e->type != last + 1)
                        break;
                last++;
        }

        /* Did we use up all 65 thousand events? */
        if ((last + 1) > FTRACE_MAX_EVENT)
                return 0;

        *list = &e->list;
        return last + 1;
}

void trace_event_read_lock(void)
{
        down_read(&trace_event_mutex);
}

void trace_event_read_unlock(void)
{
        up_read(&trace_event_mutex);
}

/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
        unsigned key;
        int ret = 0;

        down_write(&trace_event_mutex);

        if (WARN_ON(!event))
                goto out;

        INIT_LIST_HEAD(&event->list);

        if (!event->type) {
                struct list_head *list = NULL;

                if (next_event_type > FTRACE_MAX_EVENT) {

                        event->type = trace_search_list(&list);
                        if (!event->type)
                                goto out;

                } else {

                        event->type = next_event_type++;
                        list = &ftrace_event_list;
                }

                if (WARN_ON(ftrace_find_event(event->type)))
                        goto out;

                list_add_tail(&event->list, list);

        } else if (event->type > __TRACE_LAST_TYPE) {
                printk(KERN_WARNING "Need to add type to trace.h\n");
                WARN_ON(1);
                goto out;
        } else {
                /* Is this event already used? */
                if (ftrace_find_event(event->type))
                        goto out;
        }

        if (event->trace == NULL)
                event->trace = trace_nop_print;
        if (event->raw == NULL)
                event->raw = trace_nop_print;
        if (event->hex == NULL)
                event->hex = trace_nop_print;
        if (event->binary == NULL)
                event->binary = trace_nop_print;

        key = event->type & (EVENT_HASHSIZE - 1);

        hlist_add_head(&event->node, &event_hash[key]);

        ret = event->type;
 out:
        up_write(&trace_event_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_event);
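
/*
 * Registration sketch (hypothetical event, shown for illustration):
 *
 *      static enum print_line_t my_print(struct trace_iterator *iter,
 *                                        int flags)
 *      {
 *              return TRACE_TYPE_HANDLED;
 *      }
 *
 *      static struct trace_event my_event = {
 *              .trace  = my_print,     // .type == 0, so a free
 *      };                              // type number is assigned
 *
 *      int type = register_ftrace_event(&my_event);
 *      // type is the assigned event type, or 0 on error
 *
 * Callbacks left NULL (raw/hex/binary here) default to trace_nop_print.
 */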

/*
 * Used by module code with the trace_event_mutex held for write.
 */
int __unregister_ftrace_event(struct trace_event *event)
{
        hlist_del(&event->node);
        list_del(&event->list);
        return 0;
}

/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 */
int unregister_ftrace_event(struct trace_event *event)
{
        down_write(&trace_event_mutex);
        __unregister_ftrace_event(event);
        up_write(&trace_event_mutex);

        return 0;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_event);

/*
 * Standard events
 */

enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags)
{
        return TRACE_TYPE_HANDLED;
}

/* TRACE_FN */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags)
{
        struct ftrace_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        if (!seq_print_ip_sym(s, field->ip, flags))
                goto partial;

        if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
                if (!trace_seq_printf(s, " <-"))
                        goto partial;
                if (!seq_print_ip_sym(s,
                                      field->parent_ip,
                                      flags))
                        goto partial;
        }
        if (!trace_seq_printf(s, "\n"))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags)
{
        struct ftrace_entry *field;

        trace_assign_type(field, iter->ent);

        if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
                              field->ip,
                              field->parent_ip))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags)
{
        struct ftrace_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        SEQ_PUT_HEX_FIELD_RET(s, field->ip);
        SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags)
{
        struct ftrace_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        SEQ_PUT_FIELD_RET(s, field->ip);
        SEQ_PUT_FIELD_RET(s, field->parent_ip);

        return TRACE_TYPE_HANDLED;
}

static struct trace_event trace_fn_event = {
        .type           = TRACE_FN,
        .trace          = trace_fn_trace,
        .raw            = trace_fn_raw,
        .hex            = trace_fn_hex,
        .binary         = trace_fn_bin,
};

/* TRACE_CTX and TRACE_WAKE */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
                                             char *delim)
{
        struct ctx_switch_entry *field;
        char comm[TASK_COMM_LEN];
        int S, T;

        trace_assign_type(field, iter->ent);

        T = task_state_char(field->next_state);
        S = task_state_char(field->prev_state);
        trace_find_cmdline(field->next_pid, comm);
        if (!trace_seq_printf(&iter->seq,
                              " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
                              field->prev_pid,
                              field->prev_prio,
                              S, delim,
                              field->next_cpu,
                              field->next_pid,
                              field->next_prio,
                              T, comm))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags)
{
        return trace_ctxwake_print(iter, "==>");
}

static enum print_line_t trace_wake_print(struct trace_iterator *iter,
                                          int flags)
{
        return trace_ctxwake_print(iter, "  +");
}

static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
{
        struct ctx_switch_entry *field;
        int T;

        trace_assign_type(field, iter->ent);

        if (!S)
                S = task_state_char(field->prev_state);
        T = task_state_char(field->next_state);
        if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
                              field->prev_pid,
                              field->prev_prio,
                              S,
                              field->next_cpu,
                              field->next_pid,
                              field->next_prio,
                              T))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags)
{
        return trace_ctxwake_raw(iter, 0);
}

static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags)
{
        return trace_ctxwake_raw(iter, '+');
}

static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
{
        struct ctx_switch_entry *field;
        struct trace_seq *s = &iter->seq;
        int T;

        trace_assign_type(field, iter->ent);

        if (!S)
                S = task_state_char(field->prev_state);
        T = task_state_char(field->next_state);

        SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
        SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
        SEQ_PUT_HEX_FIELD_RET(s, S);
        SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
        SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
        SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
        SEQ_PUT_HEX_FIELD_RET(s, T);

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags)
{
        return trace_ctxwake_hex(iter, 0);
}

static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags)
{
        return trace_ctxwake_hex(iter, '+');
}

static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
                                           int flags)
{
        struct ctx_switch_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        SEQ_PUT_FIELD_RET(s, field->prev_pid);
        SEQ_PUT_FIELD_RET(s, field->prev_prio);
        SEQ_PUT_FIELD_RET(s, field->prev_state);
        SEQ_PUT_FIELD_RET(s, field->next_pid);
        SEQ_PUT_FIELD_RET(s, field->next_prio);
        SEQ_PUT_FIELD_RET(s, field->next_state);

        return TRACE_TYPE_HANDLED;
}

static struct trace_event trace_ctx_event = {
        .type           = TRACE_CTX,
        .trace          = trace_ctx_print,
        .raw            = trace_ctx_raw,
        .hex            = trace_ctx_hex,
        .binary         = trace_ctxwake_bin,
};

static struct trace_event trace_wake_event = {
        .type           = TRACE_WAKE,
        .trace          = trace_wake_print,
        .raw            = trace_wake_raw,
        .hex            = trace_wake_hex,
        .binary         = trace_ctxwake_bin,
};

/* TRACE_SPECIAL */
static enum print_line_t trace_special_print(struct trace_iterator *iter,
                                             int flags)
{
        struct special_entry *field;

        trace_assign_type(field, iter->ent);

        if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n",
                              field->arg1,
                              field->arg2,
                              field->arg3))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_special_hex(struct trace_iterator *iter,
                                           int flags)
{
        struct special_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
        SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
        SEQ_PUT_HEX_FIELD_RET(s, field->arg3);

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t trace_special_bin(struct trace_iterator *iter,
                                           int flags)
{
        struct special_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        SEQ_PUT_FIELD_RET(s, field->arg1);
        SEQ_PUT_FIELD_RET(s, field->arg2);
        SEQ_PUT_FIELD_RET(s, field->arg3);

        return TRACE_TYPE_HANDLED;
}

static struct trace_event trace_special_event = {
        .type           = TRACE_SPECIAL,
        .trace          = trace_special_print,
        .raw            = trace_special_print,
        .hex            = trace_special_hex,
        .binary         = trace_special_bin,
};

/* TRACE_STACK */

static enum print_line_t trace_stack_print(struct trace_iterator *iter,
                                           int flags)
{
        struct stack_entry *field;
        struct trace_seq *s = &iter->seq;
        int i;

        trace_assign_type(field, iter->ent);

        if (!trace_seq_puts(s, "<stack trace>\n"))
                goto partial;
        for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
                if (!field->caller[i] || (field->caller[i] == ULONG_MAX))
                        break;
                if (!trace_seq_puts(s, " => "))
                        goto partial;

                if (!seq_print_ip_sym(s, field->caller[i], flags))
                        goto partial;
                if (!trace_seq_puts(s, "\n"))
                        goto partial;
        }

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_stack_event = {
        .type           = TRACE_STACK,
        .trace          = trace_stack_print,
        .raw            = trace_special_print,
        .hex            = trace_special_hex,
        .binary         = trace_special_bin,
};

/* TRACE_USER_STACK */
static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
                                                int flags)
{
        struct userstack_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        if (!trace_seq_puts(s, "<user stack trace>\n"))
                goto partial;

        if (!seq_print_userip_objs(field, s, flags))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_user_stack_event = {
        .type           = TRACE_USER_STACK,
        .trace          = trace_user_stack_print,
        .raw            = trace_special_print,
        .hex            = trace_special_hex,
        .binary         = trace_special_bin,
};

/* TRACE_BPRINT */
static enum print_line_t
trace_bprint_print(struct trace_iterator *iter, int flags)
{
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        struct bprint_entry *field;

        trace_assign_type(field, entry);

        if (!seq_print_ip_sym(s, field->ip, flags))
                goto partial;

        if (!trace_seq_puts(s, ": "))
                goto partial;

        if (!trace_seq_bprintf(s, field->fmt, field->buf))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t
trace_bprint_raw(struct trace_iterator *iter, int flags)
{
        struct bprint_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        if (!trace_seq_printf(s, ": %lx : ", field->ip))
                goto partial;

        if (!trace_seq_bprintf(s, field->fmt, field->buf))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_bprint_event = {
        .type           = TRACE_BPRINT,
        .trace          = trace_bprint_print,
        .raw            = trace_bprint_raw,
};

/* TRACE_PRINT */
static enum print_line_t trace_print_print(struct trace_iterator *iter,
                                           int flags)
{
        struct print_entry *field;
        struct trace_seq *s = &iter->seq;

        trace_assign_type(field, iter->ent);

        if (!seq_print_ip_sym(s, field->ip, flags))
                goto partial;

        if (!trace_seq_printf(s, ": %s", field->buf))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags)
{
        struct print_entry *field;

        trace_assign_type(field, iter->ent);

        if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
                goto partial;

        return TRACE_TYPE_HANDLED;

 partial:
        return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_print_event = {
        .type           = TRACE_PRINT,
        .trace          = trace_print_print,
        .raw            = trace_print_raw,
};

static struct trace_event *events[] __initdata = {
        &trace_fn_event,
        &trace_ctx_event,
        &trace_wake_event,
        &trace_special_event,
        &trace_stack_event,
        &trace_user_stack_event,
        &trace_bprint_event,
        &trace_print_event,
        NULL
};

__init static int init_events(void)
{
        struct trace_event *event;
        int i, ret;

        for (i = 0; events[i]; i++) {
                event = events[i];

                ret = register_ftrace_event(event);
                if (!ret) {
                        printk(KERN_WARNING "event %d failed to register\n",
                               event->type);
                        WARN_ON_ONCE(1);
                }
        }

        return 0;
}
device_initcall(init_events);