/*
 * trace_output.c
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>

#include "trace_output.h"

/* must be a power of 2 */
#define EVENT_HASHSIZE	128

static DEFINE_MUTEX(trace_event_mutex);
static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

static int next_event_type = __TRACE_LAST_TYPE + 1;

/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * The tracer may use either the sequence operations or its own
 * copy to user routines. To simplify formatting of a trace,
 * trace_seq_printf() is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	int len = (PAGE_SIZE - 1) - s->len;
	va_list ap;
	int ret;

	if (!len)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
	va_end(ap);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len)
		return 0;

	s->len += ret;

	return len;
}

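/*
 * Illustrative usage sketch (not part of the original file): an event's
 * output callback typically writes into the iterator's trace_seq and
 * treats a zero return as "buffer full", e.g. (the my_entry struct and
 * my_trace_output name are hypothetical):
 *
 *	static int my_trace_output(struct trace_seq *s,
 *				   struct trace_entry *entry, int flags)
 *	{
 *		struct my_entry *field;
 *
 *		trace_assign_type(field, entry);
 *
 *		if (!trace_seq_printf(s, "count=%lu\n", field->count))
 *			return TRACE_TYPE_PARTIAL_LINE;
 *
 *		return 0;
 *	}
 */
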
/**
 * trace_seq_puts - trace sequence printing of simple string
 * @s: trace sequence descriptor
 * @str: simple string to record
 *
 * The tracer may use either the sequence operations or its own
 * copy to user routines. This function records a simple string
 * into a special buffer (@s) for later retrieval by a sequencer
 * or other mechanism.
 */
int trace_seq_puts(struct trace_seq *s, const char *str)
{
	int len = strlen(str);

	if (len > ((PAGE_SIZE - 1) - s->len))
		return 0;

	memcpy(s->buffer + s->len, str, len);
	s->len += len;

	return len;
}

int trace_seq_putc(struct trace_seq *s, unsigned char c)
{
	if (s->len >= (PAGE_SIZE - 1))
		return 0;

	s->buffer[s->len++] = c;

	return 1;
}

int trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
{
	if (len > ((PAGE_SIZE - 1) - s->len))
		return 0;

	memcpy(s->buffer + s->len, mem, len);
	s->len += len;

	return len;
}

int trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
{
	unsigned char hex[HEX_CHARS];
	unsigned char *data = mem;
	int i, j;

#ifdef __BIG_ENDIAN
	for (i = 0, j = 0; i < len; i++) {
#else
	for (i = len-1, j = 0; i >= 0; i--) {
#endif
		hex[j++] = hex_asc_hi(data[i]);
		hex[j++] = hex_asc_lo(data[i]);
	}
	hex[j++] = ' ';

	return trace_seq_putmem(s, hex, j);
}

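/*
 * Worked example (not in the original): trace_seq_putmem_hex() walks the
 * bytes from the most significant end on either byte order, so a 16-bit
 * field holding 0x1234 is emitted as "1234 " on both little-endian and
 * big-endian machines.
 */
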
int trace_seq_path(struct trace_seq *s, struct path *path)
{
	unsigned char *p;

	if (s->len >= (PAGE_SIZE - 1))
		return 0;
	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
	if (!IS_ERR(p)) {
		p = mangle_path(s->buffer + s->len, p, "\n");
		if (p) {
			s->len = p - s->buffer;
			return 1;
		}
	} else {
		s->buffer[s->len++] = '?';
		return 1;
	}

	return 0;
}

#ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */

static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
		      unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (mm) {
		const struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, &file->f_path);
			if (ret)
				ret = trace_seq_printf(s, "[+0x%lx]",
						       ip - vmstart);
		}
		up_read(&mm->mmap_sem);
	}
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}

int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
		      unsigned long sym_flags)
{
	struct mm_struct *mm = NULL;
	int ret = 1;
	unsigned int i;

	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(entry->ent.tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = entry->caller[i];

		if (ip == ULONG_MAX || !ret)
			break;
		if (i && ret)
			ret = trace_seq_puts(s, " <- ");
		if (!ip) {
			if (ret)
				ret = trace_seq_puts(s, "??");
			continue;
		}
		if (!ret)
			break;
		if (ret)
			ret = seq_print_user_ip(s, mm, ip, sym_flags);
	}

	if (mm)
		mmput(mm);
	return ret;
}

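/*
 * Illustrative output (not in the original): with TRACE_ITER_SYM_USEROBJ
 * and TRACE_ITER_SYM_ADDR both set, a resolved user frame is printed
 * roughly as
 *
 *	/lib/libc-2.7.so[+0x1e3a4] <00007f2a331e3a40>
 *
 * with "??" for a zero address and just the bare <address> when no
 * backing vma/file is found (the path and numbers above are made up).
 */
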
int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
	int ret;

	if (!ip)
		return trace_seq_printf(s, "0");

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		ret = seq_print_sym_offset(s, "%s", ip);
	else
		ret = seq_print_sym_short(s, "%s", ip);

	if (!ret)
		return 0;

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}

static void
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	int hardirq, softirq;
	char *comm;

	comm = trace_find_cmdline(entry->pid);

	trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
	trace_seq_printf(s, "%3d", cpu);
	trace_seq_printf(s, "%c%c",
			 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
			 (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.',
			 ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
	if (hardirq && softirq) {
		trace_seq_putc(s, 'H');
	} else {
		if (hardirq) {
			trace_seq_putc(s, 'h');
		} else {
			if (softirq)
				trace_seq_putc(s, 's');
			else
				trace_seq_putc(s, '.');
		}
	}

	if (entry->preempt_count)
		trace_seq_printf(s, "%x", entry->preempt_count);
	else
		trace_seq_puts(s, ".");
}

static unsigned long preempt_mark_thresh = 100;

static void
lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
		    unsigned long rel_usecs)
{
	trace_seq_printf(s, " %4lldus", abs_usecs);
	if (rel_usecs > preempt_mark_thresh)
		trace_seq_puts(s, "!: ");
	else if (rel_usecs > 1)
		trace_seq_puts(s, "+: ");
	else
		trace_seq_puts(s, " : ");
}

int trace_print_context(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	char *comm = trace_find_cmdline(entry->pid);
	unsigned long long t = ns2usecs(iter->ts);
	unsigned long usec_rem = do_div(t, USEC_PER_SEC);
	unsigned long secs = (unsigned long)t;

	if (!trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid))
		goto partial;
	if (!trace_seq_printf(s, "[%03d] ", entry->cpu))
		goto partial;
	if (!trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem))
		goto partial;

	return 0;

partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

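/*
 * Illustrative output (not in the original): trace_print_context() emits
 * the default per-event prefix, roughly
 *
 *	            bash-2132  [001]  1016.246016:
 *
 * i.e. comm-pid, the CPU, and a seconds.microseconds timestamp (the
 * values above are made up).
 */
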
int trace_print_lat_context(struct trace_iterator *iter)
{
	u64 next_ts;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent,
			   *next_entry = trace_find_next_entry(iter, NULL,
							       &next_ts);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
	unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
	unsigned long rel_usecs;

	if (!next_entry)
		next_ts = iter->ts;
	rel_usecs = ns2usecs(next_ts - iter->ts);

	if (verbose) {
		char *comm = trace_find_cmdline(entry->pid);
		trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08lx]"
				 " %ld.%03ldms (+%ld.%03ldms): ",
				 comm,
				 entry->pid, entry->cpu, entry->flags,
				 entry->preempt_count, iter->idx,
				 ns2usecs(iter->ts),
				 abs_usecs/1000,
				 abs_usecs % 1000, rel_usecs/1000,
				 rel_usecs % 1000);
	} else {
		lat_print_generic(s, entry, entry->cpu);
		lat_print_timestamp(s, abs_usecs, rel_usecs);
	}

	return 0;
}

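/*
 * Illustrative output (not in the original): in the non-verbose case the
 * latency prefix built by lat_print_generic() and lat_print_timestamp()
 * looks roughly like
 *
 *	    bash-2132    0d.h1    5us :
 *
 * comm-pid, CPU, the irqs-off/resched/irq-context/preempt-count flags,
 * and the absolute timestamp, with '!' or '+' replacing the ':' marker
 * when the delta to the next event is large (values made up).
 */
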
static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static int task_state_char(unsigned long state)
{
	int bit = state ? __ffs(state) + 1 : 0;

	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}

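/*
 * Worked example (not in the original): task_state_char() maps the lowest
 * set state bit into TASK_STATE_TO_CHAR_STR, so 0 (running) becomes 'R',
 * TASK_INTERRUPTIBLE (0x1) becomes 'S', TASK_UNINTERRUPTIBLE (0x2)
 * becomes 'D', and anything past the known states prints as '?'
 * (assuming the conventional "RSDTtZX..." ordering of that string).
 */
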
/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type, otherwise NULL
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	struct hlist_node *n;
	unsigned key;

	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry_rcu(event, n, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}

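/*
 * Note (not in the original): because EVENT_HASHSIZE is a power of 2,
 * "type & (EVENT_HASHSIZE - 1)" is simply type modulo 128; e.g. an event
 * type of 130 lands in bucket 2.
 */
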
/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	mutex_lock(&trace_event_mutex);

	if (!event->type)
		event->type = next_event_type++;
	else if (event->type > __TRACE_LAST_TYPE) {
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
	}

	if (ftrace_find_event(event->type))
		goto out;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head_rcu(&event->node, &event_hash[key]);

	ret = event->type;
 out:
	mutex_unlock(&trace_event_mutex);

	return ret;
}

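/*
 * Illustrative usage sketch (not part of the original file): a tracer
 * defining its own entry type would wire up its output callbacks roughly
 * like this (TRACE_MY_EVENT and my_trace_output are hypothetical):
 *
 *	static struct trace_event my_trace_event = {
 *		.type	= TRACE_MY_EVENT,
 *		.trace	= my_trace_output,
 *	};
 *
 *	if (!register_ftrace_event(&my_trace_event))
 *		printk(KERN_WARNING "my_trace_event failed to register\n");
 *
 * and detach it again with unregister_ftrace_event(&my_trace_event).
 */
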
/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 */
int unregister_ftrace_event(struct trace_event *event)
{
	mutex_lock(&trace_event_mutex);
	hlist_del(&event->node);
	mutex_unlock(&trace_event_mutex);

	return 0;
}

/*
 * Standard events
 */

int
trace_nop_print(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	return 0;
}

/* TRACE_FN */
static int
trace_fn_latency(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	struct ftrace_entry *field;

	trace_assign_type(field, entry);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;
	if (!trace_seq_puts(s, " ("))
		goto partial;
	if (!seq_print_ip_sym(s, field->parent_ip, flags))
		goto partial;
	if (!trace_seq_puts(s, ")\n"))
		goto partial;

	return 0;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static int
trace_fn_trace(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	struct ftrace_entry *field;

	trace_assign_type(field, entry);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
		if (!trace_seq_printf(s, " <-"))
			goto partial;
		if (!seq_print_ip_sym(s,
				      field->parent_ip,
				      flags))
			goto partial;
	}
	if (!trace_seq_printf(s, "\n"))
		goto partial;

	return 0;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static int
trace_fn_raw(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	struct ftrace_entry *field;

	trace_assign_type(field, entry);

	if (!trace_seq_printf(s, "%lx %lx\n",
			      field->ip,
			      field->parent_ip))
		return TRACE_TYPE_PARTIAL_LINE;

	return 0;
}

static int
trace_fn_hex(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	struct ftrace_entry *field;

	trace_assign_type(field, entry);

	SEQ_PUT_HEX_FIELD_RET(s, field->ip);
	SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);

	return 0;
}

static int
trace_fn_bin(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	struct ftrace_entry *field;

	trace_assign_type(field, entry);

	SEQ_PUT_FIELD_RET(s, field->ip);
	SEQ_PUT_FIELD_RET(s, field->parent_ip);

	return 0;
}

static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.trace		= trace_fn_trace,
	.latency_trace	= trace_fn_latency,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};

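/*
 * Illustrative output (not in the original): for a single TRACE_FN entry
 * the callbacks above render, with kallsyms available, roughly
 *
 *	.trace:	schedule_timeout <-schedule
 *	.raw:	c0123456 c0123400
 *
 * while .hex and .binary dump the two addresses as hex digits or raw
 * machine words (symbol names and addresses made up).
 */
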
/* TRACE_CTX and TRACE_WAKE */
static int
trace_ctxwake_print(struct trace_seq *s, struct trace_entry *entry, int flags,
		    char *delim)
{
	struct ctx_switch_entry *field;
	char *comm;
	int S, T;

	trace_assign_type(field, entry);

	T = task_state_char(field->next_state);
	S = task_state_char(field->prev_state);
	comm = trace_find_cmdline(field->next_pid);
	if (!trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
			      field->prev_pid,
			      field->prev_prio,
			      S, delim,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T, comm))
		return TRACE_TYPE_PARTIAL_LINE;

	return 0;
}

static int
trace_ctx_print(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	return trace_ctxwake_print(s, entry, flags, "==>");
}

static int
trace_wake_print(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	return trace_ctxwake_print(s, entry, flags, " +");
}

static int
trace_ctxwake_raw(struct trace_seq *s, struct trace_entry *entry, int flags,
		  char S)
{
	struct ctx_switch_entry *field;
	int T;

	trace_assign_type(field, entry);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);
	if (!trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
			      field->prev_pid,
			      field->prev_prio,
			      S,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T))
		return TRACE_TYPE_PARTIAL_LINE;

	return 0;
}

static int
trace_ctx_raw(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	return trace_ctxwake_raw(s, entry, flags, 0);
}

static int
trace_wake_raw(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	return trace_ctxwake_raw(s, entry, flags, '+');
}


static int
trace_ctxwake_hex(struct trace_seq *s, struct trace_entry *entry, int flags,
		  char S)
{
	struct ctx_switch_entry *field;
	int T;

	trace_assign_type(field, entry);

	if (!S)
		S = task_state_char(field->prev_state);
	T = task_state_char(field->next_state);

	SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_HEX_FIELD_RET(s, S);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
	SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
	SEQ_PUT_HEX_FIELD_RET(s, T);

	return 0;
}

static int
trace_ctx_hex(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	return trace_ctxwake_hex(s, entry, flags, 0);
}

static int
trace_wake_hex(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	return trace_ctxwake_hex(s, entry, flags, '+');
}

static int
trace_ctxwake_bin(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	struct ctx_switch_entry *field;

	trace_assign_type(field, entry);

	SEQ_PUT_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_FIELD_RET(s, field->prev_state);
	SEQ_PUT_FIELD_RET(s, field->next_pid);
	SEQ_PUT_FIELD_RET(s, field->next_prio);
	SEQ_PUT_FIELD_RET(s, field->next_state);

	return 0;
}

static struct trace_event trace_ctx_event = {
	.type		= TRACE_CTX,
	.trace		= trace_ctx_print,
	.latency_trace	= trace_ctx_print,
	.raw		= trace_ctx_raw,
	.hex		= trace_ctx_hex,
	.binary		= trace_ctxwake_bin,
};

static struct trace_event trace_wake_event = {
	.type		= TRACE_WAKE,
	.trace		= trace_wake_print,
	.latency_trace	= trace_wake_print,
	.raw		= trace_wake_raw,
	.hex		= trace_wake_hex,
	.binary		= trace_ctxwake_bin,
};

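/*
 * Illustrative output (not in the original): a context switch or wakeup
 * entry printed through trace_ctxwake_print() comes out roughly as
 *
 *	 2078:120:S ==> [001]  2080:120:R bash
 *
 * with " +" instead of "==>" for TRACE_WAKE (pids, prios and comm are
 * made up).
 */
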
/* TRACE_SPECIAL */
static int
trace_special_print(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	struct special_entry *field;

	trace_assign_type(field, entry);

	if (!trace_seq_printf(s, "# %ld %ld %ld\n",
			      field->arg1,
			      field->arg2,
			      field->arg3))
		return TRACE_TYPE_PARTIAL_LINE;

	return 0;
}

static int
trace_special_hex(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	struct special_entry *field;

	trace_assign_type(field, entry);

	SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
	SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
	SEQ_PUT_HEX_FIELD_RET(s, field->arg3);

	return 0;
}

static int
trace_special_bin(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	struct special_entry *field;

	trace_assign_type(field, entry);

	SEQ_PUT_FIELD_RET(s, field->arg1);
	SEQ_PUT_FIELD_RET(s, field->arg2);
	SEQ_PUT_FIELD_RET(s, field->arg3);

	return 0;
}

static struct trace_event trace_special_event = {
	.type		= TRACE_SPECIAL,
	.trace		= trace_special_print,
	.latency_trace	= trace_special_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};

/* TRACE_STACK */

static int
trace_stack_print(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	struct stack_entry *field;
	int i;

	trace_assign_type(field, entry);

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		if (i) {
			if (!trace_seq_puts(s, " <= "))
				goto partial;

			if (!seq_print_ip_sym(s, field->caller[i], flags))
				goto partial;
		}
		if (!trace_seq_puts(s, "\n"))
			goto partial;
	}

	return 0;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_stack_event = {
	.type		= TRACE_STACK,
	.trace		= trace_stack_print,
	.latency_trace	= trace_stack_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};

/* TRACE_USER_STACK */
static int
trace_user_stack_print(struct trace_seq *s, struct trace_entry *entry,
		       int flags)
{
	struct userstack_entry *field;

	trace_assign_type(field, entry);

	if (!seq_print_userip_objs(field, s, flags))
		goto partial;

	if (!trace_seq_putc(s, '\n'))
		goto partial;

	return 0;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_user_stack_event = {
	.type		= TRACE_USER_STACK,
	.trace		= trace_user_stack_print,
	.latency_trace	= trace_user_stack_print,
	.raw		= trace_special_print,
	.hex		= trace_special_hex,
	.binary		= trace_special_bin,
};

/* TRACE_PRINT */
static int
trace_print_print(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	struct print_entry *field;

	trace_assign_type(field, entry);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if (!trace_seq_printf(s, ": %s", field->buf))
		goto partial;

	return 0;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static int
trace_print_raw(struct trace_seq *s, struct trace_entry *entry, int flags)
{
	struct print_entry *field;

	trace_assign_type(field, entry);

	if (!trace_seq_printf(s, "# %lx %s", field->ip, field->buf))
		goto partial;

	return 0;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}

static struct trace_event trace_print_event = {
	.type		= TRACE_PRINT,
	.trace		= trace_print_print,
	.latency_trace	= trace_print_print,
	.raw		= trace_print_raw,
	.hex		= trace_nop_print,
	.binary		= trace_nop_print,
};

static struct trace_event *events[] __initdata = {
	&trace_fn_event,
	&trace_ctx_event,
	&trace_wake_event,
	&trace_special_event,
	&trace_stack_event,
	&trace_user_stack_event,
	&trace_print_event,
	NULL
};

__init static int init_events(void)
{
	struct trace_event *event;
	int i, ret;

	for (i = 0; events[i]; i++) {
		event = events[i];

		ret = register_ftrace_event(event);
		if (!ret) {
			printk(KERN_WARNING "event %d failed to register\n",
			       event->type);
			WARN_ON_ONCE(1);
		}
	}

	return 0;
}
device_initcall(init_events);