/*
 * trace_output.c
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>

#include "trace_output.h"

/* must be a power of 2 */
#define EVENT_HASHSIZE	128

static DEFINE_MUTEX(trace_event_mutex);
static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;

static int next_event_type = __TRACE_LAST_TYPE + 1;

/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * The tracer may use either sequence operations or its own
 * copy to user routines. To simplify formatting of a trace,
 * trace_seq_printf is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
	int len = (PAGE_SIZE - 1) - s->len;
	va_list ap;
	int ret;

	if (!len)
		return 0;

	va_start(ap, fmt);
	ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
	va_end(ap);

	/* If we can't write it all, don't bother writing anything */
	if (ret >= len)
		return 0;

	s->len += ret;

	return len;
}
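
/*
 * Example (illustrative sketch; the helper and its arguments are
 * hypothetical): a print handler formats its record straight into the
 * trace_seq handed to it.  A return value of 0 from trace_seq_printf()
 * means the text did not fit in the remaining space of the page-sized
 * buffer, so nothing was written and the caller may try again once the
 * buffer has been flushed.
 */
static int __maybe_unused
example_print_latency(struct trace_seq *s, unsigned long usecs, int cpu)
{
	if (!trace_seq_printf(s, "cpu=%d latency=%luus\n", cpu, usecs))
		return 0;	/* buffer full, record dropped for now */

	return 1;
}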

/**
 * trace_seq_puts - trace sequence printing of simple string
 * @s: trace sequence descriptor
 * @str: simple string to record
 *
 * The tracer may use either the sequence operations or its own
 * copy to user routines. This function records a simple string
 * into a special buffer (@s) for later retrieval by a sequencer
 * or other mechanism.
 */
int trace_seq_puts(struct trace_seq *s, const char *str)
{
	int len = strlen(str);

	if (len > ((PAGE_SIZE - 1) - s->len))
		return 0;

	memcpy(s->buffer + s->len, str, len);
	s->len += len;

	return len;
}
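
/*
 * Example (illustrative sketch, hypothetical helper): fixed strings and
 * single characters can be appended with trace_seq_puts() and
 * trace_seq_putc(), avoiding the vsnprintf() overhead of
 * trace_seq_printf(); seq_print_userip_objs() later in this file uses
 * trace_seq_puts() this way for its " <- " separators.  As above, a
 * return of 0 means the buffer had no room for the text.
 */
static int __maybe_unused
example_append_marker(struct trace_seq *s, const char *marker)
{
	if (!trace_seq_puts(s, marker))
		return 0;

	return trace_seq_putc(s, '\n');
}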

int trace_seq_putc(struct trace_seq *s, unsigned char c)
{
	if (s->len >= (PAGE_SIZE - 1))
		return 0;

	s->buffer[s->len++] = c;

	return 1;
}

int trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
{
	if (len > ((PAGE_SIZE - 1) - s->len))
		return 0;

	memcpy(s->buffer + s->len, mem, len);
	s->len += len;

	return len;
}

/*
 * Write @len bytes of @mem as hex digits, ordered so that a native-endian
 * value prints most significant byte first, and append a trailing space.
 */
int trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
{
	unsigned char hex[HEX_CHARS];
	unsigned char *data = mem;
	int i, j;

#ifdef __BIG_ENDIAN
	for (i = 0, j = 0; i < len; i++) {
#else
	for (i = len-1, j = 0; i >= 0; i--) {
#endif
		hex[j++] = hex_asc_hi(data[i]);
		hex[j++] = hex_asc_lo(data[i]);
	}
	hex[j++] = ' ';

	return trace_seq_putmem(s, hex, j);
}

int trace_seq_path(struct trace_seq *s, struct path *path)
{
	unsigned char *p;

	if (s->len >= (PAGE_SIZE - 1))
		return 0;
	p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
	if (!IS_ERR(p)) {
		p = mangle_path(s->buffer + s->len, p, "\n");
		if (p) {
			s->len = p - s->buffer;
			return 1;
		}
	} else {
		s->buffer[s->len++] = '?';
		return 1;
	}

	return 0;
}

#ifdef CONFIG_KRETPROBES
/*
 * A kretprobe replaces the return address of the probed function with
 * its trampoline, so symbol lookups on such addresses resolve to
 * "kretprobe_trampoline"; report those entries as unknown instead.
 */
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */

static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	return 1;
}

#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif

int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
		      unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	if (mm) {
		const struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, &file->f_path);
			if (ret)
				ret = trace_seq_printf(s, "[+0x%lx]",
						       ip - vmstart);
		}
		up_read(&mm->mmap_sem);
	}
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}

int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
		      unsigned long sym_flags)
{
	struct mm_struct *mm = NULL;
	int ret = 1;
	unsigned int i;

	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(entry->ent.tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = entry->caller[i];

		if (ip == ULONG_MAX || !ret)
			break;
		if (i && ret)
			ret = trace_seq_puts(s, " <- ");
		if (!ip) {
			if (ret)
				ret = trace_seq_puts(s, "??");
			continue;
		}
		if (!ret)
			break;
		if (ret)
			ret = seq_print_user_ip(s, mm, ip, sym_flags);
	}

	if (mm)
		mmput(mm);
	return ret;
}

int
seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
{
	int ret;

	if (!ip)
		return trace_seq_printf(s, "0");

	if (sym_flags & TRACE_ITER_SYM_OFFSET)
		ret = seq_print_sym_offset(s, "%s", ip);
	else
		ret = seq_print_sym_short(s, "%s", ip);

	if (!ret)
		return 0;

	if (sym_flags & TRACE_ITER_SYM_ADDR)
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}

/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type, otherwise NULL.
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	struct hlist_node *n;
	unsigned key;

	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry_rcu(event, n, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}
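
/*
 * Example (illustrative sketch, hypothetical helper): since the hash
 * chains are walked with hlist_for_each_entry_rcu(), this sketch
 * assumes that a lookup made without trace_event_mutex held runs under
 * rcu_read_lock().  It simply reports whether a type is already taken.
 */
static int __maybe_unused example_type_is_registered(int type)
{
	int found;

	rcu_read_lock();
	found = ftrace_find_event(type) != NULL;
	rcu_read_unlock();

	return found;
}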

/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If @event->type is set, then
 * it will use that type; otherwise it will assign a type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	mutex_lock(&trace_event_mutex);

	if (!event->type)
		event->type = next_event_type++;
	else if (event->type > __TRACE_LAST_TYPE) {
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
	}

	if (ftrace_find_event(event->type))
		goto out;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head_rcu(&event->node, &event_hash[key]);

	ret = event->type;
 out:
	mutex_unlock(&trace_event_mutex);

	return ret;
}
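
/*
 * Example (illustrative sketch; the names below are hypothetical): an
 * output plugin registers a struct trace_event so the type stamped on
 * its ring buffer entries can later be resolved back to it.  Only the
 * .type field, which this file itself uses, is set here.  Leaving it at
 * zero asks register_ftrace_event() to hand out the next free dynamic
 * type; a non-zero return is the type that was assigned.
 */
static struct trace_event example_event = {
	.type	= 0,	/* 0: let register_ftrace_event() pick a type */
};

static int example_event_type;

static int __maybe_unused example_register(void)
{
	example_event_type = register_ftrace_event(&example_event);

	/* A return of zero means the registration failed. */
	return example_event_type ? 0 : -1;
}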

/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 */
int unregister_ftrace_event(struct trace_event *event)
{
	mutex_lock(&trace_event_mutex);
	hlist_del(&event->node);
	mutex_unlock(&trace_event_mutex);

	return 0;
}
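
/*
 * Example (illustrative sketch, continuing the hypothetical
 * registration sketch above): the plugin drops its event again when it
 * is torn down, for instance from a module exit path, after which the
 * type can no longer be resolved by ftrace_find_event().
 */
static void __maybe_unused example_unregister(void)
{
	unregister_ftrace_event(&example_event);
	example_event_type = 0;
}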