#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

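/*
 * One probe serves all syscall events; the refcounts track how many
 * events currently need it registered, and the bitmaps record which
 * individual syscalls are enabled for entry/exit tracing.
 */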
static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

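/*
 * Resolve the handler address of a syscall to its compile-time
 * metadata by looking up the symbol name and scanning the
 * __syscalls_metadata section for a matching entry.
 */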
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata *start;
	struct syscall_metadata *stop;
	char str[KSYM_SYMBOL_LEN];

	start = (struct syscall_metadata *)__start_syscalls_metadata;
	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	for ( ; start < stop; start++) {
		/*
		 * Only compare after the "sys" prefix. Archs that use
		 * syscall wrappers may have syscall symbol aliases prefixed
		 * with "SyS" instead of "sys", leading to an unwanted
		 * mismatch.
		 */
		if (start->name && !strcmp(start->name + 3, str + 3))
			return start;
	}
	return NULL;
}

static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

int syscall_name_to_nr(char *name)
{
	int i;

	if (!syscalls_metadata)
		return -1;

	for (i = 0; i < NR_syscalls; i++) {
		if (syscalls_metadata[i]) {
			if (!strcmp(syscalls_metadata[i]->name, name))
				return i;
		}
	}
	return -1;
}

void set_syscall_enter_id(int num, int id)
{
	syscalls_metadata[num]->enter_id = id;
}

void set_syscall_exit_id(int num, int id)
{
	syscalls_metadata[num]->exit_id = id;
}

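/*
 * Render a syscall entry event as one line of trace output, roughly
 * (illustrative example; names and values depend on the syscall):
 *
 *	sys_read(fd: 3, buf: 7fff8090, count: 1000)
 *
 * In verbose mode each value is preceded by its parameter type.
 */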
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_id != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

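/*
 * Render a syscall exit event, roughly (illustrative example):
 *
 *	sys_read -> 0x400
 */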
enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_id != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			       trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

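/*
 * __bad_type_size() is never defined anywhere: if SYSCALL_FIELD() is
 * used with a type whose size differs from the matching struct member,
 * the call survives constant folding and breaks the build at link
 * time, turning a size mismatch into a build error.
 */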
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)				\
	sizeof(type) != sizeof(trace.name) ?			\
		__bad_type_size() :				\
		#type, #name, offsetof(typeof(trace), name),	\
		sizeof(trace.name), is_signed_type(type)

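/*
 * Write the format description that userspace reads from
 * .../events/syscalls/<event>/format.  For sys_close the output would
 * look roughly like this (offsets are illustrative and arch dependent):
 *
 *	field:int nr;	offset:12;	size:4;	signed:1;
 *	field:unsigned int fd;	offset:16;	size:8;	signed:0;
 *
 *	print fmt: "fd: 0x%08lx", ((unsigned long)(REC->fd))
 */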
int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
{
	int i;
	int nr;
	int ret;
	struct syscall_metadata *entry;
	struct syscall_trace_enter trace;
	int offset = offsetof(struct syscall_trace_enter, args);

	nr = syscall_name_to_nr(call->data);
	entry = syscall_nr_to_meta(nr);

	if (!entry)
		return 0;

	ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
			       "\tsigned:%u;\n",
			       SYSCALL_FIELD(int, nr));
	if (!ret)
		return 0;

	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, "\tfield:%s %s;", entry->types[i],
				       entry->args[i]);
		if (!ret)
			return 0;
		ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;"
				       "\tsigned:%u;\n", offset,
				       sizeof(unsigned long),
				       is_signed_type(unsigned long));
		if (!ret)
			return 0;
		offset += sizeof(unsigned long);
	}

	trace_seq_puts(s, "\nprint fmt: \"");
	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, "%s: 0x%%0%zulx%s", entry->args[i],
				       sizeof(unsigned long),
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return 0;
	}
	trace_seq_putc(s, '"');

	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, ", ((unsigned long)(REC->%s))",
				       entry->args[i]);
		if (!ret)
			return 0;
	}

	return trace_seq_putc(s, '\n');
}

int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
{
	int ret;
	struct syscall_trace_exit trace;

	ret = trace_seq_printf(s,
			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
			       "\tsigned:%u;\n"
			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
			       "\tsigned:%u;\n",
			       SYSCALL_FIELD(int, nr),
			       SYSCALL_FIELD(long, ret));
	if (!ret)
		return 0;

	return trace_seq_printf(s, "\nprint fmt: \"0x%%lx\", REC->ret\n");
}

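/*
 * Register the event's fields with the filtering core so that filter
 * expressions (e.g. 'nr == 3' written to the event's filter file) can
 * be evaluated against recorded syscall events.
 */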
int syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta;
	int ret;
	int nr;
	int i;
	int offset = offsetof(typeof(trace), args);

	nr = syscall_name_to_nr(call->data);
	meta = syscall_nr_to_meta(nr);

	if (!meta)
		return 0;

	ret = trace_define_common_fields(call);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

int syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_common_fields(call);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}

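/*
 * Tracepoint probe fired on every syscall entry.  It looks up the
 * syscall's metadata, reserves a ring buffer event large enough for
 * the argument array, fills it in and commits it unless the event
 * filter discards it first.
 */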
void ftrace_syscall_enter(struct pt_regs *regs, long id)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(&buffer, sys_data->enter_id,
						  size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

void ftrace_syscall_exit(struct pt_regs *regs, long ret)
{
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	event = trace_current_buffer_lock_reserve(&buffer, sys_data->exit_id,
						  sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

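/*
 * Registration is refcounted: the shared probe is attached when the
 * first syscall event is enabled and detached when the last one is
 * disabled, while the per-syscall bitmaps gate which syscalls record.
 */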
int reg_event_syscall_enter(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;
	char *name;

	name = (char *)call->data;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall entry trace point\n");
	} else {
		set_bit(num, enabled_enter_syscalls);
		sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
	int num;
	char *name;

	name = (char *)call->data;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_enter--;
	clear_bit(num, enabled_enter_syscalls);
	if (!sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter);
	mutex_unlock(&syscall_trace_lock);
}

int reg_event_syscall_exit(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;
	char *name;

	name = call->data;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall exit trace point\n");
	} else {
		set_bit(num, enabled_exit_syscalls);
		sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
	int num;
	char *name;

	name = call->data;
	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_exit--;
	clear_bit(num, enabled_exit_syscalls);
	if (!sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit);
	mutex_unlock(&syscall_trace_lock);
}

struct trace_event event_syscall_enter = {
	.trace			= print_syscall_enter,
};

struct trace_event event_syscall_exit = {
	.trace			= print_syscall_exit,
};

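/*
 * Build the syscall number -> metadata table at boot by resolving
 * each syscall handler address and matching it against the metadata
 * section by symbol name.
 */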
int __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
				    NR_syscalls, GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		syscalls_metadata[i] = meta;
	}

	return 0;
}
core_initcall(init_ftrace_syscalls);

#ifdef CONFIG_EVENT_PROFILE

static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
static int sys_prof_refcount_enter;
static int sys_prof_refcount_exit;

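/*
 * Perf (profile) variant of the entry probe: the record is assembled
 * in a per-cpu scratch buffer and handed to perf via perf_tp_event()
 * instead of going through the ftrace ring buffer.  Interrupts are
 * disabled to protect the buffer and to hold the RCU read side open.
 */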
static void prof_syscall_enter(struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct perf_trace_buf *trace_buf;
	struct syscall_trace_enter *rec;
	unsigned long flags;
	char *raw_data;
	int syscall_nr;
	int size;
	int cpu;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "profile buffer not large enough"))
		return;

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(flags);

	cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	trace_buf = per_cpu_ptr(trace_buf, cpu);

	if (trace_buf->recursion++)
		goto end_recursion;

	/*
	 * Make recursion update visible before entering perf_tp_event
	 * so that we protect from perf recursions.
	 */
	barrier();

	raw_data = trace_buf->buf;

	/* zero the dead bytes from align to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	rec = (struct syscall_trace_enter *) raw_data;
	tracing_generic_entry_update(&rec->ent, 0, 0);
	rec->ent.type = sys_data->enter_id;
	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			      (unsigned long *)&rec->args);
	perf_tp_event(sys_data->enter_id, 0, 1, rec, size);

end_recursion:
	trace_buf->recursion--;
end:
	local_irq_restore(flags);
}

int reg_prof_syscall_enter(char *name)
{
	int ret = 0;
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;

	mutex_lock(&syscall_trace_lock);
	if (!sys_prof_refcount_enter)
		ret = register_trace_sys_enter(prof_syscall_enter);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall entry trace point\n");
	} else {
		set_bit(num, enabled_prof_enter_syscalls);
		sys_prof_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_prof_syscall_enter(char *name)
{
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;

	mutex_lock(&syscall_trace_lock);
	sys_prof_refcount_enter--;
	clear_bit(num, enabled_prof_enter_syscalls);
	if (!sys_prof_refcount_enter)
		unregister_trace_sys_enter(prof_syscall_enter);
	mutex_unlock(&syscall_trace_lock);
}

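/* Perf (profile) variant of the exit probe; see prof_syscall_enter(). */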
static void prof_syscall_exit(struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct perf_trace_buf *trace_buf;
	unsigned long flags;
	int syscall_nr;
	char *raw_data;
	int size;
	int cpu;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * Impossible today, but be paranoid about future growth.
	 * Ideally this check would happen at build time.
	 */
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "exit event has grown above profile buffer size"))
		return;

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(flags);
	cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	trace_buf = per_cpu_ptr(trace_buf, cpu);

	if (trace_buf->recursion++)
		goto end_recursion;

	/*
	 * Make recursion update visible before entering perf_tp_event
	 * so that we protect from perf recursions.
	 */
	barrier();

	raw_data = trace_buf->buf;

	/* zero the dead bytes from align to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	rec = (struct syscall_trace_exit *)raw_data;

	tracing_generic_entry_update(&rec->ent, 0, 0);
	rec->ent.type = sys_data->exit_id;
	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	perf_tp_event(sys_data->exit_id, 0, 1, rec, size);

end_recursion:
	trace_buf->recursion--;
end:
	local_irq_restore(flags);
}

int reg_prof_syscall_exit(char *name)
{
	int ret = 0;
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;

	mutex_lock(&syscall_trace_lock);
	if (!sys_prof_refcount_exit)
		ret = register_trace_sys_exit(prof_syscall_exit);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall exit trace point\n");
	} else {
		set_bit(num, enabled_prof_exit_syscalls);
		sys_prof_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_prof_syscall_exit(char *name)
{
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;

	mutex_lock(&syscall_trace_lock);
	sys_prof_refcount_exit--;
	clear_bit(num, enabled_prof_exit_syscalls);
	if (!sys_prof_refcount_exit)
		unregister_trace_sys_exit(prof_syscall_exit);
	mutex_unlock(&syscall_trace_lock);
}

#endif