// SPDX-License-Identifier: GPL-2.0
#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"
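
/*
 * Protects the sys_refcount_* / sys_perf_refcount_* counters and the
 * (un)registration of the sys_enter/sys_exit tracepoint probes below.
 */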
static DEFINE_MUTEX(syscall_trace_lock);

static int syscall_enter_register(struct trace_event_call *event,
				  enum trace_reg type, void *data);
static int syscall_exit_register(struct trace_event_call *event,
				 enum trace_reg type, void *data);

static struct list_head *
syscall_get_enter_fields(struct trace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	return &entry->enter_fields;
}

extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];
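
/*
 * Table indexed by syscall number, kcalloc()'ed to NR_syscalls entries
 * and populated at boot by init_ftrace_syscalls() below; a NULL slot
 * means no metadata was found for that syscall.
 */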
static struct syscall_metadata **syscalls_metadata;

#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/*
	 * Only compare after the "sys" prefix. Archs that use
	 * syscall wrappers may have syscall symbols aliased with a
	 * ".SyS" or ".sys" prefix instead of "sys", leading to an
	 * unwanted mismatch.
	 */
	return !strcmp(sym + 3, name + 3);
}
#endif
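
/*
 * e.g. a wrapped symbol "SyS_read" and the metadata name "sys_read"
 * compare equal here: both reduce to "_read" once the three-byte
 * "SyS"/"sys" prefix is skipped.
 */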

#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
/*
 * Some architectures that allow for 32bit applications
 * to run on a 64bit kernel do not map the syscalls for
 * the 32bit tasks the same as they do for 64bit tasks.
 *
 * In such a case, instead of reporting the wrong syscalls,
 * simply ignore them.
 *
 * For an arch to ignore the compat syscalls it needs to
 * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
 * define the function arch_trace_is_compat_syscall() to let
 * the tracing system know that it should ignore it.
 */
static int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	if (unlikely(arch_trace_is_compat_syscall(regs)))
		return -1;

	return syscall_get_nr(task, regs);
}
#else
static inline int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
	return syscall_get_nr(task, regs);
}
#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */
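
/*
 * x86, for instance, defines ARCH_TRACE_IGNORE_COMPAT_SYSCALLS and
 * provides arch_trace_is_compat_syscall() in its <asm/ftrace.h>
 * (an illustrative pointer; see the arch headers for the details).
 */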

static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
	char str[KSYM_SYMBOL_LEN];

	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
		return NULL;

	for ( ; start < stop; start++) {
		if ((*start)->name &&
		    arch_syscall_match_sym_name(str, (*start)->name))
			return *start;
	}
	return NULL;
}

static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

const char *get_syscall_name(int syscall)
{
	struct syscall_metadata *entry;

	entry = syscall_nr_to_meta(syscall);
	if (!entry)
		return NULL;

	return entry->name;
}

static enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	trace_seq_printf(s, "%s(", entry->name);

	for (i = 0; i < entry->nb_args; i++) {

		if (trace_seq_has_overflowed(s))
			goto end;

		/* parameter types */
		if (tr->trace_flags & TRACE_ITER_VERBOSE)
			trace_seq_printf(s, "%s ", entry->types[i]);

		/* parameter values */
		trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				 trace->args[i],
				 i == entry->nb_args - 1 ? "" : ", ");
	}

	trace_seq_putc(s, ')');
end:
	trace_seq_putc(s, '\n');

	return trace_handle_return(s);
}
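
/*
 * Sketch of the resulting text for a three-argument syscall, assuming
 * verbose printing is off (argument names come from the metadata, the
 * values are printed in hex):
 *
 *	sys_read(fd: 3, buf: 7ffc3517e690, count: 400)
 */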

static enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
		   struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_putc(s, '\n');
		goto out;
	}

	if (entry->exit_event->event.type != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
			 trace->ret);

 out:
	return trace_handle_return(s);
}

extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, field, name)				\
	sizeof(type) != sizeof(trace.field) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), field),		\
		sizeof(trace.field), is_signed_type(type)
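
/*
 * When the size check passes, SYSCALL_FIELD(int, nr, __syscall_nr)
 * effectively passes the argument list
 *
 *	"int", "__syscall_nr", offsetof(typeof(trace), nr),
 *	sizeof(trace.nr), is_signed_type(int)
 *
 * to trace_define_field() below, while a mismatched size leaves a call
 * to the never-defined __bad_type_size() and thus fails at link time.
 */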

static int __init
__set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
	int i;
	int pos = 0;

	/* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
				entry->args[i], sizeof(unsigned long),
				i == entry->nb_args - 1 ? "" : ", ");
	}
	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	for (i = 0; i < entry->nb_args; i++) {
		pos += snprintf(buf + pos, LEN_OR_ZERO,
				", ((unsigned long)(REC->%s))", entry->args[i]);
	}

#undef LEN_OR_ZERO

	/* return the length of print_fmt */
	return pos;
}

static int __init set_syscall_print_fmt(struct trace_event_call *call)
{
	char *print_fmt;
	int len;
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event != call) {
		call->print_fmt = "\"0x%lx\", REC->ret";
		return 0;
	}

	/* First: called with 0 length to calculate the needed length */
	len = __set_enter_print_fmt(entry, NULL, 0);

	print_fmt = kmalloc(len + 1, GFP_KERNEL);
	if (!print_fmt)
		return -ENOMEM;

	/* Second: actually write the @print_fmt */
	__set_enter_print_fmt(entry, print_fmt, len + 1);
	call->print_fmt = print_fmt;

	return 0;
}
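
/*
 * For a two-argument syscall such as dup2(oldfd, newfd) on a 64-bit
 * arch, the generated enter print_fmt would read (a sketch):
 *
 *	"oldfd: 0x%08lx, newfd: 0x%08lx",
 *	((unsigned long)(REC->oldfd)), ((unsigned long)(REC->newfd))
 */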

static void __init free_syscall_print_fmt(struct trace_event_call *call)
{
	struct syscall_metadata *entry = call->data;

	if (entry->enter_event == call)
		kfree(call->print_fmt);
}

static int __init syscall_enter_define_fields(struct trace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr, __syscall_nr),
				 FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

static int __init syscall_exit_define_fields(struct trace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr, __syscall_nr),
				 FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret, ret),
				 FILTER_OTHER);

	return ret;
}

static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
{
	struct trace_array *tr = data;
	struct trace_event_file *trace_file;
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	unsigned long args[6];
	int syscall_nr;
	int size;
	int pc;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;

	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
	trace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
	if (!trace_file)
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->enter_event->event.type, size, irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, args);
	memcpy(entry->args, args, sizeof(unsigned long) * sys_data->nb_args);

	event_trigger_unlock_commit(trace_file, buffer, event, entry,
				    irq_flags, pc);
}
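
/*
 * The reserved enter record is the fixed struct syscall_trace_enter
 * header followed inline by nb_args unsigned longs, hence the
 * sizeof(*entry) + nb_args * sizeof(unsigned long) sizing above; the
 * exit record below is fixed-size since it only carries the return
 * value.
 */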

static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
{
	struct trace_array *tr = data;
	struct trace_event_file *trace_file;
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	unsigned long irq_flags;
	int syscall_nr;
	int pc;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;

	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
	trace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
	if (!trace_file)
		return;

	if (trace_trigger_soft_disabled(trace_file))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	local_save_flags(irq_flags);
	pc = preempt_count();

	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer,
			sys_data->exit_event->event.type, sizeof(*entry),
			irq_flags, pc);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	event_trigger_unlock_commit(trace_file, buffer, event, entry,
				    irq_flags, pc);
}

static int reg_event_syscall_enter(struct trace_event_file *file,
				   struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
	if (!ret) {
		rcu_assign_pointer(tr->enter_syscall_files[num], file);
		tr->sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}
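
/*
 * Only the first enabled syscall event in a trace instance registers
 * the sys_enter tracepoint probe; later ones just publish their file
 * pointer with rcu_assign_pointer() and bump sys_refcount_enter. The
 * unregister path below mirrors this.
 */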

static void unreg_event_syscall_enter(struct trace_event_file *file,
				      struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_enter--;
	RCU_INIT_POINTER(tr->enter_syscall_files[num], NULL);
	if (!tr->sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
	mutex_unlock(&syscall_trace_lock);
}

static int reg_event_syscall_exit(struct trace_event_file *file,
				  struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!tr->sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
	if (!ret) {
		rcu_assign_pointer(tr->exit_syscall_files[num], file);
		tr->sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void unreg_event_syscall_exit(struct trace_event_file *file,
				     struct trace_event_call *call)
{
	struct trace_array *tr = file->tr;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_exit--;
	RCU_INIT_POINTER(tr->exit_syscall_files[num], NULL);
	if (!tr->sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
	mutex_unlock(&syscall_trace_lock);
}

static int __init init_syscall_trace(struct trace_event_call *call)
{
	int id;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls) {
		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
				((struct syscall_metadata *)call->data)->name);
		return -ENOSYS;
	}

	if (set_syscall_print_fmt(call) < 0)
		return -ENOMEM;

	id = trace_event_raw_init(call);

	if (id < 0) {
		free_syscall_print_fmt(call);
		return id;
	}

	return id;
}

struct trace_event_functions enter_syscall_print_funcs = {
	.trace		= print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
	.trace		= print_syscall_exit,
};

struct trace_event_class __refdata event_class_syscall_enter = {
	.system		= "syscalls",
	.reg		= syscall_enter_register,
	.define_fields	= syscall_enter_define_fields,
	.get_fields	= syscall_get_enter_fields,
	.raw_init	= init_syscall_trace,
};

struct trace_event_class __refdata event_class_syscall_exit = {
	.system		= "syscalls",
	.reg		= syscall_exit_register,
	.define_fields	= syscall_exit_define_fields,
	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
	.raw_init	= init_syscall_trace,
};

unsigned long __init __weak arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}
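
/*
 * Runs once from the trace events boot-time init path
 * (trace_event_init()), before any syscall event can be enabled.
 */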
void __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
				    GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}
}

#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;

static int perf_call_bpf_enter(struct trace_event_call *call, struct pt_regs *regs,
			       struct syscall_metadata *sys_data,
			       struct syscall_trace_enter *rec)
{
	struct syscall_tp_t {
		unsigned long long regs;
		unsigned long syscall_nr;
		unsigned long args[SYSCALL_DEFINE_MAXARGS];
	} param;
	int i;

	*(struct pt_regs **)&param = regs;
	param.syscall_nr = rec->nr;
	for (i = 0; i < sys_data->nb_args; i++)
		param.args[i] = rec->args[i];
	return trace_call_bpf(call, &param);
}
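
/*
 * The on-stack syscall_tp_t stands in for the real trace record when
 * calling into BPF: its leading unsigned long long slot is overwritten
 * with the pt_regs pointer before the syscall number and arguments are
 * copied in, matching the context layout attached programs expect.
 */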

static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	struct hlist_head *head;
	unsigned long args[6];
	bool valid_prog_array;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;
	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	head = this_cpu_ptr(sys_data->enter_event->perf_events);
	valid_prog_array = bpf_prog_array_valid(sys_data->enter_event);
	if (!valid_prog_array && hlist_empty(head))
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	rec = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, args);
	memcpy(&rec->args, args, sizeof(unsigned long) * sys_data->nb_args);

	if ((valid_prog_array &&
	     !perf_call_bpf_enter(sys_data->enter_event, regs, sys_data, rec)) ||
	    hlist_empty(head)) {
		perf_swevent_put_recursion_context(rctx);
		return;
	}

	perf_trace_buf_submit(rec, size, rctx,
			      sys_data->enter_event->event.type, 1, regs,
			      head, NULL);
}
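
/*
 * Worked sizing example (a sketch, 64-bit arch, 3-argument syscall):
 * size = 3 * 8 + sizeof(*rec); rounding size + sizeof(u32) up to a u64
 * boundary and then subtracting sizeof(u32) leaves the total record,
 * including perf's u32 size field, 8-byte aligned.
 */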

static int perf_sysenter_enable(struct trace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_enter)
		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall entry trace point");
	} else {
		set_bit(num, enabled_perf_enter_syscalls);
		sys_perf_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysenter_disable(struct trace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_enter--;
	clear_bit(num, enabled_perf_enter_syscalls);
	if (!sys_perf_refcount_enter)
		unregister_trace_sys_enter(perf_syscall_enter, NULL);
	mutex_unlock(&syscall_trace_lock);
}

static int perf_call_bpf_exit(struct trace_event_call *call, struct pt_regs *regs,
			      struct syscall_trace_exit *rec)
{
	struct syscall_tp_t {
		unsigned long long regs;
		unsigned long syscall_nr;
		unsigned long ret;
	} param;

	*(struct pt_regs **)&param = regs;
	param.syscall_nr = rec->nr;
	param.ret = rec->ret;
	return trace_call_bpf(call, &param);
}

static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	struct hlist_head *head;
	bool valid_prog_array;
	int syscall_nr;
	int rctx;
	int size;

	syscall_nr = trace_get_syscall_nr(current, regs);
	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
		return;
	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	head = this_cpu_ptr(sys_data->exit_event->perf_events);
	valid_prog_array = bpf_prog_array_valid(sys_data->exit_event);
	if (!valid_prog_array && hlist_empty(head))
		return;

	/* We can probably do that at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	rec = perf_trace_buf_alloc(size, NULL, &rctx);
	if (!rec)
		return;

	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	if ((valid_prog_array &&
	     !perf_call_bpf_exit(sys_data->exit_event, regs, rec)) ||
	    hlist_empty(head)) {
		perf_swevent_put_recursion_context(rctx);
		return;
	}

	perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type,
			      1, regs, head, NULL);
}

static int perf_sysexit_enable(struct trace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	if (!sys_perf_refcount_exit)
		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
	if (ret) {
		pr_info("event trace: Could not activate syscall exit trace point");
	} else {
		set_bit(num, enabled_perf_exit_syscalls);
		sys_perf_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

static void perf_sysexit_disable(struct trace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;

	mutex_lock(&syscall_trace_lock);
	sys_perf_refcount_exit--;
	clear_bit(num, enabled_perf_exit_syscalls);
	if (!sys_perf_refcount_exit)
		unregister_trace_sys_exit(perf_syscall_exit, NULL);
	mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */

static int syscall_enter_register(struct trace_event_call *event,
				  enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_enter(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_enter(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysenter_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysenter_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}

static int syscall_exit_register(struct trace_event_call *event,
				 enum trace_reg type, void *data)
{
	struct trace_event_file *file = data;

	switch (type) {
	case TRACE_REG_REGISTER:
		return reg_event_syscall_exit(file, event);
	case TRACE_REG_UNREGISTER:
		unreg_event_syscall_exit(file, event);
		return 0;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		return perf_sysexit_enable(event);
	case TRACE_REG_PERF_UNREGISTER:
		perf_sysexit_disable(event);
		return 0;
	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		return 0;
#endif
	}
	return 0;
}