git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
entry: Wire up syscall_work in common entry code
author: Gabriel Krisman Bertazi <krisman@collabora.com>
Mon, 16 Nov 2020 17:41:59 +0000 (12:41 -0500)
committer: Thomas Gleixner <tglx@linutronix.de>
Mon, 16 Nov 2020 20:53:15 +0000 (21:53 +0100)
Prepare the common entry code to use the SYSCALL_WORK flags. They will
be defined in subsequent patches for each type of syscall
work. SYSCALL_WORK_ENTRY/EXIT are defined for the transition, as they
will replace the TIF_ equivalent defines.

Signed-off-by: Gabriel Krisman Bertazi <krisman@collabora.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Andy Lutomirski <luto@kernel.org>
Link: https://lore.kernel.org/r/20201116174206.2639648-4-krisman@collabora.com
include/linux/entry-common.h
kernel/entry/common.c

index aab549026ab81a6226916b7f05a5cb6d5c409632..3fe8f868f15ef496a8109db42267df76b3c029c8 100644 (file)
@@ -64,6 +64,9 @@
        (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |                      \
         _TIF_SYSCALL_TRACEPOINT | ARCH_SYSCALL_EXIT_WORK)
 
+#define SYSCALL_WORK_ENTER     (0)
+#define SYSCALL_WORK_EXIT      (0)
+
 /*
  * TIF flags handled in exit_to_user_mode_loop()
  */
index fa17baadf63ee8250ce431bc992170f691ed9131..e7a11e38daba2c70bc8cd8b9e2392c0b5e638aa4 100644 (file)
@@ -42,7 +42,7 @@ static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
 }
 
 static long syscall_trace_enter(struct pt_regs *regs, long syscall,
-                               unsigned long ti_work)
+                               unsigned long ti_work, unsigned long work)
 {
        long ret = 0;
 
@@ -74,11 +74,12 @@ static long syscall_trace_enter(struct pt_regs *regs, long syscall,
 static __always_inline long
 __syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
 {
+       unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
        unsigned long ti_work;
 
        ti_work = READ_ONCE(current_thread_info()->flags);
-       if (ti_work & SYSCALL_ENTER_WORK)
-               syscall = syscall_trace_enter(regs, syscall, ti_work);
+       if (work & SYSCALL_WORK_ENTER || ti_work & SYSCALL_ENTER_WORK)
+               syscall = syscall_trace_enter(regs, syscall, ti_work, work);
 
        return syscall;
 }
@@ -225,7 +226,8 @@ static inline bool report_single_step(unsigned long ti_work)
 }
 #endif
 
-static void syscall_exit_work(struct pt_regs *regs, unsigned long ti_work)
+static void syscall_exit_work(struct pt_regs *regs, unsigned long ti_work,
+                             unsigned long work)
 {
        bool step;
 
@@ -245,6 +247,7 @@ static void syscall_exit_work(struct pt_regs *regs, unsigned long ti_work)
  */
 static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
 {
+       unsigned long work = READ_ONCE(current_thread_info()->syscall_work);
        u32 cached_flags = READ_ONCE(current_thread_info()->flags);
        unsigned long nr = syscall_get_nr(current, regs);
 
@@ -262,8 +265,8 @@ static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
         * enabled, we want to run them exactly once per syscall exit with
         * interrupts enabled.
         */
-       if (unlikely(cached_flags & SYSCALL_EXIT_WORK))
-               syscall_exit_work(regs, cached_flags);
+       if (unlikely(work & SYSCALL_WORK_EXIT || cached_flags & SYSCALL_EXIT_WORK))
+               syscall_exit_work(regs, cached_flags, work);
 }
 
 __visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)