Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 2 May 2017 05:07:51 +0000 (22:07 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 2 May 2017 05:07:51 +0000 (22:07 -0700)
Pull x86 asm updates from Ingo Molnar:
 "The main changes in this cycle were:

   - unwinder fixes and enhancements

   - improve ftrace interaction with the unwinder

   - optimize the code footprint of WARN() and related debugging
     constructs

   - ... plus misc updates, cleanups and fixes"

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  x86/unwind: Dump all stacks in unwind_dump()
  x86/unwind: Silence more entry-code related warnings
  x86/ftrace: Fix ebp in ftrace_regs_caller that screws up unwinder
  x86/unwind: Remove unused 'sp' parameter in unwind_dump()
  x86/unwind: Prepend hex mask value with '0x' in unwind_dump()
  x86/unwind: Properly zero-pad 32-bit values in unwind_dump()
  x86/unwind: Ensure stack pointer is aligned
  debug: Avoid setting BUGFLAG_WARNING twice
  x86/unwind: Silence entry-related warnings
  x86/unwind: Read stack return address in update_stack_state()
  x86/unwind: Move common code into update_stack_state()
  debug: Fix __bug_table[] in arch linker scripts
  debug: Add _ONCE() logic to report_bug()
  x86/debug: Define BUG() again for !CONFIG_BUG
  x86/debug: Implement __WARN() using UD0
  x86/ftrace: Use Makefile logic instead of #ifdef for compiling ftrace_*.o
  x86/ftrace: Add -mfentry support to x86_32 with DYNAMIC_FTRACE set
  x86/ftrace: Clean up ftrace_regs_caller
  x86/ftrace: Add stack frame pointer to ftrace_caller
  x86/ftrace: Move the ftrace specific code out of entry_32.S
  ...

39 files changed:
arch/arm/kernel/vmlinux-xip.lds.S
arch/arm/kernel/vmlinux.lds.S
arch/arm64/include/asm/bug.h
arch/blackfin/kernel/vmlinux.lds.S
arch/c6x/kernel/vmlinux.lds.S
arch/cris/kernel/vmlinux.lds.S
arch/frv/kernel/vmlinux.lds.S
arch/ia64/kernel/vmlinux.lds.S
arch/mips/kernel/vmlinux.lds.S
arch/parisc/include/asm/bug.h
arch/powerpc/include/asm/bug.h
arch/powerpc/kernel/vmlinux.lds.S
arch/s390/include/asm/bug.h
arch/sh/include/asm/bug.h
arch/um/Kconfig.common
arch/x86/Kconfig
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/include/asm/bug.h
arch/x86/include/asm/page_64.h
arch/x86/include/asm/unwind.h
arch/x86/kernel/Makefile
arch/x86/kernel/dumpstack.c
arch/x86/kernel/dumpstack_32.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/ftrace_32.S [new file with mode: 0644]
arch/x86/kernel/ftrace_64.S [new file with mode: 0644]
arch/x86/kernel/mcount_64.S [deleted file]
arch/x86/kernel/traps.c
arch/x86/kernel/unwind_frame.c
arch/x86/kernel/unwind_guess.c
arch/x86/kernel/vmlinux.lds.S
arch/x86/lib/clear_page_64.S
arch/x86/um/Makefile
arch/x86/um/bug.c [deleted file]
include/asm-generic/bug.h
include/asm-generic/vmlinux.lds.h
include/linux/bug.h
lib/bug.c

index 37b2a11af34592b5f60f0db77ce014588f9327f4..8265b116218de4f44314b16d7795acae2492b12d 100644 (file)
@@ -242,6 +242,8 @@ SECTIONS
        }
        _edata_loc = __data_loc + SIZEOF(.data);
 
+       BUG_TABLE
+
 #ifdef CONFIG_HAVE_TCM
         /*
         * We align everything to a page boundary so we can
index ce18007f9e4eb08d1b36ad7103f15e7adadaae9e..c83a7ba737d6a96dedfe1dec88967d824309e538 100644 (file)
@@ -262,6 +262,8 @@ SECTIONS
        }
        _edata_loc = __data_loc + SIZEOF(.data);
 
+       BUG_TABLE
+
 #ifdef CONFIG_HAVE_TCM
         /*
         * We align everything to a page boundary so we can
index 561190d1588136b1f7bc207ebfd04251694ea552..a9be1072933c0d1213afd9a5fed65de5d554f055 100644 (file)
@@ -55,7 +55,7 @@ _BUGVERBOSE_LOCATION(__FILE__, __LINE__)              \
        unreachable();                          \
 } while (0)
 
-#define __WARN_TAINT(taint) _BUG_FLAGS(BUGFLAG_TAINT(taint))
+#define __WARN_FLAGS(flags) _BUG_FLAGS(BUGFLAG_WARNING|(flags))
 
 #endif /* ! CONFIG_GENERIC_BUG */
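
This same interface change repeats below for parisc, powerpc, s390, sh and x86: each architecture stops defining __WARN_TAINT(taint) and instead defines __WARN_FLAGS(flags), so the generic layer can pass extra bits such as BUGFLAG_ONCE alongside the taint flag. A minimal sketch of the assumed glue in include/asm-generic/bug.h (that file is in the change list above, but its hunk is not shown on this page):

    /* Assumed shape of the generic side, not shown in this commitdiff: */
    #define __WARN_TAINT(taint)     __WARN_FLAGS(BUGFLAG_TAINT(taint))

    #define WARN_ON_ONCE(condition) ({                              \
            int __ret_warn_on = !!(condition);                      \
            if (unlikely(__ret_warn_on))                            \
                    __WARN_FLAGS(BUGFLAG_ONCE |                     \
                                 BUGFLAG_TAINT(TAINT_WARN));        \
            unlikely(__ret_warn_on);                                \
    })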
 
index 68069a120055b359b671b5890d2797f63027c007..334ef8139b35b7752eef6593bdb26314cf8d8b96 100644 (file)
@@ -115,6 +115,8 @@ SECTIONS
        __data_lma = LOADADDR(.data);
        __data_len = SIZEOF(.data);
 
+       BUG_TABLE
+
        /* The init section should be last, so when we free it, it goes into
         * the general memory pool, and (hopefully) will decrease fragmentation
         * a tiny bit. The init section has a _requirement_ that it be
index a1a5c166bc9b8b125bfd4e515ab7a16afad0bc65..29ebea49ddd5ae2b957c9fb8c4d60f674750989a 100644 (file)
@@ -128,6 +128,8 @@ SECTIONS
                . = ALIGN(8);
        }
 
+       BUG_TABLE
+
        _edata = .;
 
        __bss_start = .;
index 97958626152000cd24d171ea0fa0c3326f77ffe7..867f237d7c5c361a8bd02c2cb28d9f35794532a9 100644 (file)
@@ -68,6 +68,8 @@ SECTIONS
        __edata = . ;                   /* End of data section. */
        _edata = . ;
 
+       BUG_TABLE
+
        INIT_TASK_DATA_SECTION(PAGE_SIZE)
 
        . = ALIGN(PAGE_SIZE);           /* Init code and data. */
index aa6e573d57da46d1cb05bec37cb9dfcc7bfac13f..3f44dcbbad4d5b3605d2b088c154d36db531cf00 100644 (file)
@@ -102,6 +102,8 @@ SECTIONS
 
   _edata = .;                  /* End of data section */
 
+  BUG_TABLE
+
   /* GP section */
   . = ALIGN(L1_CACHE_BYTES);
   _gp = . + 2048;
index f89d20c9741225e4d416713fd32819e6937c00e9..798026dde52e42b81455bcd4547778a2f3369616 100644 (file)
@@ -192,6 +192,8 @@ SECTIONS {
                CONSTRUCTORS
        }
 
+       BUG_TABLE
+
        . = ALIGN(16);  /* gp must be 16-byte aligned for exc. table */
        .got : AT(ADDR(.got) - LOAD_OFFSET) {
                *(.got.plt)
index f0a0e6d62be38e5bcdb6f8c044f4b026ca940851..8ca2371aa684bb7ddcc6b83b0de8b93c87e93db3 100644 (file)
@@ -97,6 +97,7 @@ SECTIONS
                DATA_DATA
                CONSTRUCTORS
        }
+       BUG_TABLE
        _gp = . + 0x8000;
        .lit8 : {
                *(.lit8)
index 62a33338549c18d6bb32d0a08c7e0728808767fe..d2742273a685df2d243a14fd19a7b47b5a1b94a7 100644 (file)
@@ -46,7 +46,7 @@
 #endif
 
 #ifdef CONFIG_DEBUG_BUGVERBOSE
-#define __WARN_TAINT(taint)                                            \
+#define __WARN_FLAGS(flags)                                            \
        do {                                                            \
                asm volatile("\n"                                       \
                             "1:\t" PARISC_BUG_BREAK_ASM "\n"           \
                             "\t.org 2b+%c3\n"                          \
                             "\t.popsection"                            \
                             : : "i" (__FILE__), "i" (__LINE__),        \
-                            "i" (BUGFLAG_TAINT(taint)),                \
+                            "i" (BUGFLAG_WARNING|(flags)),             \
                             "i" (sizeof(struct bug_entry)) );          \
        } while(0)
 #else
-#define __WARN_TAINT(taint)                                            \
+#define __WARN_FLAGS(flags)                                            \
        do {                                                            \
                asm volatile("\n"                                       \
                             "1:\t" PARISC_BUG_BREAK_ASM "\n"           \
@@ -69,7 +69,7 @@
                             "\t.short %c0\n"                           \
                             "\t.org 2b+%c1\n"                          \
                             "\t.popsection"                            \
-                            : : "i" (BUGFLAG_TAINT(taint)),            \
+                            : : "i" (BUGFLAG_WARNING|(flags)),         \
                             "i" (sizeof(struct bug_entry)) );          \
        } while(0)
 #endif
index 3a39283333c3cdbba5139f1c97f83d7f36815ed9..f2c562a0a427dda2c774acb1e13c7eda8514e21e 100644 (file)
        }                                                       \
 } while (0)
 
-#define __WARN_TAINT(taint) do {                               \
+#define __WARN_FLAGS(flags) do {                               \
        __asm__ __volatile__(                                   \
                "1:     twi 31,0,0\n"                           \
                _EMIT_BUG_ENTRY                                 \
                : : "i" (__FILE__), "i" (__LINE__),             \
-                 "i" (BUGFLAG_TAINT(taint)),                   \
+                 "i" (BUGFLAG_WARNING|(flags)),                \
                  "i" (sizeof(struct bug_entry)));              \
 } while (0)
 
index 7394b770ae1f6b2402ef2b04fe23e92eae2c7ee2..1c24c894c9082249c0cc32dc078cd1905b62a548 100644 (file)
@@ -312,6 +312,8 @@ SECTIONS
                NOSAVE_DATA
        }
 
+       BUG_TABLE
+
        . = ALIGN(PAGE_SIZE);
        _edata  =  .;
        PROVIDE32 (edata = .);
index bf90d1fd97a59cbd4141f8180be49e7b0cc265f7..1bbd9dbfe4e0a2baac12f5a83ac4538bc8a6ca8f 100644 (file)
@@ -46,8 +46,8 @@
        unreachable();                                  \
 } while (0)
 
-#define __WARN_TAINT(taint) do {                       \
-       __EMIT_BUG(BUGFLAG_TAINT(taint));               \
+#define __WARN_FLAGS(flags) do {                       \
+       __EMIT_BUG(BUGFLAG_WARNING|(flags));            \
 } while (0)
 
 #define WARN_ON(x) ({                                  \
index dcf278075429fcb2774cbfaeb586a7dc43bf10ab..1b77f068be2b1d027ae460ce0e6919a84223a1f1 100644 (file)
@@ -50,7 +50,7 @@ do {                                                  \
                   "i" (sizeof(struct bug_entry)));     \
 } while (0)
 
-#define __WARN_TAINT(taint)                            \
+#define __WARN_FLAGS(flags)                            \
 do {                                                   \
        __asm__ __volatile__ (                          \
                "1:\t.short %O0\n"                      \
@@ -59,7 +59,7 @@ do {                                                  \
                 : "n" (TRAPA_BUG_OPCODE),              \
                   "i" (__FILE__),                      \
                   "i" (__LINE__),                      \
-                  "i" (BUGFLAG_TAINT(taint)),          \
+                  "i" (BUGFLAG_WARNING|(flags)),       \
                   "i" (sizeof(struct bug_entry)));     \
 } while (0)
 
index fd443852103c998fd997c04ee64ebb7c32716785..ed9c5b5ff028947061bd8a86949c58e9456e98a3 100644 (file)
@@ -50,11 +50,6 @@ config GENERIC_CALIBRATE_DELAY
        bool
        default y
 
-config GENERIC_BUG
-       bool
-       default y
-       depends on BUG
-
 config HZ
        int
        default 100
index a05571937ad33cbe5d86b956cc587c74b5db2d9b..2b899858532a9a7e6c0638e20fe7a3ed18eabe1c 100644 (file)
@@ -126,7 +126,7 @@ config X86
        select HAVE_EBPF_JIT                    if X86_64
        select HAVE_EFFICIENT_UNALIGNED_ACCESS
        select HAVE_EXIT_THREAD
-       select HAVE_FENTRY                      if X86_64
+       select HAVE_FENTRY                      if X86_64 || DYNAMIC_FTRACE
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_FUNCTION_TRACER
index 57f7ec35216ef5e2a78efa0d303f8f343b010d68..50bc26949e9edf75cc5978f8a08a4273bf489aec 100644 (file)
 #include <asm/errno.h>
 #include <asm/segment.h>
 #include <asm/smp.h>
-#include <asm/page_types.h>
 #include <asm/percpu.h>
 #include <asm/processor-flags.h>
-#include <asm/ftrace.h>
 #include <asm/irq_vectors.h>
 #include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
-#include <asm/export.h>
 #include <asm/frame.h>
 
        .section .entry.text, "ax"
@@ -585,7 +582,7 @@ ENTRY(iret_exc      )
         * will soon execute iret and the tracer was already set to
         * the irqstate after the IRET:
         */
-       DISABLE_INTERRUPTS(CLBR_EAX)
+       DISABLE_INTERRUPTS(CLBR_ANY)
        lss     (%esp), %esp                    /* switch to espfix segment */
        jmp     .Lrestore_nocheck
 #endif
@@ -886,172 +883,6 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
 
 #endif /* CONFIG_HYPERV */
 
-#ifdef CONFIG_FUNCTION_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
-
-ENTRY(mcount)
-       ret
-END(mcount)
-
-ENTRY(ftrace_caller)
-       pushl   %eax
-       pushl   %ecx
-       pushl   %edx
-       pushl   $0                              /* Pass NULL as regs pointer */
-       movl    4*4(%esp), %eax
-       movl    0x4(%ebp), %edx
-       movl    function_trace_op, %ecx
-       subl    $MCOUNT_INSN_SIZE, %eax
-
-.globl ftrace_call
-ftrace_call:
-       call    ftrace_stub
-
-       addl    $4, %esp                        /* skip NULL pointer */
-       popl    %edx
-       popl    %ecx
-       popl    %eax
-.Lftrace_ret:
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-.globl ftrace_graph_call
-ftrace_graph_call:
-       jmp     ftrace_stub
-#endif
-
-/* This is weak to keep gas from relaxing the jumps */
-WEAK(ftrace_stub)
-       ret
-END(ftrace_caller)
-
-ENTRY(ftrace_regs_caller)
-       pushf   /* push flags before compare (in cs location) */
-
-       /*
-        * i386 does not save SS and ESP when coming from kernel.
-        * Instead, to get sp, &regs->sp is used (see ptrace.h).
-        * Unfortunately, that means eflags must be at the same location
-        * as the current return ip is. We move the return ip into the
-        * ip location, and move flags into the return ip location.
-        */
-       pushl   4(%esp)                         /* save return ip into ip slot */
-
-       pushl   $0                              /* Load 0 into orig_ax */
-       pushl   %gs
-       pushl   %fs
-       pushl   %es
-       pushl   %ds
-       pushl   %eax
-       pushl   %ebp
-       pushl   %edi
-       pushl   %esi
-       pushl   %edx
-       pushl   %ecx
-       pushl   %ebx
-
-       movl    13*4(%esp), %eax                /* Get the saved flags */
-       movl    %eax, 14*4(%esp)                /* Move saved flags into regs->flags location */
-                                               /* clobbering return ip */
-       movl    $__KERNEL_CS, 13*4(%esp)
-
-       movl    12*4(%esp), %eax                /* Load ip (1st parameter) */
-       subl    $MCOUNT_INSN_SIZE, %eax         /* Adjust ip */
-       movl    0x4(%ebp), %edx                 /* Load parent ip (2nd parameter) */
-       movl    function_trace_op, %ecx         /* Save ftrace_pos in 3rd parameter */
-       pushl   %esp                            /* Save pt_regs as 4th parameter */
-
-GLOBAL(ftrace_regs_call)
-       call    ftrace_stub
-
-       addl    $4, %esp                        /* Skip pt_regs */
-       movl    14*4(%esp), %eax                /* Move flags back into cs */
-       movl    %eax, 13*4(%esp)                /* Needed to keep addl  from modifying flags */
-       movl    12*4(%esp), %eax                /* Get return ip from regs->ip */
-       movl    %eax, 14*4(%esp)                /* Put return ip back for ret */
-
-       popl    %ebx
-       popl    %ecx
-       popl    %edx
-       popl    %esi
-       popl    %edi
-       popl    %ebp
-       popl    %eax
-       popl    %ds
-       popl    %es
-       popl    %fs
-       popl    %gs
-       addl    $8, %esp                        /* Skip orig_ax and ip */
-       popf                                    /* Pop flags at end (no addl to corrupt flags) */
-       jmp     .Lftrace_ret
-
-       popf
-       jmp     ftrace_stub
-#else /* ! CONFIG_DYNAMIC_FTRACE */
-
-ENTRY(mcount)
-       cmpl    $__PAGE_OFFSET, %esp
-       jb      ftrace_stub                     /* Paging not enabled yet? */
-
-       cmpl    $ftrace_stub, ftrace_trace_function
-       jnz     .Ltrace
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       cmpl    $ftrace_stub, ftrace_graph_return
-       jnz     ftrace_graph_caller
-
-       cmpl    $ftrace_graph_entry_stub, ftrace_graph_entry
-       jnz     ftrace_graph_caller
-#endif
-.globl ftrace_stub
-ftrace_stub:
-       ret
-
-       /* taken from glibc */
-.Ltrace:
-       pushl   %eax
-       pushl   %ecx
-       pushl   %edx
-       movl    0xc(%esp), %eax
-       movl    0x4(%ebp), %edx
-       subl    $MCOUNT_INSN_SIZE, %eax
-
-       call    *ftrace_trace_function
-
-       popl    %edx
-       popl    %ecx
-       popl    %eax
-       jmp     ftrace_stub
-END(mcount)
-#endif /* CONFIG_DYNAMIC_FTRACE */
-EXPORT_SYMBOL(mcount)
-#endif /* CONFIG_FUNCTION_TRACER */
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller)
-       pushl   %eax
-       pushl   %ecx
-       pushl   %edx
-       movl    0xc(%esp), %eax
-       lea     0x4(%ebp), %edx
-       movl    (%ebp), %ecx
-       subl    $MCOUNT_INSN_SIZE, %eax
-       call    prepare_ftrace_return
-       popl    %edx
-       popl    %ecx
-       popl    %eax
-       ret
-END(ftrace_graph_caller)
-
-.globl return_to_handler
-return_to_handler:
-       pushl   %eax
-       pushl   %edx
-       movl    %ebp, %eax
-       call    ftrace_return_to_handler
-       movl    %eax, %ecx
-       popl    %edx
-       popl    %eax
-       jmp     *%ecx
-#endif
-
 #ifdef CONFIG_TRACING
 ENTRY(trace_page_fault)
        ASM_CLAC
index 044d18ebc43ce96a512abd1e5f36eb8dfaee0636..d2b2a2948ffe8ec3b045a64521d2703e7db3a445 100644 (file)
@@ -212,7 +212,7 @@ entry_SYSCALL_64_fastpath:
         * If we see that no exit work is required (which we are required
         * to check with IRQs off), then we can go straight to SYSRET64.
         */
-       DISABLE_INTERRUPTS(CLBR_NONE)
+       DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        movq    PER_CPU_VAR(current_task), %r11
        testl   $_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
@@ -233,7 +233,7 @@ entry_SYSCALL_64_fastpath:
         * raise(3) will trigger this, for example.  IRQs are off.
         */
        TRACE_IRQS_ON
-       ENABLE_INTERRUPTS(CLBR_NONE)
+       ENABLE_INTERRUPTS(CLBR_ANY)
        SAVE_EXTRA_REGS
        movq    %rsp, %rdi
        call    syscall_return_slowpath /* returns with IRQs disabled */
@@ -343,7 +343,7 @@ ENTRY(stub_ptregs_64)
         * Called from fast path -- disable IRQs again, pop return address
         * and jump to slow path
         */
-       DISABLE_INTERRUPTS(CLBR_NONE)
+       DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        popq    %rax
        jmp     entry_SYSCALL64_slow_path
@@ -518,7 +518,7 @@ common_interrupt:
        interrupt do_IRQ
        /* 0(%rsp): old RSP */
 ret_from_intr:
-       DISABLE_INTERRUPTS(CLBR_NONE)
+       DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        decl    PER_CPU_VAR(irq_count)
 
@@ -1051,7 +1051,7 @@ END(paranoid_entry)
  * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
  */
 ENTRY(paranoid_exit)
-       DISABLE_INTERRUPTS(CLBR_NONE)
+       DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF_DEBUG
        testl   %ebx, %ebx                      /* swapgs needed? */
        jnz     paranoid_exit_no_swapgs
@@ -1156,10 +1156,9 @@ END(error_entry)
  *   0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
  */
 ENTRY(error_exit)
-       movl    %ebx, %eax
-       DISABLE_INTERRUPTS(CLBR_NONE)
+       DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
-       testl   %eax, %eax
+       testl   %ebx, %ebx
        jnz     retint_kernel
        jmp     retint_user
 END(error_exit)
index ba38ebbaced3cd0224caef932f3f59a5cc2cc5cf..39e702d90cdbdda8df0b42349d9f8d262b08bf4d 100644 (file)
@@ -1,36 +1,82 @@
 #ifndef _ASM_X86_BUG_H
 #define _ASM_X86_BUG_H
 
-#define HAVE_ARCH_BUG
+#include <linux/stringify.h>
 
-#ifdef CONFIG_DEBUG_BUGVERBOSE
+/*
+ * Since some emulators terminate on UD2, we cannot use it for WARN.
+ * Since various instruction decoders disagree on the length of UD1,
+ * we cannot use it either. So use UD0 for WARN.
+ *
+ * (binutils knows about "ud1" but {en,de}codes it as 2 bytes, whereas
+ *  our kernel decoder thinks it takes a ModRM byte, which seems consistent
+ *  with various things like the Intel SDM instruction encoding rules)
+ */
+
+#define ASM_UD0                ".byte 0x0f, 0xff"
+#define ASM_UD1                ".byte 0x0f, 0xb9" /* + ModRM */
+#define ASM_UD2                ".byte 0x0f, 0x0b"
+
+#define INSN_UD0       0xff0f
+#define INSN_UD2       0x0b0f
+
+#define LEN_UD0                2
+
+#ifdef CONFIG_GENERIC_BUG
 
 #ifdef CONFIG_X86_32
-# define __BUG_C0      "2:\t.long 1b, %c0\n"
+# define __BUG_REL(val)        ".long " __stringify(val)
 #else
-# define __BUG_C0      "2:\t.long 1b - 2b, %c0 - 2b\n"
+# define __BUG_REL(val)        ".long " __stringify(val) " - 2b"
 #endif
 
-#define BUG()                                                  \
-do {                                                           \
-       asm volatile("1:\tud2\n"                                \
-                    ".pushsection __bug_table,\"a\"\n"         \
-                    __BUG_C0                                   \
-                    "\t.word %c1, 0\n"                         \
-                    "\t.org 2b+%c2\n"                          \
-                    ".popsection"                              \
-                    : : "i" (__FILE__), "i" (__LINE__),        \
-                    "i" (sizeof(struct bug_entry)));           \
-       unreachable();                                          \
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+
+#define _BUG_FLAGS(ins, flags)                                         \
+do {                                                                   \
+       asm volatile("1:\t" ins "\n"                                    \
+                    ".pushsection __bug_table,\"a\"\n"                 \
+                    "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n"   \
+                    "\t"  __BUG_REL(%c0) "\t# bug_entry::file\n"       \
+                    "\t.word %c1"        "\t# bug_entry::line\n"       \
+                    "\t.word %c2"        "\t# bug_entry::flags\n"      \
+                    "\t.org 2b+%c3\n"                                  \
+                    ".popsection"                                      \
+                    : : "i" (__FILE__), "i" (__LINE__),                \
+                        "i" (flags),                                   \
+                        "i" (sizeof(struct bug_entry)));               \
 } while (0)
 
+#else /* !CONFIG_DEBUG_BUGVERBOSE */
+
+#define _BUG_FLAGS(ins, flags)                                         \
+do {                                                                   \
+       asm volatile("1:\t" ins "\n"                                    \
+                    ".pushsection __bug_table,\"a\"\n"                 \
+                    "2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n"   \
+                    "\t.word %c0"        "\t# bug_entry::flags\n"      \
+                    "\t.org 2b+%c1\n"                                  \
+                    ".popsection"                                      \
+                    : : "i" (flags),                                   \
+                        "i" (sizeof(struct bug_entry)));               \
+} while (0)
+
+#endif /* CONFIG_DEBUG_BUGVERBOSE */
+
 #else
+
+#define _BUG_FLAGS(ins, flags)  asm volatile(ins)
+
+#endif /* CONFIG_GENERIC_BUG */
+
+#define HAVE_ARCH_BUG
 #define BUG()                                                  \
 do {                                                           \
-       asm volatile("ud2");                                    \
+       _BUG_FLAGS(ASM_UD2, 0);                                 \
        unreachable();                                          \
 } while (0)
-#endif
+
+#define __WARN_FLAGS(flags)    _BUG_FLAGS(ASM_UD0, BUGFLAG_WARNING|(flags))
 
 #include <asm-generic/bug.h>
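
Taken together, the macros above make a WARN site cost two bytes of text (the ud0) plus one out-of-line bug_entry; __BUG_REL() stores 32-bit values relative to the table entry itself on x86-64, keeping entries compact. A hand-expanded instance (hypothetical call site, CONFIG_DEBUG_BUGVERBOSE=y assumed):

    /* What __WARN_FLAGS(0) boils down to at one call site (sketch): */
    static void warn_site_example(void)
    {
            asm volatile("1:\t.byte 0x0f, 0xff\n"   /* ASM_UD0 */
                         ".pushsection __bug_table,\"a\"\n"
                         "2:\t.long 1b - 2b\n"      /* bug_entry::bug_addr */
                         "\t.long %c0 - 2b\n"       /* bug_entry::file */
                         "\t.word %c1\n"            /* bug_entry::line */
                         "\t.word %c2\n"            /* bug_entry::flags */
                         "\t.org 2b+%c3\n"
                         ".popsection"
                         : : "i" (__FILE__), "i" (__LINE__),
                             "i" (BUGFLAG_WARNING),
                             "i" (sizeof(struct bug_entry)));
    }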
 
index b3bebf9e57466985cfcf6a54dd5d4410dd5417c8..b4a0d43248cf3d6f2c7ef042200c31a25f29f122 100644 (file)
@@ -4,6 +4,7 @@
 #include <asm/page_64_types.h>
 
 #ifndef __ASSEMBLY__
+#include <asm/alternative.h>
 
 /* duplicated to the one in bootmem.h */
 extern unsigned long max_pfn;
@@ -34,7 +35,20 @@ extern unsigned long __phys_addr_symbol(unsigned long);
 #define pfn_valid(pfn)          ((pfn) < max_pfn)
 #endif
 
-void clear_page(void *page);
+void clear_page_orig(void *page);
+void clear_page_rep(void *page);
+void clear_page_erms(void *page);
+
+static inline void clear_page(void *page)
+{
+       alternative_call_2(clear_page_orig,
+                          clear_page_rep, X86_FEATURE_REP_GOOD,
+                          clear_page_erms, X86_FEATURE_ERMS,
+                          "=D" (page),
+                          "0" (page)
+                          : "memory", "rax", "rcx");
+}
+
 void copy_page(void *to, void *from);
 
 #endif /* !__ASSEMBLY__ */
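
alternative_call_2() patches the call target once at boot based on CPU feature flags, so the chosen clear_page variant is reached directly, with no runtime branch. A sketch of the equivalent runtime dispatch (illustration only; the real selection happens via instruction patching):

    /* Boot-time-patched dispatch, written as an explicit check: */
    static inline void clear_page_equiv(void *page)
    {
            if (static_cpu_has(X86_FEATURE_ERMS))
                    clear_page_erms(page);          /* 'rep stosb' */
            else if (static_cpu_has(X86_FEATURE_REP_GOOD))
                    clear_page_rep(page);           /* 'rep stosq' */
            else
                    clear_page_orig(page);          /* unrolled stores */
    }
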
index 6fa75b17aec33aea051e1de2aea9199b0f6c836f..9b10dcd51716f95a267e5906961b57d8812e43fd 100644 (file)
@@ -12,8 +12,10 @@ struct unwind_state {
        struct task_struct *task;
        int graph_idx;
 #ifdef CONFIG_FRAME_POINTER
+       bool got_irq;
        unsigned long *bp, *orig_sp;
        struct pt_regs *regs;
+       unsigned long ip;
 #else
        unsigned long *sp;
 #endif
index 84c00592d3598a2dc851202893bcc3e62cf8e669..4b994232cb5739f15eef17c69be00aa3b3bc5243 100644 (file)
@@ -27,7 +27,7 @@ KASAN_SANITIZE_stacktrace.o := n
 
 OBJECT_FILES_NON_STANDARD_head_$(BITS).o               := y
 OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o    := y
-OBJECT_FILES_NON_STANDARD_mcount_$(BITS).o             := y
+OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o             := y
 OBJECT_FILES_NON_STANDARD_test_nx.o                    := y
 
 # If instrumentation of this dir is enabled, boot hangs during first second.
@@ -46,7 +46,7 @@ obj-$(CONFIG_MODIFY_LDT_SYSCALL)      += ldt.o
 obj-y                  += setup.o x86_init.o i8259.o irqinit.o jump_label.o
 obj-$(CONFIG_IRQ_WORK)  += irq_work.o
 obj-y                  += probe_roms.o
-obj-$(CONFIG_X86_64)   += sys_x86_64.o mcount_64.o
+obj-$(CONFIG_X86_64)   += sys_x86_64.o
 obj-$(CONFIG_X86_ESPFIX64)     += espfix_64.o
 obj-$(CONFIG_SYSFS)    += ksysfs.o
 obj-y                  += bootflag.o e820.o
@@ -82,6 +82,7 @@ obj-y                         += apic/
 obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE)   += ftrace.o
 obj-$(CONFIG_LIVEPATCH)        += livepatch.o
+obj-$(CONFIG_FUNCTION_TRACER)  += ftrace_$(BITS).o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS)  += ftrace.o
 obj-$(CONFIG_X86_TSC)          += trace_clock.o
index 09d4ac0d2661fdda13f7ae3acd7412bd6b988d7d..dbce3cca94cb46fda2b3fd676be1d078b4f73161 100644 (file)
@@ -77,7 +77,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
         * - softirq stack
         * - hardirq stack
         */
-       for (regs = NULL; stack; stack = stack_info.next_sp) {
+       for (regs = NULL; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
                const char *stack_name;
 
                /*
@@ -289,9 +289,6 @@ void die(const char *str, struct pt_regs *regs, long err)
        unsigned long flags = oops_begin();
        int sig = SIGSEGV;
 
-       if (!user_mode(regs))
-               report_bug(regs->ip, regs);
-
        if (__die(str, regs, err))
                sig = 0;
        oops_end(flags, regs, sig);
index b0b3a3df7c2080d3d526908441fa93b449b6708f..e5f0b40e66d238b931d14d78ea579abd195a3350 100644 (file)
@@ -162,15 +162,3 @@ void show_regs(struct pt_regs *regs)
        }
        pr_cont("\n");
 }
-
-int is_valid_bugaddr(unsigned long ip)
-{
-       unsigned short ud2;
-
-       if (ip < PAGE_OFFSET)
-               return 0;
-       if (probe_kernel_address((unsigned short *)ip, ud2))
-               return 0;
-
-       return ud2 == 0x0b0f;
-}
index a8b117e93b4620181b3b471049d1ce4925f33ec3..3e1471d5748723069d70577fd9061a1cb26770ba 100644 (file)
@@ -178,13 +178,3 @@ void show_regs(struct pt_regs *regs)
        }
        pr_cont("\n");
 }
-
-int is_valid_bugaddr(unsigned long ip)
-{
-       unsigned short ud2;
-
-       if (__copy_from_user(&ud2, (const void __user *) ip, sizeof(ud2)))
-               return 0;
-
-       return ud2 == 0x0b0f;
-}
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
new file mode 100644 (file)
index 0000000..722a145
--- /dev/null
@@ -0,0 +1,244 @@
+/*
+ *  Copyright (C) 2017  Steven Rostedt, VMware Inc.
+ */
+
+#include <linux/linkage.h>
+#include <asm/page_types.h>
+#include <asm/segment.h>
+#include <asm/export.h>
+#include <asm/ftrace.h>
+
+#ifdef CC_USING_FENTRY
+# define function_hook __fentry__
+EXPORT_SYMBOL(__fentry__)
+#else
+# define function_hook mcount
+EXPORT_SYMBOL(mcount)
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+/* mcount uses a frame pointer even if CONFIG_FRAME_POINTER is not set */
+#if !defined(CC_USING_FENTRY) || defined(CONFIG_FRAME_POINTER)
+# define USING_FRAME_POINTER
+#endif
+
+#ifdef USING_FRAME_POINTER
+# define MCOUNT_FRAME                  1       /* using frame = true  */
+#else
+# define MCOUNT_FRAME                  0       /* using frame = false */
+#endif
+
+ENTRY(function_hook)
+       ret
+END(function_hook)
+
+ENTRY(ftrace_caller)
+
+#ifdef USING_FRAME_POINTER
+# ifdef CC_USING_FENTRY
+       /*
+        * Frame pointers are of ip followed by bp.
+        * Since fentry is an immediate jump, we are left with
+        * parent-ip, function-ip. We need to add a frame with
+        * parent-ip followed by ebp.
+        */
+       pushl   4(%esp)                         /* parent ip */
+       pushl   %ebp
+       movl    %esp, %ebp
+       pushl   2*4(%esp)                       /* function ip */
+# endif
+       /* For mcount, the function ip is directly above */
+       pushl   %ebp
+       movl    %esp, %ebp
+#endif
+       pushl   %eax
+       pushl   %ecx
+       pushl   %edx
+       pushl   $0                              /* Pass NULL as regs pointer */
+
+#ifdef USING_FRAME_POINTER
+       /* Load parent ebp into edx */
+       movl    4*4(%esp), %edx
+#else
+       /* There's no frame pointer, load the appropriate stack addr instead */
+       lea     4*4(%esp), %edx
+#endif
+
+       movl    (MCOUNT_FRAME+4)*4(%esp), %eax  /* load the rip */
+       /* Get the parent ip */
+       movl    4(%edx), %edx                   /* edx has ebp */
+
+       movl    function_trace_op, %ecx
+       subl    $MCOUNT_INSN_SIZE, %eax
+
+.globl ftrace_call
+ftrace_call:
+       call    ftrace_stub
+
+       addl    $4, %esp                        /* skip NULL pointer */
+       popl    %edx
+       popl    %ecx
+       popl    %eax
+#ifdef USING_FRAME_POINTER
+       popl    %ebp
+# ifdef CC_USING_FENTRY
+       addl    $4,%esp                         /* skip function ip */
+       popl    %ebp                            /* this is the orig bp */
+       addl    $4, %esp                        /* skip parent ip */
+# endif
+#endif
+.Lftrace_ret:
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.globl ftrace_graph_call
+ftrace_graph_call:
+       jmp     ftrace_stub
+#endif
+
+/* This is weak to keep gas from relaxing the jumps */
+WEAK(ftrace_stub)
+       ret
+END(ftrace_caller)
+
+ENTRY(ftrace_regs_caller)
+       /*
+        * i386 does not save SS and ESP when coming from kernel.
+        * Instead, to get sp, &regs->sp is used (see ptrace.h).
+        * Unfortunately, that means eflags must be at the same location
+        * as the current return ip is. We move the return ip into the
+        * regs->ip location, and move flags into the return ip location.
+        */
+       pushl   $__KERNEL_CS
+       pushl   4(%esp)                         /* Save the return ip */
+       pushl   $0                              /* Load 0 into orig_ax */
+       pushl   %gs
+       pushl   %fs
+       pushl   %es
+       pushl   %ds
+       pushl   %eax
+
+       /* Get flags and place them into the return ip slot */
+       pushf
+       popl    %eax
+       movl    %eax, 8*4(%esp)
+
+       pushl   %ebp
+       pushl   %edi
+       pushl   %esi
+       pushl   %edx
+       pushl   %ecx
+       pushl   %ebx
+
+       movl    12*4(%esp), %eax                /* Load ip (1st parameter) */
+       subl    $MCOUNT_INSN_SIZE, %eax         /* Adjust ip */
+#ifdef CC_USING_FENTRY
+       movl    15*4(%esp), %edx                /* Load parent ip (2nd parameter) */
+#else
+       movl    0x4(%ebp), %edx                 /* Load parent ip (2nd parameter) */
+#endif
+       movl    function_trace_op, %ecx         /* Save ftrace_pos in 3rd parameter */
+       pushl   %esp                            /* Save pt_regs as 4th parameter */
+
+GLOBAL(ftrace_regs_call)
+       call    ftrace_stub
+
+       addl    $4, %esp                        /* Skip pt_regs */
+
+       /* restore flags */
+       push    14*4(%esp)
+       popf
+
+       /* Move return ip back to its original location */
+       movl    12*4(%esp), %eax
+       movl    %eax, 14*4(%esp)
+
+       popl    %ebx
+       popl    %ecx
+       popl    %edx
+       popl    %esi
+       popl    %edi
+       popl    %ebp
+       popl    %eax
+       popl    %ds
+       popl    %es
+       popl    %fs
+       popl    %gs
+
+       /* use lea to not affect flags */
+       lea     3*4(%esp), %esp                 /* Skip orig_ax, ip and cs */
+
+       jmp     .Lftrace_ret
+#else /* ! CONFIG_DYNAMIC_FTRACE */
+
+ENTRY(function_hook)
+       cmpl    $__PAGE_OFFSET, %esp
+       jb      ftrace_stub                     /* Paging not enabled yet? */
+
+       cmpl    $ftrace_stub, ftrace_trace_function
+       jnz     .Ltrace
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       cmpl    $ftrace_stub, ftrace_graph_return
+       jnz     ftrace_graph_caller
+
+       cmpl    $ftrace_graph_entry_stub, ftrace_graph_entry
+       jnz     ftrace_graph_caller
+#endif
+.globl ftrace_stub
+ftrace_stub:
+       ret
+
+       /* taken from glibc */
+.Ltrace:
+       pushl   %eax
+       pushl   %ecx
+       pushl   %edx
+       movl    0xc(%esp), %eax
+       movl    0x4(%ebp), %edx
+       subl    $MCOUNT_INSN_SIZE, %eax
+
+       call    *ftrace_trace_function
+
+       popl    %edx
+       popl    %ecx
+       popl    %eax
+       jmp     ftrace_stub
+END(function_hook)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+       pushl   %eax
+       pushl   %ecx
+       pushl   %edx
+       movl    3*4(%esp), %eax
+       /* Even with frame pointers, fentry doesn't have one here */
+#ifdef CC_USING_FENTRY
+       lea     4*4(%esp), %edx
+       movl    $0, %ecx
+#else
+       lea     0x4(%ebp), %edx
+       movl    (%ebp), %ecx
+#endif
+       subl    $MCOUNT_INSN_SIZE, %eax
+       call    prepare_ftrace_return
+       popl    %edx
+       popl    %ecx
+       popl    %eax
+       ret
+END(ftrace_graph_caller)
+
+.globl return_to_handler
+return_to_handler:
+       pushl   %eax
+       pushl   %edx
+#ifdef CC_USING_FENTRY
+       movl    $0, %eax
+#else
+       movl    %ebp, %eax
+#endif
+       call    ftrace_return_to_handler
+       movl    %eax, %ecx
+       popl    %edx
+       popl    %eax
+       jmp     *%ecx
+#endif
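
For reference, the pushes in ftrace_regs_caller above reproduce the i386 struct pt_regs layout on the stack (regs->sp and regs->ss are not saved when trapping from the kernel, which is why flags must reuse the old return-ip slot):

    /* Stack layout built above, as 4-byte slots from %esp (sketch):
     *   0 ebx   1 ecx   2 edx   3 esi   4 edi   5 ebp   6 eax
     *   7 ds    8 es    9 fs   10 gs   11 orig_ax
     *  12 ip   13 cs ($__KERNEL_CS)   14 flags (in the slot the return
     *          ip occupied on entry)  15 parent ip (fentry case)
     */
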
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
new file mode 100644 (file)
index 0000000..1dfac63
--- /dev/null
@@ -0,0 +1,332 @@
+/*
+ *  Copyright (C) 2014  Steven Rostedt, Red Hat Inc
+ */
+
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+#include <asm/ftrace.h>
+#include <asm/export.h>
+
+
+       .code64
+       .section .entry.text, "ax"
+
+#ifdef CC_USING_FENTRY
+# define function_hook __fentry__
+EXPORT_SYMBOL(__fentry__)
+#else
+# define function_hook mcount
+EXPORT_SYMBOL(mcount)
+#endif
+
+/* All cases save the original rbp (8 bytes) */
+#ifdef CONFIG_FRAME_POINTER
+# ifdef CC_USING_FENTRY
+/* Save parent and function stack frames (rip and rbp) */
+#  define MCOUNT_FRAME_SIZE    (8+16*2)
+# else
+/* Save just function stack frame (rip and rbp) */
+#  define MCOUNT_FRAME_SIZE    (8+16)
+# endif
+#else
+/* No need to save a stack frame */
+# define MCOUNT_FRAME_SIZE     8
+#endif /* CONFIG_FRAME_POINTER */
+
+/* Size of stack used to save mcount regs in save_mcount_regs */
+#define MCOUNT_REG_SIZE                (SS+8 + MCOUNT_FRAME_SIZE)
+
+/*
+ * gcc -pg option adds a call to 'mcount' in most functions.
+ * When -mfentry is used, the call is to 'fentry' and not 'mcount'
+ * and is done before the function's stack frame is set up.
+ * They both require a set of regs to be saved before calling
+ * any C code and restored before returning back to the function.
+ *
+ * On boot up, all these calls are converted into nops. When tracing
+ * is enabled, the call can jump to either ftrace_caller or
+ * ftrace_regs_caller. Callbacks (tracing functions) that require
+ * ftrace_regs_caller (like kprobes) need to have pt_regs passed to
+ * it. For this reason, the size of the pt_regs structure will be
+ * allocated on the stack and the required mcount registers will
+ * be saved in the locations that pt_regs has them in.
+ */
+
+/*
+ * @added: the amount of stack added before calling this
+ *
+ * After this is called, the following registers contain:
+ *
+ *  %rdi - holds the address that called the trampoline
+ *  %rsi - holds the parent function (traced function's return address)
+ *  %rdx - holds the original %rbp
+ */
+.macro save_mcount_regs added=0
+
+       /* Always save the original rbp */
+       pushq %rbp
+
+#ifdef CONFIG_FRAME_POINTER
+       /*
+        * Stack traces will stop at the ftrace trampoline if the frame pointer
+        * is not set up properly. If fentry is used, we need to save a frame
+        * pointer for the parent as well as the function traced, because the
+        * fentry is called before the stack frame is set up, where as mcount
+        * is called afterward.
+        */
+#ifdef CC_USING_FENTRY
+       /* Save the parent pointer (skip orig rbp and our return address) */
+       pushq \added+8*2(%rsp)
+       pushq %rbp
+       movq %rsp, %rbp
+       /* Save the return address (now skip orig rbp, rbp and parent) */
+       pushq \added+8*3(%rsp)
+#else
+       /* Can't assume that rip is before this (unless added was zero) */
+       pushq \added+8(%rsp)
+#endif
+       pushq %rbp
+       movq %rsp, %rbp
+#endif /* CONFIG_FRAME_POINTER */
+
+       /*
+        * We add enough stack to save all regs.
+        */
+       subq $(MCOUNT_REG_SIZE - MCOUNT_FRAME_SIZE), %rsp
+       movq %rax, RAX(%rsp)
+       movq %rcx, RCX(%rsp)
+       movq %rdx, RDX(%rsp)
+       movq %rsi, RSI(%rsp)
+       movq %rdi, RDI(%rsp)
+       movq %r8, R8(%rsp)
+       movq %r9, R9(%rsp)
+       /*
+        * Save the original RBP. Even though the mcount ABI does not
+        * require this, it helps out callers.
+        */
+       movq MCOUNT_REG_SIZE-8(%rsp), %rdx
+       movq %rdx, RBP(%rsp)
+
+       /* Copy the parent address into %rsi (second parameter) */
+#ifdef CC_USING_FENTRY
+       movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi
+#else
+       /* %rdx contains original %rbp */
+       movq 8(%rdx), %rsi
+#endif
+
+        /* Move RIP to its proper location */
+       movq MCOUNT_REG_SIZE+\added(%rsp), %rdi
+       movq %rdi, RIP(%rsp)
+
+       /*
+        * Now %rdi (the first parameter) has the return address of
+        * where ftrace_call returns. But the callbacks expect the
+        * address of the call itself.
+        */
+       subq $MCOUNT_INSN_SIZE, %rdi
+       .endm
+
+.macro restore_mcount_regs
+       movq R9(%rsp), %r9
+       movq R8(%rsp), %r8
+       movq RDI(%rsp), %rdi
+       movq RSI(%rsp), %rsi
+       movq RDX(%rsp), %rdx
+       movq RCX(%rsp), %rcx
+       movq RAX(%rsp), %rax
+
+       /* ftrace_regs_caller can modify %rbp */
+       movq RBP(%rsp), %rbp
+
+       addq $MCOUNT_REG_SIZE, %rsp
+
+       .endm
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+ENTRY(function_hook)
+       retq
+END(function_hook)
+
+ENTRY(ftrace_caller)
+       /* save_mcount_regs fills in first two parameters */
+       save_mcount_regs
+
+GLOBAL(ftrace_caller_op_ptr)
+       /* Load the ftrace_ops into the 3rd parameter */
+       movq function_trace_op(%rip), %rdx
+
+       /* regs go into 4th parameter (but make it NULL) */
+       movq $0, %rcx
+
+GLOBAL(ftrace_call)
+       call ftrace_stub
+
+       restore_mcount_regs
+
+       /*
+        * The copied trampoline must call ftrace_epilogue as it
+        * still may need to call the function graph tracer.
+        *
+        * The code up to this label is copied into trampolines so
+        * think twice before adding any new code or changing the
+        * layout here.
+        */
+GLOBAL(ftrace_epilogue)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+GLOBAL(ftrace_graph_call)
+       jmp ftrace_stub
+#endif
+
+/* This is weak to keep gas from relaxing the jumps */
+WEAK(ftrace_stub)
+       retq
+END(ftrace_caller)
+
+ENTRY(ftrace_regs_caller)
+       /* Save the current flags before any operations that can change them */
+       pushfq
+
+       /* added 8 bytes to save flags */
+       save_mcount_regs 8
+       /* save_mcount_regs fills in first two parameters */
+
+GLOBAL(ftrace_regs_caller_op_ptr)
+       /* Load the ftrace_ops into the 3rd parameter */
+       movq function_trace_op(%rip), %rdx
+
+       /* Save the rest of pt_regs */
+       movq %r15, R15(%rsp)
+       movq %r14, R14(%rsp)
+       movq %r13, R13(%rsp)
+       movq %r12, R12(%rsp)
+       movq %r11, R11(%rsp)
+       movq %r10, R10(%rsp)
+       movq %rbx, RBX(%rsp)
+       /* Copy saved flags */
+       movq MCOUNT_REG_SIZE(%rsp), %rcx
+       movq %rcx, EFLAGS(%rsp)
+       /* Kernel segments */
+       movq $__KERNEL_DS, %rcx
+       movq %rcx, SS(%rsp)
+       movq $__KERNEL_CS, %rcx
+       movq %rcx, CS(%rsp)
+       /* Stack - skipping return address and flags */
+       leaq MCOUNT_REG_SIZE+8*2(%rsp), %rcx
+       movq %rcx, RSP(%rsp)
+
+       /* regs go into 4th parameter */
+       leaq (%rsp), %rcx
+
+GLOBAL(ftrace_regs_call)
+       call ftrace_stub
+
+       /* Copy flags back to SS, to restore them */
+       movq EFLAGS(%rsp), %rax
+       movq %rax, MCOUNT_REG_SIZE(%rsp)
+
+       /* Handlers can change the RIP */
+       movq RIP(%rsp), %rax
+       movq %rax, MCOUNT_REG_SIZE+8(%rsp)
+
+       /* restore the rest of pt_regs */
+       movq R15(%rsp), %r15
+       movq R14(%rsp), %r14
+       movq R13(%rsp), %r13
+       movq R12(%rsp), %r12
+       movq R10(%rsp), %r10
+       movq RBX(%rsp), %rbx
+
+       restore_mcount_regs
+
+       /* Restore flags */
+       popfq
+
+       /*
+        * As this jmp to ftrace_epilogue can be a short jump
+        * it must not be copied into the trampoline.
+        * The trampoline will add the code to jump
+        * to the return.
+        */
+GLOBAL(ftrace_regs_caller_end)
+
+       jmp ftrace_epilogue
+
+END(ftrace_regs_caller)
+
+
+#else /* ! CONFIG_DYNAMIC_FTRACE */
+
+ENTRY(function_hook)
+       cmpq $ftrace_stub, ftrace_trace_function
+       jnz trace
+
+fgraph_trace:
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       cmpq $ftrace_stub, ftrace_graph_return
+       jnz ftrace_graph_caller
+
+       cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
+       jnz ftrace_graph_caller
+#endif
+
+GLOBAL(ftrace_stub)
+       retq
+
+trace:
+       /* save_mcount_regs fills in first two parameters */
+       save_mcount_regs
+
+       /*
+        * When DYNAMIC_FTRACE is not defined, ARCH_SUPPORTS_FTRACE_OPS is not
+        * set (see include/asm/ftrace.h and include/linux/ftrace.h).  Only the
+        * ip and parent ip are used and the list function is called when
+        * function tracing is enabled.
+        */
+       call   *ftrace_trace_function
+
+       restore_mcount_regs
+
+       jmp fgraph_trace
+END(function_hook)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+       /* Saves rbp into %rdx and fills first parameter  */
+       save_mcount_regs
+
+#ifdef CC_USING_FENTRY
+       leaq MCOUNT_REG_SIZE+8(%rsp), %rsi
+       movq $0, %rdx   /* No framepointers needed */
+#else
+       /* Save address of the return address of traced function */
+       leaq 8(%rdx), %rsi
+       /* ftrace does sanity checks against frame pointers */
+       movq (%rdx), %rdx
+#endif
+       call    prepare_ftrace_return
+
+       restore_mcount_regs
+
+       retq
+END(ftrace_graph_caller)
+
+GLOBAL(return_to_handler)
+       subq  $24, %rsp
+
+       /* Save the return values */
+       movq %rax, (%rsp)
+       movq %rdx, 8(%rsp)
+       movq %rbp, %rdi
+
+       call ftrace_return_to_handler
+
+       movq %rax, %rdi
+       movq 8(%rsp), %rdx
+       movq (%rsp), %rax
+       addq $24, %rsp
+       jmp *%rdi
+#endif
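
The CC_USING_FENTRY branches throughout this file follow from where the compiler places the hook, as the save_mcount_regs comment above notes; roughly (typical GCC codegen, assumed, not part of this diff):

    /* Hook placement the trampolines must cope with (assumed codegen):
     *
     *   gcc -pg:            push %rbp; mov %rsp,%rbp   <- frame set up first
     *                       call mcount                <- hook sees a frame
     *
     *   gcc -pg -mfentry:   call __fentry__            <- very first insn,
     *                       push %rbp; mov %rsp,%rbp      no frame yet
     *
     * so in the fentry case save_mcount_regs has to fabricate frame words
     * for the parent and the traced function when CONFIG_FRAME_POINTER=y.
     */
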
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
deleted file mode 100644 (file)
index 7b0d3da..0000000
+++ /dev/null
@@ -1,338 +0,0 @@
-/*
- *  linux/arch/x86_64/mcount_64.S
- *
- *  Copyright (C) 2014  Steven Rostedt, Red Hat Inc
- */
-
-#include <linux/linkage.h>
-#include <asm/ptrace.h>
-#include <asm/ftrace.h>
-#include <asm/export.h>
-
-
-       .code64
-       .section .entry.text, "ax"
-
-
-#ifdef CONFIG_FUNCTION_TRACER
-
-#ifdef CC_USING_FENTRY
-# define function_hook __fentry__
-EXPORT_SYMBOL(__fentry__)
-#else
-# define function_hook mcount
-EXPORT_SYMBOL(mcount)
-#endif
-
-/* All cases save the original rbp (8 bytes) */
-#ifdef CONFIG_FRAME_POINTER
-# ifdef CC_USING_FENTRY
-/* Save parent and function stack frames (rip and rbp) */
-#  define MCOUNT_FRAME_SIZE    (8+16*2)
-# else
-/* Save just function stack frame (rip and rbp) */
-#  define MCOUNT_FRAME_SIZE    (8+16)
-# endif
-#else
-/* No need to save a stack frame */
-# define MCOUNT_FRAME_SIZE     8
-#endif /* CONFIG_FRAME_POINTER */
-
-/* Size of stack used to save mcount regs in save_mcount_regs */
-#define MCOUNT_REG_SIZE                (SS+8 + MCOUNT_FRAME_SIZE)
-
-/*
- * gcc -pg option adds a call to 'mcount' in most functions.
- * When -mfentry is used, the call is to 'fentry' and not 'mcount'
- * and is done before the function's stack frame is set up.
- * They both require a set of regs to be saved before calling
- * any C code and restored before returning back to the function.
- *
- * On boot up, all these calls are converted into nops. When tracing
- * is enabled, the call can jump to either ftrace_caller or
- * ftrace_regs_caller. Callbacks (tracing functions) that require
- * ftrace_regs_caller (like kprobes) need to have pt_regs passed to
- * it. For this reason, the size of the pt_regs structure will be
- * allocated on the stack and the required mcount registers will
- * be saved in the locations that pt_regs has them in.
- */
-
-/*
- * @added: the amount of stack added before calling this
- *
- * After this is called, the following registers contain:
- *
- *  %rdi - holds the address that called the trampoline
- *  %rsi - holds the parent function (traced function's return address)
- *  %rdx - holds the original %rbp
- */
-.macro save_mcount_regs added=0
-
-       /* Always save the original rbp */
-       pushq %rbp
-
-#ifdef CONFIG_FRAME_POINTER
-       /*
-        * Stack traces will stop at the ftrace trampoline if the frame pointer
-        * is not set up properly. If fentry is used, we need to save a frame
-        * pointer for the parent as well as the function traced, because the
-        * fentry is called before the stack frame is set up, where as mcount
-        * is called afterward.
-        */
-#ifdef CC_USING_FENTRY
-       /* Save the parent pointer (skip orig rbp and our return address) */
-       pushq \added+8*2(%rsp)
-       pushq %rbp
-       movq %rsp, %rbp
-       /* Save the return address (now skip orig rbp, rbp and parent) */
-       pushq \added+8*3(%rsp)
-#else
-       /* Can't assume that rip is before this (unless added was zero) */
-       pushq \added+8(%rsp)
-#endif
-       pushq %rbp
-       movq %rsp, %rbp
-#endif /* CONFIG_FRAME_POINTER */
-
-       /*
-        * We add enough stack to save all regs.
-        */
-       subq $(MCOUNT_REG_SIZE - MCOUNT_FRAME_SIZE), %rsp
-       movq %rax, RAX(%rsp)
-       movq %rcx, RCX(%rsp)
-       movq %rdx, RDX(%rsp)
-       movq %rsi, RSI(%rsp)
-       movq %rdi, RDI(%rsp)
-       movq %r8, R8(%rsp)
-       movq %r9, R9(%rsp)
-       /*
-        * Save the original RBP. Even though the mcount ABI does not
-        * require this, it helps out callers.
-        */
-       movq MCOUNT_REG_SIZE-8(%rsp), %rdx
-       movq %rdx, RBP(%rsp)
-
-       /* Copy the parent address into %rsi (second parameter) */
-#ifdef CC_USING_FENTRY
-       movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi
-#else
-       /* %rdx contains original %rbp */
-       movq 8(%rdx), %rsi
-#endif
-
-        /* Move RIP to its proper location */
-       movq MCOUNT_REG_SIZE+\added(%rsp), %rdi
-       movq %rdi, RIP(%rsp)
-
-       /*
-        * Now %rdi (the first parameter) has the return address of
-        * where ftrace_call returns. But the callbacks expect the
-        * address of the call itself.
-        */
-       subq $MCOUNT_INSN_SIZE, %rdi
-       .endm
-
-.macro restore_mcount_regs
-       movq R9(%rsp), %r9
-       movq R8(%rsp), %r8
-       movq RDI(%rsp), %rdi
-       movq RSI(%rsp), %rsi
-       movq RDX(%rsp), %rdx
-       movq RCX(%rsp), %rcx
-       movq RAX(%rsp), %rax
-
-       /* ftrace_regs_caller can modify %rbp */
-       movq RBP(%rsp), %rbp
-
-       addq $MCOUNT_REG_SIZE, %rsp
-
-       .endm
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-
-ENTRY(function_hook)
-       retq
-END(function_hook)
-
-ENTRY(ftrace_caller)
-       /* save_mcount_regs fills in first two parameters */
-       save_mcount_regs
-
-GLOBAL(ftrace_caller_op_ptr)
-       /* Load the ftrace_ops into the 3rd parameter */
-       movq function_trace_op(%rip), %rdx
-
-       /* regs go into 4th parameter (but make it NULL) */
-       movq $0, %rcx
-
-GLOBAL(ftrace_call)
-       call ftrace_stub
-
-       restore_mcount_regs
-
-       /*
-        * The copied trampoline must call ftrace_epilogue as it
-        * still may need to call the function graph tracer.
-        *
-        * The code up to this label is copied into trampolines so
-        * think twice before adding any new code or changing the
-        * layout here.
-        */
-GLOBAL(ftrace_epilogue)
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-GLOBAL(ftrace_graph_call)
-       jmp ftrace_stub
-#endif
-
-/* This is weak to keep gas from relaxing the jumps */
-WEAK(ftrace_stub)
-       retq
-END(ftrace_caller)
-
-ENTRY(ftrace_regs_caller)
-       /* Save the current flags before any operations that can change them */
-       pushfq
-
-       /* added 8 bytes to save flags */
-       save_mcount_regs 8
-       /* save_mcount_regs fills in first two parameters */
-
-GLOBAL(ftrace_regs_caller_op_ptr)
-       /* Load the ftrace_ops into the 3rd parameter */
-       movq function_trace_op(%rip), %rdx
-
-       /* Save the rest of pt_regs */
-       movq %r15, R15(%rsp)
-       movq %r14, R14(%rsp)
-       movq %r13, R13(%rsp)
-       movq %r12, R12(%rsp)
-       movq %r11, R11(%rsp)
-       movq %r10, R10(%rsp)
-       movq %rbx, RBX(%rsp)
-       /* Copy saved flags */
-       movq MCOUNT_REG_SIZE(%rsp), %rcx
-       movq %rcx, EFLAGS(%rsp)
-       /* Kernel segments */
-       movq $__KERNEL_DS, %rcx
-       movq %rcx, SS(%rsp)
-       movq $__KERNEL_CS, %rcx
-       movq %rcx, CS(%rsp)
-       /* Stack - skipping return address and flags */
-       leaq MCOUNT_REG_SIZE+8*2(%rsp), %rcx
-       movq %rcx, RSP(%rsp)
-
-       /* regs go into 4th parameter */
-       leaq (%rsp), %rcx
-
-GLOBAL(ftrace_regs_call)
-       call ftrace_stub
-
-       /* Copy flags back to SS, to restore them */
-       movq EFLAGS(%rsp), %rax
-       movq %rax, MCOUNT_REG_SIZE(%rsp)
-
-       /* Handlers can change the RIP */
-       movq RIP(%rsp), %rax
-       movq %rax, MCOUNT_REG_SIZE+8(%rsp)
-
-       /* restore the rest of pt_regs */
-       movq R15(%rsp), %r15
-       movq R14(%rsp), %r14
-       movq R13(%rsp), %r13
-       movq R12(%rsp), %r12
-       movq R10(%rsp), %r10
-       movq RBX(%rsp), %rbx
-
-       restore_mcount_regs
-
-       /* Restore flags */
-       popfq
-
-       /*
-        * As this jmp to ftrace_epilogue can be a short jump
-        * it must not be copied into the trampoline.
-        * The trampoline will add the code to jump
-        * to the return.
-        */
-GLOBAL(ftrace_regs_caller_end)
-
-       jmp ftrace_epilogue
-
-END(ftrace_regs_caller)
-
-
-#else /* ! CONFIG_DYNAMIC_FTRACE */
-
-ENTRY(function_hook)
-       cmpq $ftrace_stub, ftrace_trace_function
-       jnz trace
-
-fgraph_trace:
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       cmpq $ftrace_stub, ftrace_graph_return
-       jnz ftrace_graph_caller
-
-       cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
-       jnz ftrace_graph_caller
-#endif
-
-GLOBAL(ftrace_stub)
-       retq
-
-trace:
-       /* save_mcount_regs fills in first two parameters */
-       save_mcount_regs
-
-       /*
-        * When DYNAMIC_FTRACE is not defined, ARCH_SUPPORTS_FTRACE_OPS is not
-        * set (see include/asm/ftrace.h and include/linux/ftrace.h).  Only the
-        * ip and parent ip are used and the list function is called when
-        * function tracing is enabled.
-        */
-       call   *ftrace_trace_function
-
-       restore_mcount_regs
-
-       jmp fgraph_trace
-END(function_hook)
-#endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_FUNCTION_TRACER */
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller)
-       /* Saves rbp into %rdx and fills first parameter  */
-       save_mcount_regs
-
-#ifdef CC_USING_FENTRY
-       leaq MCOUNT_REG_SIZE+8(%rsp), %rsi
-       movq $0, %rdx   /* No framepointers needed */
-#else
-       /* Save address of the return address of traced function */
-       leaq 8(%rdx), %rsi
-       /* ftrace does sanity checks against frame pointers */
-       movq (%rdx), %rdx
-#endif
-       call    prepare_ftrace_return
-
-       restore_mcount_regs
-
-       retq
-END(ftrace_graph_caller)
-
-GLOBAL(return_to_handler)
-       subq  $24, %rsp
-
-       /* Save the return values */
-       movq %rax, (%rsp)
-       movq %rdx, 8(%rsp)
-       movq %rbp, %rdi
-
-       call ftrace_return_to_handler
-
-       movq %rax, %rdi
-       movq 8(%rsp), %rdx
-       movq (%rsp), %rax
-       addq $24, %rsp
-       jmp *%rdi
-#endif
index 4e496379a871687281ddc0c69c0e10d7ec036f09..3995d3a777d49c569e3ba79e3eca5d2206ab1b0a 100644 (file)
@@ -169,6 +169,37 @@ void ist_end_non_atomic(void)
        preempt_disable();
 }
 
+int is_valid_bugaddr(unsigned long addr)
+{
+       unsigned short ud;
+
+       if (addr < TASK_SIZE_MAX)
+               return 0;
+
+       if (probe_kernel_address((unsigned short *)addr, ud))
+               return 0;
+
+       return ud == INSN_UD0 || ud == INSN_UD2;
+}
+
+static int fixup_bug(struct pt_regs *regs, int trapnr)
+{
+       if (trapnr != X86_TRAP_UD)
+               return 0;
+
+       switch (report_bug(regs->ip, regs)) {
+       case BUG_TRAP_TYPE_NONE:
+       case BUG_TRAP_TYPE_BUG:
+               break;
+
+       case BUG_TRAP_TYPE_WARN:
+               regs->ip += LEN_UD0;
+               return 1;
+       }
+
+       return 0;
+}
+
 static nokprobe_inline int
 do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
                  struct pt_regs *regs, long error_code)
@@ -187,12 +218,15 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
        }
 
        if (!user_mode(regs)) {
-               if (!fixup_exception(regs, trapnr)) {
-                       tsk->thread.error_code = error_code;
-                       tsk->thread.trap_nr = trapnr;
-                       die(str, regs, error_code);
-               }
-               return 0;
+               if (fixup_exception(regs, trapnr))
+                       return 0;
+
+               if (fixup_bug(regs, trapnr))
+                       return 0;
+
+               tsk->thread.error_code = error_code;
+               tsk->thread.trap_nr = trapnr;
+               die(str, regs, error_code);
        }
 
        return -1;
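
fixup_bug() keys off the return value of report_bug() in lib/bug.c (changed in this commit but not shown on this page); on a WARN it advances regs->ip by LEN_UD0 so execution resumes just past the trapping ud0, while a real BUG() falls through to die(). A sketch of the assumed contract:

    /* Assumed contract, per include/linux/bug.h and lib/bug.c: */
    enum bug_trap_type {
            BUG_TRAP_TYPE_NONE = 0, /* address not in __bug_table */
            BUG_TRAP_TYPE_WARN = 1, /* BUGFLAG_WARNING: print and resume */
            BUG_TRAP_TYPE_BUG  = 2, /* plain BUG(): caller should die() */
    };
    enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs);
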
index 08339262b666e56f2623406a10c42f3184c83e29..fec70fe3b1ec637920c758a34a17e2f6cecbb082 100644 (file)
@@ -1,6 +1,8 @@
 #include <linux/sched.h>
 #include <linux/sched/task.h>
 #include <linux/sched/task_stack.h>
+#include <linux/interrupt.h>
+#include <asm/sections.h>
 #include <asm/ptrace.h>
 #include <asm/bitops.h>
 #include <asm/stacktrace.h>
        val;                                            \
 })
 
-static void unwind_dump(struct unwind_state *state, unsigned long *sp)
+static void unwind_dump(struct unwind_state *state)
 {
        static bool dumped_before = false;
        bool prev_zero, zero = false;
-       unsigned long word;
+       unsigned long word, *sp;
+       struct stack_info stack_info = {0};
+       unsigned long visit_mask = 0;
 
        if (dumped_before)
                return;
 
        dumped_before = true;
 
-       printk_deferred("unwind stack type:%d next_sp:%p mask:%lx graph_idx:%d\n",
+       printk_deferred("unwind stack type:%d next_sp:%p mask:0x%lx graph_idx:%d\n",
                        state->stack_info.type, state->stack_info.next_sp,
                        state->stack_mask, state->graph_idx);
 
-       for (sp = state->orig_sp; sp < state->stack_info.end; sp++) {
-               word = READ_ONCE_NOCHECK(*sp);
+       for (sp = state->orig_sp; sp; sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
+               if (get_stack_info(sp, state->task, &stack_info, &visit_mask))
+                       break;
 
-               prev_zero = zero;
-               zero = word == 0;
+               for (; sp < stack_info.end; sp++) {
 
-               if (zero) {
-                       if (!prev_zero)
-                               printk_deferred("%p: %016x ...\n", sp, 0);
-                       continue;
-               }
+                       word = READ_ONCE_NOCHECK(*sp);
+
+                       prev_zero = zero;
+                       zero = word == 0;
+
+                       if (zero) {
+                               if (!prev_zero)
+                                       printk_deferred("%p: %0*x ...\n",
+                                                       sp, BITS_PER_LONG/4, 0);
+                               continue;
+                       }
 
-               printk_deferred("%p: %016lx (%pB)\n", sp, word, (void *)word);
+                       printk_deferred("%p: %0*lx (%pB)\n",
+                                       sp, BITS_PER_LONG/4, word, (void *)word);
+               }
        }
 }
 
 unsigned long unwind_get_return_address(struct unwind_state *state)
 {
-       unsigned long addr;
-       unsigned long *addr_p = unwind_get_return_address_ptr(state);
-
        if (unwind_done(state))
                return 0;
 
-       if (state->regs && user_mode(state->regs))
-               return 0;
-
-       addr = READ_ONCE_TASK_STACK(state->task, *addr_p);
-       addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, addr,
-                                    addr_p);
-
-       return __kernel_text_address(addr) ? addr : 0;
+       return __kernel_text_address(state->ip) ? state->ip : 0;
 }
 EXPORT_SYMBOL_GPL(unwind_get_return_address);
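
Note: two of the dump fixes above are purely about output: runs of zero words collapse into a single "..." line, and the %0*x / %0*lx width of BITS_PER_LONG/4 pads values to 8 hex digits on 32-bit and 16 on 64-bit. A standalone sketch of that formatting loop:

	#include <stdbool.h>
	#include <stdio.h>

	#define BITS_PER_LONG	(8 * (int)sizeof(long))

	static void dump_words(const unsigned long *sp, int n)
	{
		bool prev_zero, zero = false;

		for (int i = 0; i < n; i++, sp++) {
			unsigned long word = *sp;

			prev_zero = zero;
			zero = word == 0;

			if (zero) {
				/* first zero of a run prints; the rest are elided */
				if (!prev_zero)
					printf("%p: %0*x ...\n",
					       (void *)sp, BITS_PER_LONG / 4, 0);
				continue;
			}
			printf("%p: %0*lx\n", (void *)sp, BITS_PER_LONG / 4, word);
		}
	}

	int main(void)
	{
		unsigned long stack[] = { 0xdeadbeef, 0, 0, 0, 0xcafef00d };

		dump_words(stack, 5);
		return 0;
	}
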
 
@@ -82,16 +84,41 @@ static size_t regs_size(struct pt_regs *regs)
        return sizeof(*regs);
 }
 
+static bool in_entry_code(unsigned long ip)
+{
+       char *addr = (char *)ip;
+
+       if (addr >= __entry_text_start && addr < __entry_text_end)
+               return true;
+
+#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
+       if (addr >= __irqentry_text_start && addr < __irqentry_text_end)
+               return true;
+#endif
+
+       return false;
+}
+
+static inline unsigned long *last_frame(struct unwind_state *state)
+{
+       return (unsigned long *)task_pt_regs(state->task) - 2;
+}
+
 #ifdef CONFIG_X86_32
 #define GCC_REALIGN_WORDS 3
 #else
 #define GCC_REALIGN_WORDS 1
 #endif
 
+static inline unsigned long *last_aligned_frame(struct unwind_state *state)
+{
+       return last_frame(state) - GCC_REALIGN_WORDS;
+}
+
 static bool is_last_task_frame(struct unwind_state *state)
 {
-       unsigned long *last_bp = (unsigned long *)task_pt_regs(state->task) - 2;
-       unsigned long *aligned_bp = last_bp - GCC_REALIGN_WORDS;
+       unsigned long *last_bp = last_frame(state);
+       unsigned long *aligned_bp = last_aligned_frame(state);
 
        /*
         * We have to check for the last task frame at two different locations
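
Note: in_entry_code(), added above, is a plain bounds check against linker-generated section symbols. A userspace analogue with a dummy array standing in for .entry.text (the __entry_text_* names here deliberately shadow the kernel's symbols):

	#include <stdbool.h>
	#include <stdio.h>

	static char entry_text[64];	/* stand-in for the .entry.text section */
	#define __entry_text_start	(entry_text)
	#define __entry_text_end	(entry_text + sizeof(entry_text))

	static bool in_entry_code(unsigned long ip)
	{
		char *addr = (char *)ip;

		return addr >= __entry_text_start && addr < __entry_text_end;
	}

	int main(void)
	{
		/* an address inside the fake section, and one far outside it */
		printf("inside: %d, outside: %d\n",
		       in_entry_code((unsigned long)(entry_text + 8)),
		       in_entry_code((unsigned long)main));
		return 0;
	}
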
@@ -135,26 +162,70 @@ static struct pt_regs *decode_frame_pointer(unsigned long *bp)
        return (struct pt_regs *)(regs & ~0x1);
 }
 
-static bool update_stack_state(struct unwind_state *state, void *addr,
-                              size_t len)
+static bool update_stack_state(struct unwind_state *state,
+                              unsigned long *next_bp)
 {
        struct stack_info *info = &state->stack_info;
-       enum stack_type orig_type = info->type;
+       enum stack_type prev_type = info->type;
+       struct pt_regs *regs;
+       unsigned long *frame, *prev_frame_end, *addr_p, addr;
+       size_t len;
+
+       if (state->regs)
+               prev_frame_end = (void *)state->regs + regs_size(state->regs);
+       else
+               prev_frame_end = (void *)state->bp + FRAME_HEADER_SIZE;
+
+       /* Is the next frame pointer an encoded pointer to pt_regs? */
+       regs = decode_frame_pointer(next_bp);
+       if (regs) {
+               frame = (unsigned long *)regs;
+               len = regs_size(regs);
+               state->got_irq = true;
+       } else {
+               frame = next_bp;
+               len = FRAME_HEADER_SIZE;
+       }
 
        /*
-        * If addr isn't on the current stack, switch to the next one.
+        * If the next bp isn't on the current stack, switch to the next one.
         *
         * We may have to traverse multiple stacks to deal with the possibility
-        * that 'info->next_sp' could point to an empty stack and 'addr' could
-        * be on a subsequent stack.
+        * that info->next_sp could point to an empty stack and the next bp
+        * could be on a subsequent stack.
         */
-       while (!on_stack(info, addr, len))
+       while (!on_stack(info, frame, len))
                if (get_stack_info(info->next_sp, state->task, info,
                                   &state->stack_mask))
                        return false;
 
-       if (!state->orig_sp || info->type != orig_type)
-               state->orig_sp = addr;
+       /* Make sure it only unwinds up and doesn't overlap the prev frame: */
+       if (state->orig_sp && state->stack_info.type == prev_type &&
+           frame < prev_frame_end)
+               return false;
+
+       /* Move state to the next frame: */
+       if (regs) {
+               state->regs = regs;
+               state->bp = NULL;
+       } else {
+               state->bp = next_bp;
+               state->regs = NULL;
+       }
+
+       /* Save the return address: */
+       if (state->regs && user_mode(state->regs))
+               state->ip = 0;
+       else {
+               addr_p = unwind_get_return_address_ptr(state);
+               addr = READ_ONCE_TASK_STACK(state->task, *addr_p);
+               state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
+                                                 addr, addr_p);
+       }
+
+       /* Save the original stack pointer for unwind_dump(): */
+       if (!state->orig_sp)
+               state->orig_sp = frame;
 
        return true;
 }
@@ -162,14 +233,12 @@ static bool update_stack_state(struct unwind_state *state, void *addr,
 bool unwind_next_frame(struct unwind_state *state)
 {
        struct pt_regs *regs;
-       unsigned long *next_bp, *next_frame;
-       size_t next_len;
-       enum stack_type prev_type = state->stack_info.type;
+       unsigned long *next_bp;
 
        if (unwind_done(state))
                return false;
 
-       /* have we reached the end? */
+       /* Have we reached the end? */
        if (state->regs && user_mode(state->regs))
                goto the_end;
 
@@ -197,54 +266,19 @@ bool unwind_next_frame(struct unwind_state *state)
                 */
                state->regs = regs;
                state->bp = NULL;
+               state->ip = 0;
                return true;
        }
 
-       /* get the next frame pointer */
+       /* Get the next frame pointer: */
        if (state->regs)
                next_bp = (unsigned long *)state->regs->bp;
        else
-               next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task,*state->bp);
-
-       /* is the next frame pointer an encoded pointer to pt_regs? */
-       regs = decode_frame_pointer(next_bp);
-       if (regs) {
-               next_frame = (unsigned long *)regs;
-               next_len = sizeof(*regs);
-       } else {
-               next_frame = next_bp;
-               next_len = FRAME_HEADER_SIZE;
-       }
-
-       /* make sure the next frame's data is accessible */
-       if (!update_stack_state(state, next_frame, next_len)) {
-               /*
-                * Don't warn on bad regs->bp.  An interrupt in entry code
-                * might cause a false positive warning.
-                */
-               if (state->regs)
-                       goto the_end;
+               next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task, *state->bp);
 
+       /* Move to the next frame if it's safe: */
+       if (!update_stack_state(state, next_bp))
                goto bad_address;
-       }
-
-       /* Make sure it only unwinds up and doesn't overlap the last frame: */
-       if (state->stack_info.type == prev_type) {
-               if (state->regs && (void *)next_frame < (void *)state->regs + regs_size(state->regs))
-                       goto bad_address;
-
-               if (state->bp && (void *)next_frame < (void *)state->bp + FRAME_HEADER_SIZE)
-                       goto bad_address;
-       }
-
-       /* move to the next frame */
-       if (regs) {
-               state->regs = regs;
-               state->bp = NULL;
-       } else {
-               state->bp = next_bp;
-               state->regs = NULL;
-       }
 
        return true;
 
@@ -259,18 +293,29 @@ bad_address:
        if (state->task != current)
                goto the_end;
 
+       /*
+        * Don't warn if the unwinder got lost due to an interrupt in entry
+        * code or in the C handler before the first frame pointer got set up:
+        */
+       if (state->got_irq && in_entry_code(state->ip))
+               goto the_end;
+       if (state->regs &&
+           state->regs->sp >= (unsigned long)last_aligned_frame(state) &&
+           state->regs->sp < (unsigned long)task_pt_regs(state->task))
+               goto the_end;
+
        if (state->regs) {
                printk_deferred_once(KERN_WARNING
                        "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
                        state->regs, state->task->comm,
-                       state->task->pid, next_frame);
-               unwind_dump(state, (unsigned long *)state->regs);
+                       state->task->pid, next_bp);
+               unwind_dump(state);
        } else {
                printk_deferred_once(KERN_WARNING
                        "WARNING: kernel stack frame pointer at %p in %s:%d has bad value %p\n",
                        state->bp, state->task->comm,
-                       state->task->pid, next_frame);
-               unwind_dump(state, state->bp);
+                       state->task->pid, next_bp);
+               unwind_dump(state);
        }
 the_end:
        state->stack_info.type = STACK_TYPE_UNKNOWN;
@@ -281,35 +326,24 @@ EXPORT_SYMBOL_GPL(unwind_next_frame);
 void __unwind_start(struct unwind_state *state, struct task_struct *task,
                    struct pt_regs *regs, unsigned long *first_frame)
 {
-       unsigned long *bp, *frame;
-       size_t len;
+       unsigned long *bp;
 
        memset(state, 0, sizeof(*state));
        state->task = task;
+       state->got_irq = (regs);
 
-       /* don't even attempt to start from user mode regs */
+       /* Don't even attempt to start from user mode regs: */
        if (regs && user_mode(regs)) {
                state->stack_info.type = STACK_TYPE_UNKNOWN;
                return;
        }
 
-       /* set up the starting stack frame */
        bp = get_frame_pointer(task, regs);
-       regs = decode_frame_pointer(bp);
-       if (regs) {
-               state->regs = regs;
-               frame = (unsigned long *)regs;
-               len = sizeof(*regs);
-       } else {
-               state->bp = bp;
-               frame = bp;
-               len = FRAME_HEADER_SIZE;
-       }
 
-       /* initialize stack info and make sure the frame data is accessible */
-       get_stack_info(frame, state->task, &state->stack_info,
+       /* Initialize stack info and make sure the frame data is accessible: */
+       get_stack_info(bp, state->task, &state->stack_info,
                       &state->stack_mask);
-       update_stack_state(state, frame, len);
+       update_stack_state(state, bp);
 
        /*
         * The caller can provide the address of the first frame directly
index 22881ddcbb9fc0c0a23ee7faf0578727b09a23f6..039f36738e4905564c2b38dc7179488368a14da8 100644 (file)
@@ -34,7 +34,7 @@ bool unwind_next_frame(struct unwind_state *state)
                                return true;
                }
 
-               state->sp = info->next_sp;
+               state->sp = PTR_ALIGN(info->next_sp, sizeof(long));
 
        } while (!get_stack_info(state->sp, state->task, info,
                                 &state->stack_mask));
@@ -49,7 +49,7 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
        memset(state, 0, sizeof(*state));
 
        state->task = task;
-       state->sp   = first_frame;
+       state->sp   = PTR_ALIGN(first_frame, sizeof(long));
 
        get_stack_info(first_frame, state->task, &state->stack_info,
                       &state->stack_mask);
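
Note: both unwind_guess.c changes are the same one-liner: round a possibly unaligned stack pointer up to a sizeof(long) boundary before it is walked as a word array. A userspace restatement of the kernel's ALIGN()/PTR_ALIGN() helpers (simplified; the kernel macro preserves the pointer's type via typeof):

	#include <stdint.h>
	#include <stdio.h>

	/* Round x up to the next multiple of a (a must be a power of two). */
	#define ALIGN(x, a)	(((x) + ((uintptr_t)(a) - 1)) & ~((uintptr_t)(a) - 1))
	#define PTR_ALIGN(p, a)	((void *)ALIGN((uintptr_t)(p), (a)))

	int main(void)
	{
		void *sp = (void *)0x1003;	/* deliberately misaligned */

		/* prints "0x1003 -> 0x1008" on LP64 targets */
		printf("%p -> %p\n", sp, PTR_ALIGN(sp, sizeof(long)));
		return 0;
	}
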
index c74ae9ce8dc40307e870b0dbabd2031cf5c116a4..c8a3b61be0aa0b48605396a4dd24af233e3475a4 100644 (file)
@@ -146,6 +146,7 @@ SECTIONS
                _edata = .;
        } :data
 
+       BUG_TABLE
 
        . = ALIGN(PAGE_SIZE);
        __vvar_page = .;
index 5e2af3a88cf5e47e2505926938d3f239d55f2ff3..81b1635d67dee9624de37e1f4577a36e9b7065cb 100644 (file)
  * Zero a page.
  * %rdi        - page
  */
-ENTRY(clear_page)
-
-       ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \
-                     "jmp clear_page_c_e", X86_FEATURE_ERMS
-
+ENTRY(clear_page_rep)
        movl $4096/8,%ecx
        xorl %eax,%eax
        rep stosq
        ret
-ENDPROC(clear_page)
-EXPORT_SYMBOL(clear_page)
+ENDPROC(clear_page_rep)
+EXPORT_SYMBOL_GPL(clear_page_rep)
 
 ENTRY(clear_page_orig)
-
        xorl   %eax,%eax
        movl   $4096/64,%ecx
        .p2align 4
@@ -47,10 +42,12 @@ ENTRY(clear_page_orig)
        nop
        ret
 ENDPROC(clear_page_orig)
+EXPORT_SYMBOL_GPL(clear_page_orig)
 
-ENTRY(clear_page_c_e)
+ENTRY(clear_page_erms)
        movl $4096,%ecx
        xorl %eax,%eax
        rep stosb
        ret
-ENDPROC(clear_page_c_e)
+ENDPROC(clear_page_erms)
+EXPORT_SYMBOL_GPL(clear_page_erms)
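
Note: the ALTERNATIVE_2 dispatch deleted above does not vanish: the three variants are renamed and exported so selection can move to the call site (the clear_page() wrapper in arch/x86/include/asm/page_64.h, which this merge also touches). The kernel patches the call instruction at boot; the rough userspace analogue is pick-once dispatch through a function pointer. Names and feature flags below are illustrative:

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	/* Stand-ins for the three kernel variants. */
	static void clear_page_orig(void *page) { memset(page, 0, 4096); }
	static void clear_page_rep(void *page)  { memset(page, 0, 4096); }
	static void clear_page_erms(void *page) { memset(page, 0, 4096); }

	/* Fake feature bits; the kernel tests X86_FEATURE_ERMS and
	 * X86_FEATURE_REP_GOOD. */
	static bool cpu_has_erms;
	static bool cpu_has_rep_good = true;

	static void (*clear_page)(void *page);

	/* Decide once, then every call goes straight to the winner --
	 * the moral equivalent of ALTERNATIVE patching. */
	static void select_clear_page(void)
	{
		if (cpu_has_erms)
			clear_page = clear_page_erms;
		else if (cpu_has_rep_good)
			clear_page = clear_page_rep;
		else
			clear_page = clear_page_orig;
	}

	int main(void)
	{
		static char page[4096] = { 1 };

		select_clear_page();
		clear_page(page);
		printf("first byte after clear: %d\n", page[0]);
		return 0;
	}
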
index 69f0827d5f5391e1ad3b4ff970aa63504c17b434..46cbbfe03285922a520c40c78061853d55a2de1a 100644 (file)
@@ -8,7 +8,7 @@ else
        BITS := 64
 endif
 
-obj-y = bug.o bugs_$(BITS).o delay.o fault.o ldt.o \
+obj-y = bugs_$(BITS).o delay.o fault.o ldt.o \
        ptrace_$(BITS).o ptrace_user.o setjmp_$(BITS).o signal.o \
        stub_$(BITS).o stub_segv.o \
        sys_call_table_$(BITS).o sysrq_$(BITS).o tls_$(BITS).o \
diff --git a/arch/x86/um/bug.c b/arch/x86/um/bug.c
deleted file mode 100644 (file)
index e8034e3..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (C) 2006 Jeff Dike (jdike@addtoit.com)
- * Licensed under the GPL V2
- */
-
-#include <linux/uaccess.h>
-
-/*
- * Mostly copied from i386/x86_64 - eliminated the eip < PAGE_OFFSET because
- * that's not relevant in skas mode.
- */
-
-int is_valid_bugaddr(unsigned long eip)
-{
-       unsigned short ud2;
-
-       if (probe_kernel_address((unsigned short __user *)eip, ud2))
-               return 0;
-
-       return ud2 == 0x0b0f;
-}
index 6f96247226a4d2b9c1e9aa56be423ffa0c4d49df..d6f4aed479a12b0da4ed81c3c905183a08e55e39 100644 (file)
@@ -5,7 +5,9 @@
 
 #ifdef CONFIG_GENERIC_BUG
 #define BUGFLAG_WARNING                (1 << 0)
-#define BUGFLAG_TAINT(taint)   (BUGFLAG_WARNING | ((taint) << 8))
+#define BUGFLAG_ONCE           (1 << 1)
+#define BUGFLAG_DONE           (1 << 2)
+#define BUGFLAG_TAINT(taint)   ((taint) << 8)
 #define BUG_GET_TAINT(bug)     ((bug)->flags >> 8)
 #endif
 
@@ -55,6 +57,18 @@ struct bug_entry {
 #define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
 #endif
 
+#ifdef __WARN_FLAGS
+#define __WARN_TAINT(taint)            __WARN_FLAGS(BUGFLAG_TAINT(taint))
+#define __WARN_ONCE_TAINT(taint)       __WARN_FLAGS(BUGFLAG_ONCE|BUGFLAG_TAINT(taint))
+
+#define WARN_ON_ONCE(condition) ({                             \
+       int __ret_warn_on = !!(condition);                      \
+       if (unlikely(__ret_warn_on))                            \
+               __WARN_ONCE_TAINT(TAINT_WARN);                  \
+       unlikely(__ret_warn_on);                                \
+})
+#endif
+
 /*
  * WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report
  * significant issues that need prompt attention if they should ever
@@ -97,7 +111,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
 #endif
 
 #ifndef WARN
-#define WARN(condition, format...) ({                                          \
+#define WARN(condition, format...) ({                                  \
        int __ret_warn_on = !!(condition);                              \
        if (unlikely(__ret_warn_on))                                    \
                __WARN_printf(format);                                  \
@@ -112,6 +126,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
        unlikely(__ret_warn_on);                                        \
 })
 
+#ifndef WARN_ON_ONCE
 #define WARN_ON_ONCE(condition)        ({                              \
        static bool __section(.data.unlikely) __warned;         \
        int __ret_warn_once = !!(condition);                    \
@@ -122,6 +137,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
        }                                                       \
        unlikely(__ret_warn_once);                              \
 })
+#endif
 
 #define WARN_ONCE(condition, format...)        ({                      \
        static bool __section(.data.unlikely) __warned;         \
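
Note: the two WARN_ON_ONCE() definitions above show what __WARN_FLAGS buys: the generic fallback (now guarded by #ifndef) needs a static bool per call site, while the flags-based variant keeps the "already fired" state in the bug_entry itself. A compilable sketch of the fallback pattern, using GNU statement expressions as the kernel does (GCC/Clang only; the kernel additionally places the flag in .data.unlikely):

	#include <stdbool.h>
	#include <stdio.h>

	#define WARN_ON_ONCE(condition) ({				\
		static bool __warned;	/* one flag per call site */	\
		bool __ret = !!(condition);				\
		if (__ret && !__warned) {				\
			__warned = true;				\
			fprintf(stderr, "WARNING at %s:%d\n",		\
				__FILE__, __LINE__);			\
		}							\
		__ret;							\
	})

	int main(void)
	{
		for (int i = 0; i < 3; i++)
			WARN_ON_ONCE(i >= 0);	/* warns only on the first pass */
		return 0;
	}
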
index 143db9c523e25f38488bd43302b82f316e3124a9..3558f4eb1a865792601882f67a6e35211b803278 100644 (file)
                *(.rodata1)                                             \
        }                                                               \
                                                                        \
-       BUG_TABLE                                                       \
-                                                                       \
        /* PCI quirks */                                                \
        .pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {        \
                VMLINUX_SYMBOL(__start_pci_fixups_early) = .;           \
                READ_MOSTLY_DATA(cacheline)                             \
                DATA_DATA                                               \
                CONSTRUCTORS                                            \
-       }
+       }                                                               \
+       BUG_TABLE
 
 #define INIT_TEXT_SECTION(inittext_align)                              \
        . = ALIGN(inittext_align);                                      \
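
Note: the linker-script move above and the const-stripping in lib/bug.c below are one logical change: report_bug() now stores BUGFLAG_DONE into a bug_entry at run time, so __bug_table can no longer sit among the read-only data and is instead emitted right after the writable data output section.
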
index 5828489309bbd22a255f11064418dc3a6b9366de..687b557fc5eb9fca13f4dadd3643eb769886da67 100644 (file)
@@ -105,7 +105,7 @@ static inline int is_warning_bug(const struct bug_entry *bug)
        return bug->flags & BUGFLAG_WARNING;
 }
 
-const struct bug_entry *find_bug(unsigned long bugaddr);
+struct bug_entry *find_bug(unsigned long bugaddr);
 
 enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs);
 
index 06edbbef062322f12662f95d726b2d83856ac62c..a6a1137d06db75f94d005a9e646e061847be1c93 100644 (file)
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -47,7 +47,7 @@
 #include <linux/sched.h>
 #include <linux/rculist.h>
 
-extern const struct bug_entry __start___bug_table[], __stop___bug_table[];
+extern struct bug_entry __start___bug_table[], __stop___bug_table[];
 
 static inline unsigned long bug_addr(const struct bug_entry *bug)
 {
@@ -62,10 +62,10 @@ static inline unsigned long bug_addr(const struct bug_entry *bug)
 /* Updates are protected by module mutex */
 static LIST_HEAD(module_bug_list);
 
-static const struct bug_entry *module_find_bug(unsigned long bugaddr)
+static struct bug_entry *module_find_bug(unsigned long bugaddr)
 {
        struct module *mod;
-       const struct bug_entry *bug = NULL;
+       struct bug_entry *bug = NULL;
 
        rcu_read_lock_sched();
        list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
@@ -122,15 +122,15 @@ void module_bug_cleanup(struct module *mod)
 
 #else
 
-static inline const struct bug_entry *module_find_bug(unsigned long bugaddr)
+static inline struct bug_entry *module_find_bug(unsigned long bugaddr)
 {
        return NULL;
 }
 #endif
 
-const struct bug_entry *find_bug(unsigned long bugaddr)
+struct bug_entry *find_bug(unsigned long bugaddr)
 {
-       const struct bug_entry *bug;
+       struct bug_entry *bug;
 
        for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
                if (bugaddr == bug_addr(bug))
@@ -141,9 +141,9 @@ const struct bug_entry *find_bug(unsigned long bugaddr)
 
 enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 {
-       const struct bug_entry *bug;
+       struct bug_entry *bug;
        const char *file;
-       unsigned line, warning;
+       unsigned line, warning, once, done;
 
        if (!is_valid_bugaddr(bugaddr))
                return BUG_TRAP_TYPE_NONE;
@@ -164,6 +164,18 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
                line = bug->line;
 #endif
                warning = (bug->flags & BUGFLAG_WARNING) != 0;
+               once = (bug->flags & BUGFLAG_ONCE) != 0;
+               done = (bug->flags & BUGFLAG_DONE) != 0;
+
+               if (warning && once) {
+                       if (done)
+                               return BUG_TRAP_TYPE_WARN;
+
+                       /*
+                        * Since this is the only store, concurrency is not an issue.
+                        */
+                       bug->flags |= BUGFLAG_DONE;
+               }
        }
 
        if (warning) {
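
Note: the once-handling spliced in above keys off three low flag bits, with the taint value parked in the high bits; BUGFLAG_TAINT() no longer implies BUGFLAG_WARNING, which the arch's __WARN_FLAGS implementation now ORs in itself. A sketch of just that flag logic, reusing the BUGFLAG_* values from the asm-generic/bug.h hunk; 9 stands in for TAINT_WARN:

	#include <stdio.h>

	#define BUGFLAG_WARNING		(1 << 0)
	#define BUGFLAG_ONCE		(1 << 1)
	#define BUGFLAG_DONE		(1 << 2)
	#define BUGFLAG_TAINT(taint)	((taint) << 8)
	#define BUG_GET_TAINT(flags)	((flags) >> 8)

	struct bug_entry { unsigned flags; };

	/* Mirrors the new branch in report_bug(): a warn-once entry fires
	 * the first time, marks itself DONE, and is skipped afterwards. */
	static int warn_should_print(struct bug_entry *bug)
	{
		int warning = (bug->flags & BUGFLAG_WARNING) != 0;
		int once    = (bug->flags & BUGFLAG_ONCE) != 0;
		int done    = (bug->flags & BUGFLAG_DONE) != 0;

		if (!warning)
			return 0;	/* a real BUG(), not a WARN */
		if (once && done)
			return 0;	/* already reported once */
		if (once)
			bug->flags |= BUGFLAG_DONE;	/* the "only store" */
		return 1;
	}

	int main(void)
	{
		struct bug_entry w = {
			BUGFLAG_WARNING | BUGFLAG_ONCE | BUGFLAG_TAINT(9)
		};

		for (int i = 0; i < 3; i++)
			printf("hit %d prints: %d (taint %u)\n",
			       i, warn_should_print(&w), BUG_GET_TAINT(w.flags));
		return 0;
	}
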