x86/cpu_entry_area: Add debugstore entries to cpu_entry_area
author		Thomas Gleixner <tglx@linutronix.de>
		Mon, 4 Dec 2017 14:07:49 +0000 (15:07 +0100)
committer	Ingo Molnar <mingo@kernel.org>
		Sat, 23 Dec 2017 20:13:00 +0000 (21:13 +0100)
The Intel PEBS/BTS debug store is a design trainwreck as it expects virtual
addresses which must be visible in any execution context.

So it is required to make these mappings visible to user space when kernel
page table isolation is active.

Provide enough room for the buffer mappings in the cpu_entry_area so that
the buffers are available in the user-space-visible page tables.

At the point where the kernel-side entry area is populated, there is no
buffer available yet, but the kernel PMD must be populated. To achieve
this, set the entries for these buffers to non-present.
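
For illustration only (not part of this commit): a minimal user-space toy
model of the "populate now, map later" idea, with all names below
(toy_pte, toy_setup_entry_area, toy_map_buffer, the sizes) hypothetical.
The entries covering the buffer area are created up front but marked
non-present, so a later consumer only has to flip an existing entry to
point at a real buffer and never has to create an intermediate page-table
level from an unsuitable context.

  /* Toy model of "populate now, map later" -- plain C, not kernel code. */
  #include <stdint.h>
  #include <stdio.h>
  #include <stdlib.h>

  #define TOY_PAGE_SIZE 4096u
  #define TOY_BUF_PAGES 16u   /* mirrors the BTS/PEBS buffers: PAGE_SIZE << 4 */

  struct toy_pte {
      uintptr_t addr;         /* toy stand-in for a physical address */
      int       present;      /* 0 == non-present, like PAGE_NONE */
  };

  /* Reserved (hence "PMD populated") at init time, but all non-present. */
  static struct toy_pte buffer_ptes[TOY_BUF_PAGES];

  static void toy_setup_entry_area(void)
  {
      unsigned int i;

      for (i = 0; i < TOY_BUF_PAGES; i++) {
          buffer_ptes[i].addr = 0;
          buffer_ptes[i].present = 0;
      }
  }

  /* Later, once a real buffer exists, only the existing entries are flipped. */
  static void toy_map_buffer(void *buf)
  {
      unsigned int i;

      for (i = 0; i < TOY_BUF_PAGES; i++) {
          buffer_ptes[i].addr = (uintptr_t)buf + i * TOY_PAGE_SIZE;
          buffer_ptes[i].present = 1;
      }
  }

  int main(void)
  {
      void *buf;

      toy_setup_entry_area();

      buf = aligned_alloc(TOY_PAGE_SIZE, TOY_BUF_PAGES * TOY_PAGE_SIZE);
      if (!buf)
          return 1;

      toy_map_buffer(buf);
      printf("entry 0: present=%d\n", buffer_ptes[0].present);
      free(buf);
      return 0;
  }

In the real code the same effect comes from cea_set_pte(cea, 0, PAGE_NONE)
in percpu_setup_debug_store() below, which forces the PMDs for the
cpu_debug_buffers range to exist before any buffer has been allocated.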

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/events/intel/ds.c
arch/x86/events/perf_event.h
arch/x86/include/asm/cpu_entry_area.h
arch/x86/include/asm/intel_ds.h [new file with mode: 0644]
arch/x86/mm/cpu_entry_area.c

diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 3674a4b6f8bd0c5f12223b8f5c16067a933450df..6522f0279cb89ba73fc8f74b47f5da37bc932669 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -8,11 +8,12 @@
 
 #include "../perf_event.h"
 
+/* Waste a full page so it can be mapped into the cpu_entry_area */
+DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
+
 /* The size of a BTS record in bytes: */
 #define BTS_RECORD_SIZE                24
 
-#define BTS_BUFFER_SIZE                (PAGE_SIZE << 4)
-#define PEBS_BUFFER_SIZE       (PAGE_SIZE << 4)
 #define PEBS_FIXUP_SIZE                PAGE_SIZE
 
 /*
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index f7aaadf9331fb75587e74a42b041d78b2a014fc0..373f9eda80b1b1d3faca70498ed4d9e84cadff2c 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -14,6 +14,8 @@
 
 #include <linux/perf_event.h>
 
+#include <asm/intel_ds.h>
+
 /* To enable MSR tracing please use the generic trace points. */
 
 /*
@@ -77,8 +79,6 @@ struct amd_nb {
        struct event_constraint event_constraints[X86_PMC_IDX_MAX];
 };
 
-/* The maximal number of PEBS events: */
-#define MAX_PEBS_EVENTS                8
 #define PEBS_COUNTER_MASK      ((1ULL << MAX_PEBS_EVENTS) - 1)
 
 /*
@@ -95,23 +95,6 @@ struct amd_nb {
        PERF_SAMPLE_TRANSACTION | PERF_SAMPLE_PHYS_ADDR | \
        PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER)
 
-/*
- * A debug store configuration.
- *
- * We only support architectures that use 64bit fields.
- */
-struct debug_store {
-       u64     bts_buffer_base;
-       u64     bts_index;
-       u64     bts_absolute_maximum;
-       u64     bts_interrupt_threshold;
-       u64     pebs_buffer_base;
-       u64     pebs_index;
-       u64     pebs_absolute_maximum;
-       u64     pebs_interrupt_threshold;
-       u64     pebs_event_reset[MAX_PEBS_EVENTS];
-};
-
 #define PEBS_REGS \
        (PERF_REG_X86_AX | \
         PERF_REG_X86_BX | \
diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
index 2fbc69a0916edb672277ab8c9650ec3ddeb3c646..4a7884b8dca55bc58f077a90ad93d6398bddd053 100644
--- a/arch/x86/include/asm/cpu_entry_area.h
+++ b/arch/x86/include/asm/cpu_entry_area.h
@@ -5,6 +5,7 @@
 
 #include <linux/percpu-defs.h>
 #include <asm/processor.h>
+#include <asm/intel_ds.h>
 
 /*
  * cpu_entry_area is a percpu region that contains things needed by the CPU
@@ -40,6 +41,18 @@ struct cpu_entry_area {
         */
        char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
 #endif
+#ifdef CONFIG_CPU_SUP_INTEL
+       /*
+        * Per CPU debug store for Intel performance monitoring. Wastes a
+        * full page at the moment.
+        */
+       struct debug_store cpu_debug_store;
+       /*
+        * The actual PEBS/BTS buffers must be mapped to user space
+        * Reserve enough fixmap PTEs.
+        */
+       struct debug_store_buffers cpu_debug_buffers;
+#endif
 };
 
 #define CPU_ENTRY_AREA_SIZE    (sizeof(struct cpu_entry_area))
diff --git a/arch/x86/include/asm/intel_ds.h b/arch/x86/include/asm/intel_ds.h
new file mode 100644
index 0000000..62a9f49
--- /dev/null
+++ b/arch/x86/include/asm/intel_ds.h
@@ -0,0 +1,36 @@
+#ifndef _ASM_INTEL_DS_H
+#define _ASM_INTEL_DS_H
+
+#include <linux/percpu-defs.h>
+
+#define BTS_BUFFER_SIZE                (PAGE_SIZE << 4)
+#define PEBS_BUFFER_SIZE       (PAGE_SIZE << 4)
+
+/* The maximal number of PEBS events: */
+#define MAX_PEBS_EVENTS                8
+
+/*
+ * A debug store configuration.
+ *
+ * We only support architectures that use 64bit fields.
+ */
+struct debug_store {
+       u64     bts_buffer_base;
+       u64     bts_index;
+       u64     bts_absolute_maximum;
+       u64     bts_interrupt_threshold;
+       u64     pebs_buffer_base;
+       u64     pebs_index;
+       u64     pebs_absolute_maximum;
+       u64     pebs_interrupt_threshold;
+       u64     pebs_event_reset[MAX_PEBS_EVENTS];
+} __aligned(PAGE_SIZE);
+
+DECLARE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
+
+struct debug_store_buffers {
+       char    bts_buffer[BTS_BUFFER_SIZE];
+       char    pebs_buffer[PEBS_BUFFER_SIZE];
+};
+
+#endif
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index fe814fd5e0140fbf7c4c1aacccc7993e89febbac..b9283cc276220db667ab091a3358eb5741813f7f 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -38,6 +38,32 @@ cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
                cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
 }
 
+static void percpu_setup_debug_store(int cpu)
+{
+#ifdef CONFIG_CPU_SUP_INTEL
+       int npages;
+       void *cea;
+
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+               return;
+
+       cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
+       npages = sizeof(struct debug_store) / PAGE_SIZE;
+       BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
+       cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
+                            PAGE_KERNEL);
+
+       cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
+       /*
+        * Force the population of PMDs for not yet allocated per cpu
+        * memory like debug store buffers.
+        */
+       npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
+       for (; npages; npages--, cea += PAGE_SIZE)
+               cea_set_pte(cea, 0, PAGE_NONE);
+#endif
+}
+
 /* Setup the fixmap mappings only once per-processor */
 static void __init setup_cpu_entry_area(int cpu)
 {
@@ -109,6 +135,7 @@ static void __init setup_cpu_entry_area(int cpu)
        cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
                     __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
 #endif
+       percpu_setup_debug_store(cpu);
 }
 
 static __init void setup_cpu_entry_area_ptes(void)