From 13df3feb2ec154e844bca62c6bb7b91ced2b067c Mon Sep 17 00:00:00 2001
From: Colin Ian King <colin.king@canonical.com>
Date: Sat, 6 Jan 2018 10:26:31 +0000
Subject: [PATCH 231/242] UBUNTU: SAUCE: BODGE: temporarily disable some kprobe
 trace points which are cratering
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Most of the interrupt related trace points are cratering when enabled.
Simply turn them off temporarily while we are investigating this.

CVE-2017-5754
Based on work by Colin King <colin.king@canonical.com>
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit 4ecc04d14ee2f9b46d3e252215a7622d7d47e974)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/include/asm/trace/irq_vectors.h | 2 +-
 arch/x86/kernel/apic/apic.c              | 7 -------
 arch/x86/kernel/cpu/mcheck/mce_amd.c     | 3 ---
 arch/x86/kernel/cpu/mcheck/therm_throt.c | 3 ---
 arch/x86/kernel/cpu/mcheck/threshold.c   | 3 ---
 arch/x86/kernel/irq.c                    | 3 ---
 arch/x86/kernel/irq_work.c               | 3 ---
 arch/x86/kernel/smp.c                    | 7 -------
 arch/x86/mm/fault.c                      | 9 ++-------
 9 files changed, 3 insertions(+), 37 deletions(-)

diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
index 7825b4426e7e..cf529e274a14 100644
--- a/arch/x86/include/asm/trace/irq_vectors.h
+++ b/arch/x86/include/asm/trace/irq_vectors.h
@@ -67,7 +67,7 @@ DEFINE_IRQ_VECTOR_EVENT(x86_platform_ipi);
  * irq_work - called when entering/exiting a irq work interrupt
  * vector handler
  */
-DEFINE_IRQ_VECTOR_EVENT(irq_work);
+// DEFINE_IRQ_VECTOR_EVENT(irq_work);
 
 /*
  * We must dis-allow sampling irq_work_exit() because perf event sampling
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index bb63c1350524..4a018da7eca1 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -35,7 +35,6 @@
 #include <linux/smp.h>
 #include <linux/mm.h>
 
-#include <asm/trace/irq_vectors.h>
 #include <asm/irq_remapping.h>
 #include <asm/perf_event.h>
 #include <asm/x86_init.h>
@@ -1074,9 +1073,7 @@ __visible void __irq_entry smp_trace_apic_timer_interrupt(struct pt_regs *regs)
 	 * interrupt lock, which is the WrongThing (tm) to do.
 	 */
 	entering_ack_irq();
-	trace_local_timer_entry(LOCAL_TIMER_VECTOR);
 	local_apic_timer_interrupt();
-	trace_local_timer_exit(LOCAL_TIMER_VECTOR);
 	exiting_irq();
 
 	set_irq_regs(old_regs);
@@ -1967,9 +1964,7 @@ __visible void __irq_entry smp_trace_spurious_interrupt(struct pt_regs *regs)
 	u8 vector = ~regs->orig_ax;
 
 	entering_irq();
-	trace_spurious_apic_entry(vector);
 	__smp_spurious_interrupt(vector);
-	trace_spurious_apic_exit(vector);
 	exiting_irq();
 }
 
@@ -2023,9 +2018,7 @@ __visible void __irq_entry smp_error_interrupt(struct pt_regs *regs)
 __visible void __irq_entry smp_trace_error_interrupt(struct pt_regs *regs)
 {
 	entering_irq();
-	trace_error_apic_entry(ERROR_APIC_VECTOR);
 	__smp_error_interrupt(regs);
-	trace_error_apic_exit(ERROR_APIC_VECTOR);
 	exiting_irq();
 }
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 5ce1a5689162..c983db8ccdb8 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -26,7 +26,6 @@
 #include <asm/apic.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
-#include <asm/trace/irq_vectors.h>
 
 #define NR_BLOCKS 5
 #define THRESHOLD_MAX 0xFFF
@@ -787,9 +786,7 @@ asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void)
 asmlinkage __visible void __irq_entry smp_trace_deferred_error_interrupt(void)
 {
 	entering_irq();
-	trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
 	__smp_deferred_error_interrupt();
-	trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
 	exiting_ack_irq();
 }
 
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index f7370abd33c6..f366a622e186 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -28,7 +28,6 @@
 #include <asm/apic.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
-#include <asm/trace/irq_vectors.h>
 
 /* How long to wait between reporting thermal events */
 #define CHECK_INTERVAL (300 * HZ)
@@ -408,9 +407,7 @@ asmlinkage __visible void __irq_entry
 smp_trace_thermal_interrupt(struct pt_regs *regs)
 {
 	entering_irq();
-	trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
 	__smp_thermal_interrupt();
-	trace_thermal_apic_exit(THERMAL_APIC_VECTOR);
 	exiting_ack_irq();
 }
 
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c
index bb0e75eed10a..623f3e3515e0 100644
--- a/arch/x86/kernel/cpu/mcheck/threshold.c
+++ b/arch/x86/kernel/cpu/mcheck/threshold.c
@@ -7,7 +7,6 @@
 #include <asm/irq_vectors.h>
 #include <asm/apic.h>
 #include <asm/mce.h>
-#include <asm/trace/irq_vectors.h>
 
 static void default_threshold_interrupt(void)
 {
@@ -33,8 +32,6 @@ asmlinkage __visible void __irq_entry smp_threshold_interrupt(void)
 asmlinkage __visible void __irq_entry smp_trace_threshold_interrupt(void)
 {
 	entering_irq();
-	trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);
 	__smp_threshold_interrupt();
-	trace_threshold_apic_exit(THRESHOLD_APIC_VECTOR);
 	exiting_ack_irq();
 }
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index a84142a910f3..792a49c3c6d9 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -19,7 +19,6 @@
 #include <asm/desc.h>
 
 #define CREATE_TRACE_POINTS
-#include <asm/trace/irq_vectors.h>
 
 DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 EXPORT_PER_CPU_SYMBOL(irq_stat);
@@ -327,9 +326,7 @@ __visible void __irq_entry smp_trace_x86_platform_ipi(struct pt_regs *regs)
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
 	entering_ack_irq();
-	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
 	__smp_x86_platform_ipi();
-	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
 	exiting_irq();
 	set_irq_regs(old_regs);
 }
diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c
index 275487872be2..06f12444c1b4 100644
--- a/arch/x86/kernel/irq_work.c
+++ b/arch/x86/kernel/irq_work.c
@@ -8,7 +8,6 @@
 #include <linux/irq_work.h>
 #include <linux/hardirq.h>
 #include <asm/apic.h>
-#include <asm/trace/irq_vectors.h>
 #include <linux/interrupt.h>
 
 static inline void __smp_irq_work_interrupt(void)
@@ -27,9 +26,7 @@ __visible void __irq_entry smp_irq_work_interrupt(struct pt_regs *regs)
 __visible void __irq_entry smp_trace_irq_work_interrupt(struct pt_regs *regs)
 {
 	ipi_entering_ack_irq();
-	trace_irq_work_entry(IRQ_WORK_VECTOR);
 	__smp_irq_work_interrupt();
-	trace_irq_work_exit(IRQ_WORK_VECTOR);
 	exiting_irq();
 }
 
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index d798c0da451c..fbf36f1731ab 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -31,7 +31,6 @@
 #include <asm/apic.h>
 #include <asm/nmi.h>
 #include <asm/mce.h>
-#include <asm/trace/irq_vectors.h>
 #include <asm/kexec.h>
 #include <asm/virtext.h>
 
@@ -280,9 +279,7 @@ __visible void __irq_entry smp_trace_reschedule_interrupt(struct pt_regs *regs)
 	 * to nest.
 	 */
 	ipi_entering_ack_irq();
-	trace_reschedule_entry(RESCHEDULE_VECTOR);
 	__smp_reschedule_interrupt();
-	trace_reschedule_exit(RESCHEDULE_VECTOR);
 	exiting_irq();
 	/*
 	 * KVM uses this interrupt to force a cpu out of guest mode
@@ -306,9 +303,7 @@ __visible void __irq_entry
 smp_trace_call_function_interrupt(struct pt_regs *regs)
 {
 	ipi_entering_ack_irq();
-	trace_call_function_entry(CALL_FUNCTION_VECTOR);
 	__smp_call_function_interrupt();
-	trace_call_function_exit(CALL_FUNCTION_VECTOR);
 	exiting_irq();
 }
 
@@ -330,9 +325,7 @@ __visible void __irq_entry
 smp_trace_call_function_single_interrupt(struct pt_regs *regs)
 {
 	ipi_entering_ack_irq();
-	trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
 	__smp_call_function_single_interrupt();
-	trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR);
 	exiting_irq();
 }
 
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index d3a57e7ad311..4f6478d14d1f 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -26,7 +26,6 @@
 #include <asm/mmu_context.h> /* vma_pkey() */
 
 #define CREATE_TRACE_POINTS
-#include <asm/trace/exceptions.h>
 
 /*
  * Returns 0 if mmiotrace is disabled, or if the fault is not
@@ -1471,10 +1470,6 @@ static nokprobe_inline void
 trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
 			 unsigned long error_code)
 {
-	if (user_mode(regs))
-		trace_page_fault_user(address, regs, error_code);
-	else
-		trace_page_fault_kernel(address, regs, error_code);
 }
 
 /*
@@ -1491,8 +1486,8 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	enum ctx_state prev_state;
 
 	prev_state = exception_enter();
-	if (trace_irqvectors_enabled())
-		trace_page_fault_entries(address, regs, error_code);
+//	if (trace_irqvectors_enabled())
+//		trace_page_fault_entries(address, regs, error_code);
 
 	__do_page_fault(regs, error_code, address);
 	exception_exit(prev_state);
-- 
2.14.2
