From 884fcb9e8befe21a962d95664b1e60377284636a Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf <jpoimboe@redhat.com>
Date: Tue, 11 Jul 2017 10:33:44 -0500
Subject: [PATCH 020/233] x86/entry/64: Add unwind hint annotations
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

Add unwind hint annotations to entry_64.S. This will enable the ORC
unwinder to unwind through any location in the entry code including
syscalls, interrupts, and exceptions.

Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: live-patching@vger.kernel.org
Link: http://lkml.kernel.org/r/b9f6d478aadf68ba57c739dcfac34ec0dc021c4c.1499786555.git.jpoimboe@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 8c1f75587a18ca032da8f6376d1ed882d7095289)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit a8448e6971c1e71b22c651131d14f8be76e6d399)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/entry/Makefile   |  1 -
 arch/x86/entry/calling.h  |  5 ++++
 arch/x86/entry/entry_64.S | 71 ++++++++++++++++++++++++++++++++++++++++-------
 3 files changed, 66 insertions(+), 11 deletions(-)

diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
index 9976fcecd17e..af28a8a24366 100644
--- a/arch/x86/entry/Makefile
+++ b/arch/x86/entry/Makefile
@@ -2,7 +2,6 @@
 # Makefile for the x86 low level entry code
 #
 
-OBJECT_FILES_NON_STANDARD_entry_$(BITS).o := y
 OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y
 
 CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,)
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 05ed3d393da7..640aafebdc00 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -1,4 +1,5 @@
 #include <linux/jump_label.h>
+#include <asm/unwind_hints.h>
 
 /*
 
@@ -112,6 +113,7 @@ For 32-bit we have the following conventions - kernel is built with
 	movq %rdx, 12*8+\offset(%rsp)
 	movq %rsi, 13*8+\offset(%rsp)
 	movq %rdi, 14*8+\offset(%rsp)
+	UNWIND_HINT_REGS offset=\offset extra=0
 .endm
 .macro SAVE_C_REGS offset=0
 	SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
@@ -136,6 +138,7 @@ For 32-bit we have the following conventions - kernel is built with
 	movq %r12, 3*8+\offset(%rsp)
 	movq %rbp, 4*8+\offset(%rsp)
 	movq %rbx, 5*8+\offset(%rsp)
+	UNWIND_HINT_REGS offset=\offset
 .endm
 
 .macro RESTORE_EXTRA_REGS offset=0
@@ -145,6 +148,7 @@ For 32-bit we have the following conventions - kernel is built with
 	movq 3*8+\offset(%rsp), %r12
 	movq 4*8+\offset(%rsp), %rbp
 	movq 5*8+\offset(%rsp), %rbx
+	UNWIND_HINT_REGS offset=\offset extra=0
 .endm
 
 .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
@@ -167,6 +171,7 @@ For 32-bit we have the following conventions - kernel is built with
 	.endif
 	movq 13*8(%rsp), %rsi
 	movq 14*8(%rsp), %rdi
+	UNWIND_HINT_IRET_REGS offset=16*8
 .endm
 .macro RESTORE_C_REGS
 	RESTORE_C_REGS_HELPER 1,1,1,1,1
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 184b70712545..64b233ab7cad 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -36,6 +36,7 @@
 #include <asm/smap.h>
 #include <asm/pgtable_types.h>
 #include <asm/export.h>
+#include <asm/frame.h>
 #include <linux/err.h>
 
 .code64
@@ -43,9 +44,10 @@
 
 #ifdef CONFIG_PARAVIRT
 ENTRY(native_usergs_sysret64)
+	UNWIND_HINT_EMPTY
 	swapgs
 	sysretq
-ENDPROC(native_usergs_sysret64)
+END(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
 
 .macro TRACE_IRQS_IRETQ
@@ -134,6 +136,7 @@ ENDPROC(native_usergs_sysret64)
  */
 
 ENTRY(entry_SYSCALL_64)
+	UNWIND_HINT_EMPTY
 	/*
 	 * Interrupts are off on entry.
 	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
@@ -169,6 +172,7 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
 	pushq %r10 /* pt_regs->r10 */
 	pushq %r11 /* pt_regs->r11 */
 	sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
+	UNWIND_HINT_REGS extra=0
 
 	/*
 	 * If we need to do entry work or if we guess we'll need to do
@@ -223,6 +227,7 @@ entry_SYSCALL_64_fastpath:
 	movq EFLAGS(%rsp), %r11
 	RESTORE_C_REGS_EXCEPT_RCX_R11
 	movq RSP(%rsp), %rsp
+	UNWIND_HINT_EMPTY
 	USERGS_SYSRET64
 
 1:
@@ -316,6 +321,7 @@ syscall_return_via_sysret:
 	/* rcx and r11 are already restored (see code above) */
 	RESTORE_C_REGS_EXCEPT_RCX_R11
 	movq RSP(%rsp), %rsp
+	UNWIND_HINT_EMPTY
 	USERGS_SYSRET64
 
 opportunistic_sysret_failed:
@@ -343,6 +349,7 @@ ENTRY(stub_ptregs_64)
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	popq %rax
+	UNWIND_HINT_REGS extra=0
 	jmp entry_SYSCALL64_slow_path
 
 1:
@@ -351,6 +358,7 @@ END(stub_ptregs_64)
 
 .macro ptregs_stub func
 ENTRY(ptregs_\func)
+	UNWIND_HINT_FUNC
 	leaq \func(%rip), %rax
 	jmp stub_ptregs_64
 END(ptregs_\func)
@@ -367,6 +375,7 @@ END(ptregs_\func)
  * %rsi: next task
  */
 ENTRY(__switch_to_asm)
+	UNWIND_HINT_FUNC
 	/*
 	 * Save callee-saved registers
 	 * This must match the order in inactive_task_frame
@@ -406,6 +415,7 @@ END(__switch_to_asm)
  * r12: kernel thread arg
  */
 ENTRY(ret_from_fork)
+	UNWIND_HINT_EMPTY
 	movq %rax, %rdi
 	call schedule_tail /* rdi: 'prev' task parameter */
 
@@ -413,6 +423,7 @@ ENTRY(ret_from_fork)
 	jnz 1f /* kernel threads are uncommon */
 
 2:
+	UNWIND_HINT_REGS
 	movq %rsp, %rdi
 	call syscall_return_slowpath /* returns with IRQs disabled */
 	TRACE_IRQS_ON /* user mode is traced as IRQS on */
@@ -440,10 +451,11 @@ END(ret_from_fork)
 ENTRY(irq_entries_start)
 	vector=FIRST_EXTERNAL_VECTOR
 	.rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
+	UNWIND_HINT_IRET_REGS
 	pushq $(~vector+0x80) /* Note: always in signed byte range */
-	vector=vector+1
 	jmp common_interrupt
 	.align 8
+	vector=vector+1
 	.endr
 END(irq_entries_start)
 
@@ -465,9 +477,14 @@ END(irq_entries_start)
  *
  * The invariant is that, if irq_count != -1, then the IRQ stack is in use.
  */
-.macro ENTER_IRQ_STACK old_rsp
+.macro ENTER_IRQ_STACK regs=1 old_rsp
 	DEBUG_ENTRY_ASSERT_IRQS_OFF
 	movq %rsp, \old_rsp
+
+	.if \regs
+	UNWIND_HINT_REGS base=\old_rsp
+	.endif
+
 	incl PER_CPU_VAR(irq_count)
 	jnz .Lirq_stack_push_old_rsp_\@
 
@@ -504,16 +521,24 @@ END(irq_entries_start)
 
 .Lirq_stack_push_old_rsp_\@:
 	pushq \old_rsp
+
+	.if \regs
+	UNWIND_HINT_REGS indirect=1
+	.endif
 .endm
 
 /*
  * Undoes ENTER_IRQ_STACK.
  */
-.macro LEAVE_IRQ_STACK
+.macro LEAVE_IRQ_STACK regs=1
 	DEBUG_ENTRY_ASSERT_IRQS_OFF
 	/* We need to be off the IRQ stack before decrementing irq_count. */
 	popq %rsp
 
+	.if \regs
+	UNWIND_HINT_REGS
+	.endif
+
 	/*
 	 * As in ENTER_IRQ_STACK, irq_count == 0, we are still claiming
 	 * the irq stack but we're not on it.
@@ -624,6 +649,7 @@ restore_c_regs_and_iret:
 	INTERRUPT_RETURN
 
 ENTRY(native_iret)
+	UNWIND_HINT_IRET_REGS
 	/*
 	 * Are we returning to a stack segment from the LDT? Note: in
 	 * 64-bit mode SS:RSP on the exception stack is always valid.
@@ -696,6 +722,7 @@ native_irq_return_ldt:
 	orq PER_CPU_VAR(espfix_stack), %rax
 	SWAPGS
 	movq %rax, %rsp
+	UNWIND_HINT_IRET_REGS offset=8
 
 	/*
 	 * At this point, we cannot write to the stack any more, but we can
@@ -717,6 +744,7 @@ END(common_interrupt)
  */
 .macro apicinterrupt3 num sym do_sym
 ENTRY(\sym)
+	UNWIND_HINT_IRET_REGS
 	ASM_CLAC
 	pushq $~(\num)
 .Lcommon_\sym:
@@ -803,6 +831,8 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
 
 .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
 ENTRY(\sym)
+	UNWIND_HINT_IRET_REGS offset=8
+
 	/* Sanity check */
 	.if \shift_ist != -1 && \paranoid == 0
 	.error "using shift_ist requires paranoid=1"
@@ -826,6 +856,7 @@ ENTRY(\sym)
 	.else
 	call error_entry
 	.endif
+	UNWIND_HINT_REGS
 	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */
 
 	.if \paranoid
@@ -923,6 +954,7 @@ idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0
 * edi: new selector
 */
 ENTRY(native_load_gs_index)
+	FRAME_BEGIN
 	pushfq
 	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
 	SWAPGS
@@ -931,8 +963,9 @@ ENTRY(native_load_gs_index)
 2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
 	SWAPGS
 	popfq
+	FRAME_END
 	ret
-END(native_load_gs_index)
+ENDPROC(native_load_gs_index)
 EXPORT_SYMBOL(native_load_gs_index)
 
 	_ASM_EXTABLE(.Lgs_change, bad_gs)
@@ -955,12 +988,12 @@ bad_gs:
 ENTRY(do_softirq_own_stack)
 	pushq %rbp
 	mov %rsp, %rbp
-	ENTER_IRQ_STACK old_rsp=%r11
+	ENTER_IRQ_STACK regs=0 old_rsp=%r11
 	call __do_softirq
-	LEAVE_IRQ_STACK
+	LEAVE_IRQ_STACK regs=0
 	leaveq
 	ret
-END(do_softirq_own_stack)
+ENDPROC(do_softirq_own_stack)
 
 #ifdef CONFIG_XEN
 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
@@ -984,7 +1017,9 @@ ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */
 * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
 * see the correct pointer to the pt_regs
 */
+	UNWIND_HINT_FUNC
 	movq %rdi, %rsp /* we don't return, adjust the stack frame */
+	UNWIND_HINT_REGS
 
 	ENTER_IRQ_STACK old_rsp=%r10
 	call xen_evtchn_do_upcall
@@ -1010,6 +1045,7 @@ END(xen_do_hypervisor_callback)
 * with its current contents: any discrepancy means we in category 1.
 */
 ENTRY(xen_failsafe_callback)
+	UNWIND_HINT_EMPTY
 	movl %ds, %ecx
 	cmpw %cx, 0x10(%rsp)
 	jne 1f
@@ -1029,11 +1065,13 @@ ENTRY(xen_failsafe_callback)
 	pushq $0 /* RIP */
 	pushq %r11
 	pushq %rcx
+	UNWIND_HINT_IRET_REGS offset=8
 	jmp general_protection
 1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
 	movq (%rsp), %rcx
 	movq 8(%rsp), %r11
 	addq $0x30, %rsp
+	UNWIND_HINT_IRET_REGS
 	pushq $-1 /* orig_ax = -1 => not a system call */
 	ALLOC_PT_GPREGS_ON_STACK
 	SAVE_C_REGS
@@ -1079,6 +1117,7 @@ idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vec
 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
 */
 ENTRY(paranoid_entry)
+	UNWIND_HINT_FUNC
 	cld
 	SAVE_C_REGS 8
 	SAVE_EXTRA_REGS 8
@@ -1106,6 +1145,7 @@ END(paranoid_entry)
 * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
 */
 ENTRY(paranoid_exit)
+	UNWIND_HINT_REGS
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF_DEBUG
 	testl %ebx, %ebx /* swapgs needed? */
@@ -1127,6 +1167,7 @@ END(paranoid_exit)
 * Return: EBX=0: came from user mode; EBX=1: otherwise
 */
 ENTRY(error_entry)
+	UNWIND_HINT_FUNC
 	cld
 	SAVE_C_REGS 8
 	SAVE_EXTRA_REGS 8
@@ -1211,6 +1252,7 @@ END(error_entry)
 * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
 */
 ENTRY(error_exit)
+	UNWIND_HINT_REGS
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	testl %ebx, %ebx
@@ -1220,6 +1262,7 @@ END(error_exit)
 
 /* Runs on exception stack */
 ENTRY(nmi)
+	UNWIND_HINT_IRET_REGS
 	/*
 	 * Fix up the exception frame if we're on Xen.
 	 * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
@@ -1293,11 +1336,13 @@ ENTRY(nmi)
 	cld
 	movq %rsp, %rdx
 	movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+	UNWIND_HINT_IRET_REGS base=%rdx offset=8
 	pushq 5*8(%rdx) /* pt_regs->ss */
 	pushq 4*8(%rdx) /* pt_regs->rsp */
 	pushq 3*8(%rdx) /* pt_regs->flags */
 	pushq 2*8(%rdx) /* pt_regs->cs */
 	pushq 1*8(%rdx) /* pt_regs->rip */
+	UNWIND_HINT_IRET_REGS
 	pushq $-1 /* pt_regs->orig_ax */
 	pushq %rdi /* pt_regs->di */
 	pushq %rsi /* pt_regs->si */
@@ -1314,6 +1359,7 @@ ENTRY(nmi)
 	pushq %r13 /* pt_regs->r13 */
 	pushq %r14 /* pt_regs->r14 */
 	pushq %r15 /* pt_regs->r15 */
+	UNWIND_HINT_REGS
 	ENCODE_FRAME_POINTER
 
 	/*
@@ -1468,6 +1514,7 @@ first_nmi:
 	.rept 5
 	pushq 11*8(%rsp)
 	.endr
+	UNWIND_HINT_IRET_REGS
 
 	/* Everything up to here is safe from nested NMIs */
 
@@ -1483,6 +1530,7 @@ first_nmi:
 	pushq $__KERNEL_CS /* CS */
 	pushq $1f /* RIP */
 	INTERRUPT_RETURN /* continues at repeat_nmi below */
+	UNWIND_HINT_IRET_REGS
 1:
 #endif
 
@@ -1532,6 +1580,7 @@ end_repeat_nmi:
 	 * exceptions might do.
 	 */
 	call paranoid_entry
+	UNWIND_HINT_REGS
 
 	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
 	movq %rsp, %rdi
@@ -1569,17 +1618,19 @@ nmi_restore:
 END(nmi)
 
 ENTRY(ignore_sysret)
+	UNWIND_HINT_EMPTY
 	mov $-ENOSYS, %eax
 	sysret
 END(ignore_sysret)
 
 ENTRY(rewind_stack_do_exit)
+	UNWIND_HINT_FUNC
 	/* Prevent any naive code from trying to unwind to our caller. */
 	xorl %ebp, %ebp
 
 	movq PER_CPU_VAR(cpu_current_top_of_stack), %rax
-	leaq -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%rax), %rsp
+	leaq -PTREGS_SIZE(%rax), %rsp
+	UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE
 
 	call do_exit
-1:	jmp 1b
 END(rewind_stack_do_exit)
-- 
2.14.2

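[Editor's note, not part of the patch above: a minimal sketch of the annotation
pattern the patch applies throughout entry_64.S. The UNWIND_HINT_* macros come
from <asm/unwind_hints.h>, which this patch includes in calling.h; the label
"my_entry" below is hypothetical. Each hint records, for objtool, what the
stack looks like at that instruction, so it can emit ORC data covering the
entry code:

	ENTRY(my_entry)
		UNWIND_HINT_IRET_REGS offset=8	/* assumption: only the hardware
						   iret frame plus an error code
						   is on the stack at entry */
		/* ... push the remaining registers ... */
		call	error_entry		/* saves a full pt_regs */
		UNWIND_HINT_REGS		/* pt_regs is now complete */
	END(my_entry)

Between hints, objtool follows stack operations on its own; explicit hints are
only needed where the state cannot be inferred, such as entry points reached
directly by the hardware.]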